mirror of https://github.com/RGBCube/serenity, synced 2025-10-31 14:12:44 +00:00
Use uintptr_t instead of u32 when storing pointers as integers

uintptr_t is 32-bit or 64-bit depending on the target platform. This will help us write pointer-size-agnostic code, so that when the day comes that we want to do a 64-bit port, we'll be in better shape.
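For illustration only (this sketch is not part of the commit): casting a pointer through u32 keeps just the low 32 bits of the address on a 64-bit target, while uintptr_t is guaranteed to be wide enough to round-trip any object pointer. The u32 alias below mirrors the kernel's type name; everything else is a standalone example.

    // Illustrative sketch; not kernel code.
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;

    int main()
    {
        int object = 0;
        int* p = &object;

        // On a 64-bit target this keeps only the low 32 bits of the address;
        // the upper half is silently discarded.
        u32 truncated = (u32)(uintptr_t)p;

        // uintptr_t is defined to be wide enough to hold any object pointer,
        // so the conversion round-trips on both 32-bit and 64-bit targets.
        uintptr_t preserved = (uintptr_t)p;
        assert((int*)preserved == p);

        (void)truncated;
        return 0;
    }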
This commit is contained in:

    parent e07b34b9b8
    commit a246e9cd7e

14 changed files with 110 additions and 110 deletions
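The hunks below repeatedly split a raw physical pointer with page_base_of() and offset_in_page() before mapping it. The following sketch shows that pairing with the new uintptr_t signatures; the PAGE_SIZE/PAGE_MASK definitions and the sample address are illustrative assumptions, not the kernel's exact values.

    // Simplified, self-contained sketch of the page-splitting helpers.
    #include <stdint.h>
    #include <stdio.h>

    static constexpr uintptr_t PAGE_SIZE = 4096;
    static constexpr uintptr_t PAGE_MASK = ~(PAGE_SIZE - 1);

    inline constexpr uintptr_t page_base_of(uintptr_t address) { return address & PAGE_MASK; }
    inline constexpr uintptr_t offset_in_page(uintptr_t address) { return address & ~PAGE_MASK; }

    int main()
    {
        // Pretend this is the physical address of an ACPI table reported by firmware.
        uintptr_t physical = 0x7fe1564;

        // A caller maps the page containing the table, then adds the in-page
        // offset to reach the structure itself; both halves stay pointer-sized.
        printf("page base: %#lx, offset in page: %#lx\n",
            (unsigned long)page_base_of(physical),
            (unsigned long)offset_in_page(physical));
        return 0;
    }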
				
			
		|  | @ -63,8 +63,8 @@ ACPI_RAW::SDTHeader* ACPIStaticParser::find_table(const char* sig) | |||
|     dbgprintf("ACPI: Calling Find Table method!\n"); | ||||
| #endif | ||||
|     for (auto* physical_sdt_ptr : m_main_sdt->get_sdt_pointers()) { | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)physical_sdt_ptr)), (PAGE_SIZE * 2), "ACPI Static Parser Tables Finding", Region::Access::Read); | ||||
|         ACPI_RAW::SDTHeader* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((u32)physical_sdt_ptr)).as_ptr(); | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)physical_sdt_ptr)), (PAGE_SIZE * 2), "ACPI Static Parser Tables Finding", Region::Access::Read); | ||||
|         ACPI_RAW::SDTHeader* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((uintptr_t)physical_sdt_ptr)).as_ptr(); | ||||
| #ifdef ACPI_DEBUG | ||||
|         dbgprintf("ACPI: Examining Table @ P 0x%x\n", physical_sdt_ptr); | ||||
| #endif | ||||
|  | @ -85,20 +85,20 @@ void ACPIStaticParser::init_fadt() | |||
|     ASSERT(find_table("FACP") != nullptr); | ||||
|     auto* fadt_ptr = find_table("FACP"); | ||||
| 
 | ||||
|     auto checkup_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)(fadt_ptr))), (PAGE_SIZE * 2), "ACPI Static Parser", Region::Access::Read); | ||||
|     auto checkup_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)(fadt_ptr))), (PAGE_SIZE * 2), "ACPI Static Parser", Region::Access::Read); | ||||
| #ifdef ACPI_DEBUG | ||||
|     dbgprintf("ACPI: Checking FADT Length to choose the correct mapping size\n"); | ||||
| #endif | ||||
| 
 | ||||
|     ACPI_RAW::SDTHeader* sdt = (ACPI_RAW::SDTHeader*)checkup_region->vaddr().offset(offset_in_page((u32)(fadt_ptr))).as_ptr(); | ||||
|     ACPI_RAW::SDTHeader* sdt = (ACPI_RAW::SDTHeader*)checkup_region->vaddr().offset(offset_in_page((uintptr_t)(fadt_ptr))).as_ptr(); | ||||
| #ifdef ACPI_DEBUG | ||||
|     dbgprintf("ACPI: FADT @ V 0x%x, P 0x%x\n", sdt, fadt_ptr); | ||||
| #endif | ||||
|     u32 length = sdt->length; | ||||
|     kprintf("ACPI: Fixed ACPI data, Revision %u\n", sdt->revision); | ||||
| 
 | ||||
|     auto fadt_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)(fadt_ptr))), PAGE_ROUND_UP(length) + PAGE_SIZE, "ACPI Static Parser", Region::Access::Read); | ||||
|     m_fadt = make<ACPI::FixedACPIData>(*(ACPI_RAW::FADT*)fadt_region->vaddr().offset(offset_in_page((u32)(fadt_ptr))).as_ptr()); | ||||
|     auto fadt_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)(fadt_ptr))), PAGE_ROUND_UP(length) + PAGE_SIZE, "ACPI Static Parser", Region::Access::Read); | ||||
|     m_fadt = make<ACPI::FixedACPIData>(*(ACPI_RAW::FADT*)fadt_region->vaddr().offset(offset_in_page((uintptr_t)(fadt_ptr))).as_ptr()); | ||||
| #ifdef ACPI_DEBUG | ||||
|     dbgprintf("ACPI: Finished to initialize Fixed ACPI data\n"); | ||||
| #endif | ||||
|  | @ -143,8 +143,8 @@ size_t ACPIStaticParser::get_table_size(ACPI_RAW::SDTHeader& p_header) | |||
| #ifdef ACPI_DEBUG | ||||
|     dbgprintf("ACPI: Checking SDT Length\n"); | ||||
| #endif | ||||
|     auto region = MM.allocate_kernel_region(PhysicalAddress((u32)&p_header & PAGE_MASK), (PAGE_SIZE * 2), "ACPI get_table_size()", Region::Access::Read); | ||||
|     volatile auto* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((u32)&p_header)).as_ptr(); | ||||
|     auto region = MM.allocate_kernel_region(PhysicalAddress((uintptr_t)&p_header & PAGE_MASK), (PAGE_SIZE * 2), "ACPI get_table_size()", Region::Access::Read); | ||||
|     volatile auto* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((uintptr_t)&p_header)).as_ptr(); | ||||
|     return sdt->length; | ||||
| } | ||||
| 
 | ||||
|  | @ -154,8 +154,8 @@ u8 ACPIStaticParser::get_table_revision(ACPI_RAW::SDTHeader& p_header) | |||
| #ifdef ACPI_DEBUG | ||||
|     dbgprintf("ACPI: Checking SDT Revision\n"); | ||||
| #endif | ||||
|     auto region = MM.allocate_kernel_region(PhysicalAddress((u32)&p_header & PAGE_MASK), (PAGE_SIZE * 2), "ACPI get_table_revision()", Region::Access::Read); | ||||
|     volatile auto* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((u32)&p_header)).as_ptr(); | ||||
|     auto region = MM.allocate_kernel_region(PhysicalAddress((uintptr_t)&p_header & PAGE_MASK), (PAGE_SIZE * 2), "ACPI get_table_revision()", Region::Access::Read); | ||||
|     volatile auto* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((uintptr_t)&p_header)).as_ptr(); | ||||
|     return sdt->revision; | ||||
| } | ||||
| 
 | ||||
|  | @ -175,8 +175,8 @@ void ACPIStaticParser::initialize_main_system_description_table() | |||
|         revision = get_table_revision(*m_main_system_description_table); | ||||
|     } | ||||
| 
 | ||||
|     auto main_sdt_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)m_main_system_description_table)), PAGE_ROUND_UP(length) + PAGE_SIZE, "ACPI Static Parser Initialization", Region::Access::Read, false, true); | ||||
|     volatile auto* sdt = (ACPI_RAW::SDTHeader*)main_sdt_region->vaddr().offset(offset_in_page((u32)m_main_system_description_table)).as_ptr(); | ||||
|     auto main_sdt_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)m_main_system_description_table)), PAGE_ROUND_UP(length) + PAGE_SIZE, "ACPI Static Parser Initialization", Region::Access::Read, false, true); | ||||
|     volatile auto* sdt = (ACPI_RAW::SDTHeader*)main_sdt_region->vaddr().offset(offset_in_page((uintptr_t)m_main_system_description_table)).as_ptr(); | ||||
|     kprintf("ACPI: Main Description Table valid? 0x%x\n", validate_acpi_table(const_cast<ACPI_RAW::SDTHeader&>(*sdt), length)); | ||||
| 
 | ||||
|     Vector<ACPI_RAW::SDTHeader*> sdt_pointers; | ||||
|  | @ -236,8 +236,8 @@ void ACPIStaticParser::locate_all_aml_tables() | |||
|     kprintf("ACPI: Searching for AML Tables\n"); | ||||
|     m_aml_tables_ptrs.append(m_fadt->get_dsdt()); | ||||
|     for (auto* sdt_ptr : m_main_sdt->get_sdt_pointers()) { | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)sdt_ptr)), (PAGE_SIZE * 2), "ACPI Static Parser AML Tables Finding", Region::Access::Read); | ||||
|         auto* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((u32)sdt_ptr)).as_ptr(); | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)sdt_ptr)), (PAGE_SIZE * 2), "ACPI Static Parser AML Tables Finding", Region::Access::Read); | ||||
|         auto* sdt = (ACPI_RAW::SDTHeader*)region->vaddr().offset(offset_in_page((uintptr_t)sdt_ptr)).as_ptr(); | ||||
| #ifdef ACPI_DEBUG | ||||
|         dbgprintf("ACPI: Examining Table @ P 0x%x\n", sdt_ptr); | ||||
| #endif | ||||
|  | @ -387,7 +387,7 @@ ACPI::FixedACPIData::FixedACPIData(ACPI_RAW::FADT& fadt) | |||
| 
 | ||||
| ACPI_RAW::SDTHeader* ACPI::FixedACPIData::get_dsdt() | ||||
| { | ||||
|     if (m_x_dsdt_ptr != (u32) nullptr) | ||||
|     if (m_x_dsdt_ptr != (uintptr_t) nullptr) | ||||
|         return (ACPI_RAW::SDTHeader*)m_x_dsdt_ptr; | ||||
|     else { | ||||
|         ASSERT((ACPI_RAW::SDTHeader*)m_dsdt_ptr != nullptr); | ||||
|  |  | |||
|  | @ -90,10 +90,10 @@ void DMIDecoder::enumerate_smbios_tables() | |||
|     u32 table_length = m_table_length; | ||||
|     SMBIOS::TableHeader* p_table_ptr = m_structure_table; | ||||
| 
 | ||||
|     PhysicalAddress paddr = PhysicalAddress(page_base_of((u32)p_table_ptr)); | ||||
|     PhysicalAddress paddr = PhysicalAddress(page_base_of((uintptr_t)p_table_ptr)); | ||||
|     auto region = MM.allocate_kernel_region(paddr, PAGE_ROUND_UP(table_length), "DMI Decoder Enumerating SMBIOS", Region::Access::Read, false, false); | ||||
| 
 | ||||
|     volatile SMBIOS::TableHeader* v_table_ptr = (SMBIOS::TableHeader*)region->vaddr().offset(offset_in_page((u32)p_table_ptr)).as_ptr(); | ||||
|     volatile SMBIOS::TableHeader* v_table_ptr = (SMBIOS::TableHeader*)region->vaddr().offset(offset_in_page((uintptr_t)p_table_ptr)).as_ptr(); | ||||
| #ifdef SMBIOS_DEBUG | ||||
|     dbgprintf("DMIDecoder: Total Table length %d\n", m_table_length); | ||||
| #endif | ||||
|  | @ -104,7 +104,7 @@ void DMIDecoder::enumerate_smbios_tables() | |||
|         dbgprintf("DMIDecoder: Examining table @ P 0x%x V 0x%x\n", p_table_ptr, v_table_ptr); | ||||
| #endif | ||||
|         structures_count++; | ||||
|         if (v_table_ptr->type == (u32)SMBIOS::TableType::EndOfTable) { | ||||
|         if (v_table_ptr->type == (u8)SMBIOS::TableType::EndOfTable) { | ||||
|             kprintf("DMIDecoder: Detected table with type 127, End of SMBIOS data.\n"); | ||||
|             break; | ||||
|         } | ||||
|  | @ -113,8 +113,8 @@ void DMIDecoder::enumerate_smbios_tables() | |||
|         table_length -= v_table_ptr->length; | ||||
| 
 | ||||
|         size_t table_size = get_table_size(*p_table_ptr); | ||||
|         p_table_ptr = (SMBIOS::TableHeader*)((u32)p_table_ptr + (u32)table_size); | ||||
|         v_table_ptr = (SMBIOS::TableHeader*)((u32)v_table_ptr + (u32)table_size); | ||||
|         p_table_ptr = (SMBIOS::TableHeader*)((uintptr_t)p_table_ptr + table_size); | ||||
|         v_table_ptr = (SMBIOS::TableHeader*)((uintptr_t)v_table_ptr + table_size); | ||||
| #ifdef SMBIOS_DEBUG | ||||
|         dbgprintf("DMIDecoder: Next table @ P 0x%x\n", p_table_ptr); | ||||
| #endif | ||||
|  | @ -146,7 +146,7 @@ size_t DMIDecoder::get_table_size(SMBIOS::TableHeader& table) | |||
| 
 | ||||
| SMBIOS::TableHeader* DMIDecoder::get_next_physical_table(SMBIOS::TableHeader& p_table) | ||||
| { | ||||
|     return (SMBIOS::TableHeader*)((u32)&p_table + get_table_size(p_table)); | ||||
|     return (SMBIOS::TableHeader*)((uintptr_t)&p_table + get_table_size(p_table)); | ||||
| } | ||||
| 
 | ||||
| SMBIOS::TableHeader* DMIDecoder::get_smbios_physical_table_by_handle(u16 handle) | ||||
|  | @ -155,8 +155,8 @@ SMBIOS::TableHeader* DMIDecoder::get_smbios_physical_table_by_handle(u16 handle) | |||
|     for (auto* table : m_smbios_tables) { | ||||
|         if (!table) | ||||
|             continue; | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)table)), PAGE_SIZE * 2, "DMI Decoder Finding Table", Region::Access::Read, false, false); | ||||
|         SMBIOS::TableHeader* table_v_ptr = (SMBIOS::TableHeader*)region->vaddr().offset(offset_in_page((u32)table)).as_ptr(); | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)table)), PAGE_SIZE * 2, "DMI Decoder Finding Table", Region::Access::Read, false, false); | ||||
|         SMBIOS::TableHeader* table_v_ptr = (SMBIOS::TableHeader*)region->vaddr().offset(offset_in_page((uintptr_t)table)).as_ptr(); | ||||
| 
 | ||||
|         if (table_v_ptr->handle == handle) { | ||||
|             return table; | ||||
|  | @ -170,8 +170,8 @@ SMBIOS::TableHeader* DMIDecoder::get_smbios_physical_table_by_type(u8 table_type | |||
|     for (auto* table : m_smbios_tables) { | ||||
|         if (!table) | ||||
|             continue; | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)table)), PAGE_ROUND_UP(PAGE_SIZE * 2), "DMI Decoder Finding Table", Region::Access::Read, false, false); | ||||
|         SMBIOS::TableHeader* table_v_ptr = (SMBIOS::TableHeader*)region->vaddr().offset(offset_in_page((u32)table)).as_ptr(); | ||||
|         auto region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((uintptr_t)table)), PAGE_ROUND_UP(PAGE_SIZE * 2), "DMI Decoder Finding Table", Region::Access::Read, false, false); | ||||
|         SMBIOS::TableHeader* table_v_ptr = (SMBIOS::TableHeader*)region->vaddr().offset(offset_in_page((uintptr_t)table)).as_ptr(); | ||||
|         if (table_v_ptr->type == table_type) { | ||||
|             return table; | ||||
|         } | ||||
|  |  | |||
|  | @ -201,7 +201,7 @@ struct GenericAddressStructure { | |||
|         this->bit_width = other.bit_width; | ||||
|         this->bit_offset = other.bit_offset; | ||||
|         this->access_size = other.access_size; | ||||
|         this->address = (u32)other.address; | ||||
|         this->address = (uintptr_t)other.address; | ||||
|         return *this; | ||||
|     } | ||||
|     GenericAddressStructure& operator=(const ACPI_RAW::GenericAddressStructure& other) | ||||
|  | @ -210,7 +210,7 @@ struct GenericAddressStructure { | |||
|         this->bit_width = other.bit_width; | ||||
|         this->bit_offset = other.bit_offset; | ||||
|         this->access_size = other.access_size; | ||||
|         this->address = (u32)other.address; | ||||
|         this->address = (uintptr_t)other.address; | ||||
|         return *this; | ||||
|     } | ||||
| }; | ||||
|  |  | |||
|  | @ -33,7 +33,7 @@ | |||
| #include <Kernel/kstdio.h> | ||||
| 
 | ||||
| #define PAGE_SIZE 4096 | ||||
| #define PAGE_MASK 0xfffff000 | ||||
| #define PAGE_MASK ((uintptr_t)0xfffff000u) | ||||
| 
 | ||||
| class MemoryManager; | ||||
| class PageDirectory; | ||||
|  | @ -452,12 +452,12 @@ struct [[gnu::aligned(16)]] FPUState | |||
|     u8 buffer[512]; | ||||
| }; | ||||
| 
 | ||||
| inline constexpr u32 page_base_of(u32 address) | ||||
| inline constexpr uintptr_t page_base_of(uintptr_t address) | ||||
| { | ||||
|     return address & PAGE_MASK; | ||||
| } | ||||
| 
 | ||||
| inline constexpr u32 offset_in_page(u32 address) | ||||
| inline constexpr uintptr_t offset_in_page(uintptr_t address) | ||||
| { | ||||
|     return address & (~PAGE_MASK); | ||||
| } | ||||
|  |  | |||
|  | @ -134,13 +134,13 @@ static void load_ksyms_from_data(const ByteBuffer& buffer) | |||
|     int recognized_symbol_count = 0; | ||||
|     if (use_ksyms) { | ||||
|         for (u32* stack_ptr = (u32*)ebp; | ||||
|              (current ? current->process().validate_read_from_kernel(VirtualAddress((u32)stack_ptr), sizeof(void*) * 2) : 1) && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (u32*)*stack_ptr) { | ||||
|              (current ? current->process().validate_read_from_kernel(VirtualAddress((uintptr_t)stack_ptr), sizeof(void*) * 2) : 1) && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (u32*)*stack_ptr) { | ||||
|             u32 retaddr = stack_ptr[1]; | ||||
|             recognized_symbols[recognized_symbol_count++] = { retaddr, ksymbolicate(retaddr) }; | ||||
|         } | ||||
|     } else { | ||||
|         for (u32* stack_ptr = (u32*)ebp; | ||||
|              (current ? current->process().validate_read_from_kernel(VirtualAddress((u32)stack_ptr), sizeof(void*) * 2) : 1); stack_ptr = (u32*)*stack_ptr) { | ||||
|              (current ? current->process().validate_read_from_kernel(VirtualAddress((uintptr_t)stack_ptr), sizeof(void*) * 2) : 1); stack_ptr = (u32*)*stack_ptr) { | ||||
|             u32 retaddr = stack_ptr[1]; | ||||
|             dbgprintf("%x (next: %x)\n", retaddr, stack_ptr ? (u32*)*stack_ptr : 0); | ||||
|         } | ||||
|  |  | |||
|  | @ -253,14 +253,14 @@ bool E1000NetworkAdapter::link_up() | |||
| 
 | ||||
| void E1000NetworkAdapter::initialize_rx_descriptors() | ||||
| { | ||||
|     auto ptr = (u32)kmalloc_eternal(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16); | ||||
|     auto ptr = (uintptr_t)kmalloc_eternal(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16); | ||||
|     // Make sure it's 16-byte aligned.
 | ||||
|     if (ptr % 16) | ||||
|         ptr = (ptr + 16) - (ptr % 16); | ||||
|     m_rx_descriptors = (e1000_rx_desc*)ptr; | ||||
|     for (int i = 0; i < number_of_rx_descriptors; ++i) { | ||||
|         auto& descriptor = m_rx_descriptors[i]; | ||||
|         auto addr = (u32)kmalloc_eternal(8192 + 16); | ||||
|         auto addr = (uintptr_t)kmalloc_eternal(8192 + 16); | ||||
|         if (addr % 16) | ||||
|             addr = (addr + 16) - (addr % 16); | ||||
|         descriptor.addr = addr - 0xc0000000; | ||||
|  | @ -278,14 +278,14 @@ void E1000NetworkAdapter::initialize_rx_descriptors() | |||
| 
 | ||||
| void E1000NetworkAdapter::initialize_tx_descriptors() | ||||
| { | ||||
|     auto ptr = (u32)kmalloc_eternal(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16); | ||||
|     auto ptr = (uintptr_t)kmalloc_eternal(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16); | ||||
|     // Make sure it's 16-byte aligned.
 | ||||
|     if (ptr % 16) | ||||
|         ptr = (ptr + 16) - (ptr % 16); | ||||
|     m_tx_descriptors = (e1000_tx_desc*)ptr; | ||||
|     for (int i = 0; i < number_of_tx_descriptors; ++i) { | ||||
|         auto& descriptor = m_tx_descriptors[i]; | ||||
|         auto addr = (u32)kmalloc_eternal(8192 + 16); | ||||
|         auto addr = (uintptr_t)kmalloc_eternal(8192 + 16); | ||||
|         if (addr % 16) | ||||
|             addr = (addr + 16) - (addr % 16); | ||||
|         descriptor.addr = addr - 0xc0000000; | ||||
|  |  | |||
|  | @ -157,16 +157,16 @@ RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address pci_address, u8 irq) | |||
|     // we add space to account for overhang from the last packet - the rtl8139
 | ||||
|     // can optionally guarantee that packets will be contiguous by
 | ||||
|     // purposefully overrunning the rx buffer
 | ||||
|     m_rx_buffer_addr = (u32)virtual_to_low_physical(kmalloc_aligned(RX_BUFFER_SIZE + PACKET_SIZE_MAX, 16)); | ||||
|     m_rx_buffer_addr = (uintptr_t)virtual_to_low_physical(kmalloc_aligned(RX_BUFFER_SIZE + PACKET_SIZE_MAX, 16)); | ||||
|     kprintf("RTL8139: RX buffer: P%p\n", m_rx_buffer_addr); | ||||
| 
 | ||||
|     auto tx_buffer_addr = (u32)virtual_to_low_physical(kmalloc_aligned(TX_BUFFER_SIZE * 4, 16)); | ||||
|     auto tx_buffer_addr = (uintptr_t)virtual_to_low_physical(kmalloc_aligned(TX_BUFFER_SIZE * 4, 16)); | ||||
|     for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) { | ||||
|         m_tx_buffer_addr[i] = tx_buffer_addr + TX_BUFFER_SIZE * i; | ||||
|         kprintf("RTL8139: TX buffer %d: P%p\n", i, m_tx_buffer_addr[i]); | ||||
|     } | ||||
| 
 | ||||
|     m_packet_buffer = (u32)kmalloc(PACKET_SIZE_MAX); | ||||
|     m_packet_buffer = (uintptr_t)kmalloc(PACKET_SIZE_MAX); | ||||
| 
 | ||||
|     reset(); | ||||
| 
 | ||||
|  |  | |||
|  | @ -274,7 +274,7 @@ int Process::sys$set_mmap_name(const Syscall::SC_set_mmap_name_params* user_para | |||
|     if (name.is_null()) | ||||
|         return -EFAULT; | ||||
| 
 | ||||
|     auto* region = region_from_range({ VirtualAddress((u32)params.addr), params.size }); | ||||
|     auto* region = region_from_range({ VirtualAddress((uintptr_t)params.addr), params.size }); | ||||
|     if (!region) | ||||
|         return -EINVAL; | ||||
|     if (!region->is_mmap()) | ||||
|  | @ -364,7 +364,7 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params) | |||
| 
 | ||||
|     if (size == 0) | ||||
|         return (void*)-EINVAL; | ||||
|     if ((u32)addr & ~PAGE_MASK) | ||||
|     if ((uintptr_t)addr & ~PAGE_MASK) | ||||
|         return (void*)-EINVAL; | ||||
| 
 | ||||
|     bool map_shared = flags & MAP_SHARED; | ||||
|  | @ -390,11 +390,11 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params) | |||
| 
 | ||||
|     if (map_purgeable) { | ||||
|         auto vmobject = PurgeableVMObject::create_with_size(size); | ||||
|         region = allocate_region_with_vmobject(VirtualAddress((u32)addr), size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot); | ||||
|         region = allocate_region_with_vmobject(VirtualAddress((uintptr_t)addr), size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot); | ||||
|         if (!region && (!map_fixed && addr != 0)) | ||||
|             region = allocate_region_with_vmobject({}, size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot); | ||||
|     } else if (map_anonymous) { | ||||
|         region = allocate_region(VirtualAddress((u32)addr), size, !name.is_null() ? name : "mmap", prot, false); | ||||
|         region = allocate_region(VirtualAddress((uintptr_t)addr), size, !name.is_null() ? name : "mmap", prot, false); | ||||
|         if (!region && (!map_fixed && addr != 0)) | ||||
|             region = allocate_region({}, size, !name.is_null() ? name : "mmap", prot, false); | ||||
|     } else { | ||||
|  | @ -418,7 +418,7 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params) | |||
|             if (!validate_inode_mmap_prot(*this, prot, *description->inode())) | ||||
|                 return (void*)-EACCES; | ||||
|         } | ||||
|         auto region_or_error = description->mmap(*this, VirtualAddress((u32)addr), static_cast<size_t>(offset), size, prot); | ||||
|         auto region_or_error = description->mmap(*this, VirtualAddress((uintptr_t)addr), static_cast<size_t>(offset), size, prot); | ||||
|         if (region_or_error.is_error()) { | ||||
|             // Fail if MAP_FIXED or address is 0, retry otherwise
 | ||||
|             if (map_fixed || addr == 0) | ||||
|  | @ -445,7 +445,7 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params) | |||
| int Process::sys$munmap(void* addr, size_t size) | ||||
| { | ||||
|     REQUIRE_PROMISE(stdio); | ||||
|     Range range_to_unmap { VirtualAddress((u32)addr), size }; | ||||
|     Range range_to_unmap { VirtualAddress((uintptr_t)addr), size }; | ||||
|     if (auto* whole_region = region_from_range(range_to_unmap)) { | ||||
|         if (!whole_region->is_mmap()) | ||||
|             return -EPERM; | ||||
|  | @ -482,7 +482,7 @@ int Process::sys$munmap(void* addr, size_t size) | |||
| int Process::sys$mprotect(void* addr, size_t size, int prot) | ||||
| { | ||||
|     REQUIRE_PROMISE(stdio); | ||||
|     Range range_to_mprotect = { VirtualAddress((u32)addr), size }; | ||||
|     Range range_to_mprotect = { VirtualAddress((uintptr_t)addr), size }; | ||||
| 
 | ||||
|     if (auto* whole_region = region_from_range(range_to_mprotect)) { | ||||
|         if (!whole_region->is_mmap()) | ||||
|  | @ -545,7 +545,7 @@ int Process::sys$mprotect(void* addr, size_t size, int prot) | |||
| int Process::sys$madvise(void* address, size_t size, int advice) | ||||
| { | ||||
|     REQUIRE_PROMISE(stdio); | ||||
|     auto* region = region_from_range({ VirtualAddress((u32)address), size }); | ||||
|     auto* region = region_from_range({ VirtualAddress((uintptr_t)address), size }); | ||||
|     if (!region) | ||||
|         return -EINVAL; | ||||
|     if (!region->is_mmap()) | ||||
|  | @ -1219,7 +1219,7 @@ Process* Process::create_user_process(Thread*& first_thread, const String& path, | |||
| Process* Process::create_kernel_process(Thread*& first_thread, String&& name, void (*e)()) | ||||
| { | ||||
|     auto* process = new Process(first_thread, move(name), (uid_t)0, (gid_t)0, (pid_t)0, Ring0); | ||||
|     first_thread->tss().eip = (u32)e; | ||||
|     first_thread->tss().eip = (uintptr_t)e; | ||||
| 
 | ||||
|     if (process->pid() != 0) { | ||||
|         InterruptDisabler disabler; | ||||
|  | @ -1415,7 +1415,7 @@ int Process::sys$sigreturn(RegisterDump& registers) | |||
|     stack_ptr++; | ||||
| 
 | ||||
|     //pop edi, esi, ebp, esp, ebx, edx, ecx and eax
 | ||||
|     memcpy(®isters.edi, stack_ptr, 8 * sizeof(u32)); | ||||
|     memcpy(®isters.edi, stack_ptr, 8 * sizeof(uintptr_t)); | ||||
|     stack_ptr += 8; | ||||
| 
 | ||||
|     registers.eip = *stack_ptr; | ||||
|  | @ -2356,7 +2356,7 @@ bool Process::validate_read_from_kernel(VirtualAddress vaddr, ssize_t size) cons | |||
| bool Process::validate_read(const void* address, ssize_t size) const | ||||
| { | ||||
|     ASSERT(size >= 0); | ||||
|     VirtualAddress first_address((u32)address); | ||||
|     VirtualAddress first_address((uintptr_t)address); | ||||
|     if (is_ring0()) { | ||||
|         if (is_kmalloc_address(address)) | ||||
|             return true; | ||||
|  | @ -2369,7 +2369,7 @@ bool Process::validate_read(const void* address, ssize_t size) const | |||
| bool Process::validate_write(void* address, ssize_t size) const | ||||
| { | ||||
|     ASSERT(size >= 0); | ||||
|     VirtualAddress first_address((u32)address); | ||||
|     VirtualAddress first_address((uintptr_t)address); | ||||
|     if (is_ring0()) { | ||||
|         if (is_kmalloc_address(address)) | ||||
|             return true; | ||||
|  | @ -3632,13 +3632,13 @@ int Process::sys$create_thread(void* (*entry)(void*), void* argument, const Sysc | |||
|     thread->set_joinable(is_thread_joinable); | ||||
| 
 | ||||
|     auto& tss = thread->tss(); | ||||
|     tss.eip = (u32)entry; | ||||
|     tss.eip = (uintptr_t)entry; | ||||
|     tss.eflags = 0x0202; | ||||
|     tss.cr3 = page_directory().cr3(); | ||||
|     tss.esp = user_stack_address; | ||||
| 
 | ||||
|     // NOTE: The stack needs to be 16-byte aligned.
 | ||||
|     thread->push_value_on_stack((u32)argument); | ||||
|     thread->push_value_on_stack((uintptr_t)argument); | ||||
|     thread->push_value_on_stack(0); | ||||
| 
 | ||||
|     thread->make_thread_specific_region({}); | ||||
|  | @ -4399,7 +4399,7 @@ Thread& Process::any_thread() | |||
| 
 | ||||
| WaitQueue& Process::futex_queue(i32* userspace_address) | ||||
| { | ||||
|     auto& queue = m_futex_queues.ensure((u32)userspace_address); | ||||
|     auto& queue = m_futex_queues.ensure((uintptr_t)userspace_address); | ||||
|     if (!queue) | ||||
|         queue = make<WaitQueue>(); | ||||
|     return *queue; | ||||
|  |  | |||
|  | @ -597,8 +597,8 @@ void Thread::set_default_signal_dispositions() | |||
| { | ||||
|     // FIXME: Set up all the right default actions. See signal(7).
 | ||||
|     memset(&m_signal_action_data, 0, sizeof(m_signal_action_data)); | ||||
|     m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress((u32)SIG_IGN); | ||||
|     m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress((u32)SIG_IGN); | ||||
|     m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress((uintptr_t)SIG_IGN); | ||||
|     m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress((uintptr_t)SIG_IGN); | ||||
| } | ||||
| 
 | ||||
| void Thread::push_value_on_stack(u32 value) | ||||
|  | @ -657,9 +657,9 @@ u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vecto | |||
|     }; | ||||
| 
 | ||||
|     // NOTE: The stack needs to be 16-byte aligned.
 | ||||
|     push_on_new_stack((u32)env); | ||||
|     push_on_new_stack((u32)argv); | ||||
|     push_on_new_stack((u32)argc); | ||||
|     push_on_new_stack((uintptr_t)env); | ||||
|     push_on_new_stack((uintptr_t)argv); | ||||
|     push_on_new_stack((uintptr_t)argc); | ||||
|     push_on_new_stack(0); | ||||
|     return new_esp; | ||||
| } | ||||
|  | @ -770,20 +770,20 @@ String Thread::backtrace_impl() const | |||
|     auto& process = const_cast<Process&>(this->process()); | ||||
|     ProcessPagingScope paging_scope(process); | ||||
| 
 | ||||
|     u32 stack_ptr = start_frame; | ||||
|     uintptr_t stack_ptr = start_frame; | ||||
|     for (;;) { | ||||
|         if (!process.validate_read_from_kernel(VirtualAddress((u32)stack_ptr), sizeof(void*) * 2)) | ||||
|         if (!process.validate_read_from_kernel(VirtualAddress((uintptr_t)stack_ptr), sizeof(void*) * 2)) | ||||
|             break; | ||||
|         u32 retaddr; | ||||
|         uintptr_t retaddr; | ||||
| 
 | ||||
|         if (is_user_range(VirtualAddress(stack_ptr), sizeof(void*) * 2)) { | ||||
|             copy_from_user(&retaddr, &((u32*)stack_ptr)[1]); | ||||
|         if (is_user_range(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2)) { | ||||
|             copy_from_user(&retaddr, &((uintptr_t*)stack_ptr)[1]); | ||||
|             recognized_symbols.append({ retaddr, ksymbolicate(retaddr) }); | ||||
|             copy_from_user(&stack_ptr, (u32*)stack_ptr); | ||||
|             copy_from_user(&stack_ptr, (uintptr_t*)stack_ptr); | ||||
|         } else { | ||||
|             memcpy(&retaddr, &((u32*)stack_ptr)[1], sizeof(void*)); | ||||
|             memcpy(&retaddr, &((uintptr_t*)stack_ptr)[1], sizeof(uintptr_t)); | ||||
|             recognized_symbols.append({ retaddr, ksymbolicate(retaddr) }); | ||||
|             memcpy(&stack_ptr, (u32*)stack_ptr, sizeof(void*)); | ||||
|             memcpy(&stack_ptr, (uintptr_t*)stack_ptr, sizeof(uintptr_t)); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|  | @ -795,14 +795,14 @@ String Thread::backtrace_impl() const | |||
|     return builder.to_string(); | ||||
| } | ||||
| 
 | ||||
| Vector<u32> Thread::raw_backtrace(u32 ebp) const | ||||
| Vector<uintptr_t> Thread::raw_backtrace(uintptr_t ebp) const | ||||
| { | ||||
|     auto& process = const_cast<Process&>(this->process()); | ||||
|     ProcessPagingScope paging_scope(process); | ||||
|     Vector<u32, Profiling::max_stack_frame_count> backtrace; | ||||
|     Vector<uintptr_t, Profiling::max_stack_frame_count> backtrace; | ||||
|     backtrace.append(ebp); | ||||
|     for (u32* stack_ptr = (u32*)ebp; process.validate_read_from_kernel(VirtualAddress((u32)stack_ptr), sizeof(void*) * 2); stack_ptr = (u32*)*stack_ptr) { | ||||
|         u32 retaddr = stack_ptr[1]; | ||||
|     for (uintptr_t* stack_ptr = (uintptr_t*)ebp; process.validate_read_from_kernel(VirtualAddress((uintptr_t)stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) { | ||||
|         uintptr_t retaddr = stack_ptr[1]; | ||||
|         backtrace.append(retaddr); | ||||
|         if (backtrace.size() == Profiling::max_stack_frame_count) | ||||
|             break; | ||||
|  | @ -818,7 +818,7 @@ void Thread::make_thread_specific_region(Badge<Process>) | |||
|     SmapDisabler disabler; | ||||
|     auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr(); | ||||
|     auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment); | ||||
|     m_thread_specific_data = VirtualAddress((u32)thread_specific_data); | ||||
|     m_thread_specific_data = VirtualAddress((uintptr_t)thread_specific_data); | ||||
|     thread_specific_data->self = thread_specific_data; | ||||
|     if (process().m_master_tls_size) | ||||
|         memcpy(thread_local_storage, process().m_master_tls_region->vaddr().as_ptr(), process().m_master_tls_size); | ||||
|  |  | |||
|  | @ -66,17 +66,17 @@ MemoryManager::~MemoryManager() | |||
| void MemoryManager::protect_kernel_image() | ||||
| { | ||||
|     // Disable writing to the kernel text and rodata segments.
 | ||||
|     extern u32 start_of_kernel_text; | ||||
|     extern u32 start_of_kernel_data; | ||||
|     for (size_t i = (u32)&start_of_kernel_text; i < (u32)&start_of_kernel_data; i += PAGE_SIZE) { | ||||
|     extern uintptr_t start_of_kernel_text; | ||||
|     extern uintptr_t start_of_kernel_data; | ||||
|     for (size_t i = (uintptr_t)&start_of_kernel_text; i < (uintptr_t)&start_of_kernel_data; i += PAGE_SIZE) { | ||||
|         auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i)); | ||||
|         pte.set_writable(false); | ||||
|     } | ||||
| 
 | ||||
|     if (g_cpu_supports_nx) { | ||||
|         // Disable execution of the kernel data and bss segments.
 | ||||
|         extern u32 end_of_kernel_bss; | ||||
|         for (size_t i = (u32)&start_of_kernel_data; i < (u32)&end_of_kernel_bss; i += PAGE_SIZE) { | ||||
|         extern uintptr_t end_of_kernel_bss; | ||||
|         for (size_t i = (uintptr_t)&start_of_kernel_data; i < (uintptr_t)&end_of_kernel_bss; i += PAGE_SIZE) { | ||||
|             auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i)); | ||||
|             pte.set_execute_disabled(true); | ||||
|         } | ||||
|  | @ -101,7 +101,7 @@ void MemoryManager::setup_low_1mb() | |||
|     if (g_cpu_supports_nx) | ||||
|         pde_zero.set_execute_disabled(true); | ||||
| 
 | ||||
|     for (u32 offset = 0; offset < (2 * MB); offset += PAGE_SIZE) { | ||||
|     for (uintptr_t offset = 0; offset < (2 * MB); offset += PAGE_SIZE) { | ||||
|         auto& page_table_page = m_low_page_table; | ||||
|         auto& pte = quickmap_pt(page_table_page->paddr())[offset / PAGE_SIZE]; | ||||
|         pte.set_physical_page_base(offset); | ||||
|  | @ -119,11 +119,11 @@ void MemoryManager::parse_memory_map() | |||
|     auto* mmap = (multiboot_memory_map_t*)(low_physical_to_virtual(multiboot_info_ptr->mmap_addr)); | ||||
|     for (; (unsigned long)mmap < (low_physical_to_virtual(multiboot_info_ptr->mmap_addr)) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) { | ||||
|         kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n", | ||||
|             (u32)(mmap->addr >> 32), | ||||
|             (u32)(mmap->addr & 0xffffffff), | ||||
|             (u32)(mmap->len >> 32), | ||||
|             (u32)(mmap->len & 0xffffffff), | ||||
|             (u32)mmap->type); | ||||
|             (uintptr_t)(mmap->addr >> 32), | ||||
|             (uintptr_t)(mmap->addr & 0xffffffff), | ||||
|             (uintptr_t)(mmap->len >> 32), | ||||
|             (uintptr_t)(mmap->len & 0xffffffff), | ||||
|             (uintptr_t)mmap->type); | ||||
| 
 | ||||
|         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) | ||||
|             continue; | ||||
|  | @ -135,7 +135,7 @@ void MemoryManager::parse_memory_map() | |||
|         if ((mmap->addr + mmap->len) > 0xffffffff) | ||||
|             continue; | ||||
| 
 | ||||
|         auto diff = (u32)mmap->addr % PAGE_SIZE; | ||||
|         auto diff = (uintptr_t)mmap->addr % PAGE_SIZE; | ||||
|         if (diff != 0) { | ||||
|             kprintf("MM: got an unaligned region base from the bootloader; correcting %p by %d bytes\n", mmap->addr, diff); | ||||
|             diff = PAGE_SIZE - diff; | ||||
|  | @ -153,7 +153,7 @@ void MemoryManager::parse_memory_map() | |||
| 
 | ||||
| #ifdef MM_DEBUG | ||||
|         kprintf("MM: considering memory at %p - %p\n", | ||||
|             (u32)mmap->addr, (u32)(mmap->addr + mmap->len)); | ||||
|             (uintptr_t)mmap->addr, (uintptr_t)(mmap->addr + mmap->len)); | ||||
| #endif | ||||
| 
 | ||||
|         for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) { | ||||
|  | @ -219,7 +219,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual | |||
|         page_directory.m_physical_pages.set(page_directory_index, move(page_table)); | ||||
|     } | ||||
| 
 | ||||
|     return quickmap_pt(PhysicalAddress((u32)pde.page_table_base()))[page_table_index]; | ||||
|     return quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index]; | ||||
| } | ||||
| 
 | ||||
| void MemoryManager::initialize() | ||||
|  | @ -410,7 +410,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s | |||
| #endif | ||||
| 
 | ||||
|     if (should_zero_fill == ShouldZeroFill::Yes) { | ||||
|         auto* ptr = (u32*)quickmap_page(*page); | ||||
|         auto* ptr = quickmap_page(*page); | ||||
|         memset(ptr, 0, PAGE_SIZE); | ||||
|         unquickmap_page(); | ||||
|     } | ||||
|  |  | |||
|  | @ -30,9 +30,9 @@ | |||
| #include <Kernel/VM/MemoryManager.h> | ||||
| #include <Kernel/VM/PageDirectory.h> | ||||
| 
 | ||||
| static const u32 userspace_range_base = 0x00800000; | ||||
| static const u32 userspace_range_ceiling = 0xbe000000; | ||||
| static const u32 kernelspace_range_base = 0xc0800000; | ||||
| static const uintptr_t userspace_range_base = 0x00800000; | ||||
| static const uintptr_t userspace_range_ceiling = 0xbe000000; | ||||
| static const uintptr_t kernelspace_range_base = 0xc0800000; | ||||
| 
 | ||||
| static HashMap<u32, PageDirectory*>& cr3_map() | ||||
| { | ||||
|  | @ -58,9 +58,9 @@ PageDirectory::PageDirectory() | |||
|     m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000); | ||||
| 
 | ||||
|     // Adopt the page tables already set up by boot.S
 | ||||
|     PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((u32)boot_pdpt)); | ||||
|     PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((u32)boot_pd0)); | ||||
|     PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((u32)boot_pd3)); | ||||
|     PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((uintptr_t)boot_pdpt)); | ||||
|     PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((uintptr_t)boot_pd0)); | ||||
|     PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((uintptr_t)boot_pd3)); | ||||
|     kprintf("MM: boot_pdpt @ P%p\n", boot_pdpt_paddr.get()); | ||||
|     kprintf("MM: boot_pd0 @ P%p\n", boot_pd0_paddr.get()); | ||||
|     kprintf("MM: boot_pd3 @ P%p\n", boot_pd3_paddr.get()); | ||||
|  |  | |||
|  | @ -32,22 +32,22 @@ | |||
| class PhysicalAddress { | ||||
| public: | ||||
|     PhysicalAddress() {} | ||||
|     explicit PhysicalAddress(u32 address) | ||||
|     explicit PhysicalAddress(uintptr_t address) | ||||
|         : m_address(address) | ||||
|     { | ||||
|     } | ||||
| 
 | ||||
|     PhysicalAddress offset(u32 o) const { return PhysicalAddress(m_address + o); } | ||||
|     u32 get() const { return m_address; } | ||||
|     void set(u32 address) { m_address = address; } | ||||
|     void mask(u32 m) { m_address &= m; } | ||||
|     PhysicalAddress offset(uintptr_t o) const { return PhysicalAddress(m_address + o); } | ||||
|     uintptr_t get() const { return m_address; } | ||||
|     void set(uintptr_t address) { m_address = address; } | ||||
|     void mask(uintptr_t m) { m_address &= m; } | ||||
| 
 | ||||
|     bool is_null() const { return m_address == 0; } | ||||
| 
 | ||||
|     u8* as_ptr() { return reinterpret_cast<u8*>(m_address); } | ||||
|     const u8* as_ptr() const { return reinterpret_cast<const u8*>(m_address); } | ||||
| 
 | ||||
|     u32 page_base() const { return m_address & 0xfffff000; } | ||||
|     uintptr_t page_base() const { return m_address & 0xfffff000; } | ||||
| 
 | ||||
|     bool operator==(const PhysicalAddress& other) const { return m_address == other.m_address; } | ||||
|     bool operator!=(const PhysicalAddress& other) const { return m_address != other.m_address; } | ||||
|  | @ -57,7 +57,7 @@ public: | |||
|     bool operator<=(const PhysicalAddress& other) const { return m_address <= other.m_address; } | ||||
| 
 | ||||
| private: | ||||
|     u32 m_address { 0 }; | ||||
|     uintptr_t m_address { 0 }; | ||||
| }; | ||||
| 
 | ||||
| inline const LogStream& operator<<(const LogStream& stream, PhysicalAddress value) | ||||
|  |  | |||
|  | @ -101,11 +101,11 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr) | |||
|         ASSERT_NOT_REACHED(); | ||||
|     } | ||||
| 
 | ||||
|     int local_offset = addr.get() - m_lower.get(); | ||||
|     ptrdiff_t local_offset = addr.get() - m_lower.get(); | ||||
|     ASSERT(local_offset >= 0); | ||||
|     ASSERT((u32)local_offset < (u32)(m_pages * PAGE_SIZE)); | ||||
|     ASSERT((uintptr_t)local_offset < (uintptr_t)(m_pages * PAGE_SIZE)); | ||||
| 
 | ||||
|     auto page = (unsigned)local_offset / PAGE_SIZE; | ||||
|     auto page = (uintptr_t)local_offset / PAGE_SIZE; | ||||
|     if (page < m_last) | ||||
|         m_last = page; | ||||
| 
 | ||||
|  |  | |||
|  | @ -32,23 +32,23 @@ | |||
| class VirtualAddress { | ||||
| public: | ||||
|     VirtualAddress() {} | ||||
|     explicit VirtualAddress(u32 address) | ||||
|     explicit VirtualAddress(uintptr_t address) | ||||
|         : m_address(address) | ||||
|     { | ||||
|     } | ||||
| 
 | ||||
|     explicit VirtualAddress(const void* address) | ||||
|         : m_address((u32)address) | ||||
|         : m_address((uintptr_t)address) | ||||
|     { | ||||
|     } | ||||
| 
 | ||||
|     bool is_null() const { return m_address == 0; } | ||||
|     bool is_page_aligned() const { return (m_address & 0xfff) == 0; } | ||||
| 
 | ||||
|     VirtualAddress offset(u32 o) const { return VirtualAddress(m_address + o); } | ||||
|     u32 get() const { return m_address; } | ||||
|     void set(u32 address) { m_address = address; } | ||||
|     void mask(u32 m) { m_address &= m; } | ||||
|     VirtualAddress offset(uintptr_t o) const { return VirtualAddress(m_address + o); } | ||||
|     uintptr_t get() const { return m_address; } | ||||
|     void set(uintptr_t address) { m_address = address; } | ||||
|     void mask(uintptr_t m) { m_address &= m; } | ||||
| 
 | ||||
|     bool operator<=(const VirtualAddress& other) const { return m_address <= other.m_address; } | ||||
|     bool operator>=(const VirtualAddress& other) const { return m_address >= other.m_address; } | ||||
|  | @ -63,7 +63,7 @@ public: | |||
|     VirtualAddress page_base() const { return VirtualAddress(m_address & 0xfffff000); } | ||||
| 
 | ||||
| private: | ||||
|     u32 m_address { 0 }; | ||||
|     uintptr_t m_address { 0 }; | ||||
| }; | ||||
| 
 | ||||
| inline VirtualAddress operator-(const VirtualAddress& a, const VirtualAddress& b) | ||||
|  |  | |||
Andreas Kling