Use uintptr_t instead of u32 when storing pointers as integers
uintptr_t is 32-bit or 64-bit depending on the target platform. This will help us write pointer-size-agnostic code so that when the day comes that we want to do a 64-bit port, we'll be in better shape.
commit a246e9cd7e (parent e07b34b9b8)
14 changed files with 110 additions and 110 deletions
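The diff below is mechanical: u32 becomes uintptr_t wherever a pointer is stored as an integer. As a minimal standalone sketch of the point the commit message makes (hosted C++ with standard headers only, not SerenityOS kernel code), uintptr_t is guaranteed to round-trip an object pointer on 32-bit and 64-bit targets alike, whereas a u32 would keep only the low 32 bits of a 64-bit pointer:

#include <cstdint>
#include <cstdio>

int main()
{
    int object = 0;

    // An object pointer converted to uintptr_t and back compares equal to
    // the original, regardless of whether pointers are 32-bit or 64-bit.
    uintptr_t as_integer = reinterpret_cast<uintptr_t>(&object);
    int* round_tripped = reinterpret_cast<int*>(as_integer);

    // Assumption (holds on the common ILP32/LP64 models this code targets):
    // uintptr_t is exactly pointer-sized, so nothing is truncated.
    static_assert(sizeof(uintptr_t) == sizeof(void*), "uintptr_t matches pointer width");

    printf("round-trip ok: %d\n", round_tripped == &object);
    return 0;
}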
@@ -66,17 +66,17 @@ MemoryManager::~MemoryManager()
 void MemoryManager::protect_kernel_image()
 {
     // Disable writing to the kernel text and rodata segments.
-    extern u32 start_of_kernel_text;
-    extern u32 start_of_kernel_data;
-    for (size_t i = (u32)&start_of_kernel_text; i < (u32)&start_of_kernel_data; i += PAGE_SIZE) {
+    extern uintptr_t start_of_kernel_text;
+    extern uintptr_t start_of_kernel_data;
+    for (size_t i = (uintptr_t)&start_of_kernel_text; i < (uintptr_t)&start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
         pte.set_writable(false);
     }
 
     if (g_cpu_supports_nx) {
         // Disable execution of the kernel data and bss segments.
-        extern u32 end_of_kernel_bss;
-        for (size_t i = (u32)&start_of_kernel_data; i < (u32)&end_of_kernel_bss; i += PAGE_SIZE) {
+        extern uintptr_t end_of_kernel_bss;
+        for (size_t i = (uintptr_t)&start_of_kernel_data; i < (uintptr_t)&end_of_kernel_bss; i += PAGE_SIZE) {
             auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
@@ -101,7 +101,7 @@ void MemoryManager::setup_low_1mb()
     if (g_cpu_supports_nx)
         pde_zero.set_execute_disabled(true);
 
-    for (u32 offset = 0; offset < (2 * MB); offset += PAGE_SIZE) {
+    for (uintptr_t offset = 0; offset < (2 * MB); offset += PAGE_SIZE) {
         auto& page_table_page = m_low_page_table;
         auto& pte = quickmap_pt(page_table_page->paddr())[offset / PAGE_SIZE];
         pte.set_physical_page_base(offset);
@@ -119,11 +119,11 @@ void MemoryManager::parse_memory_map()
     auto* mmap = (multiboot_memory_map_t*)(low_physical_to_virtual(multiboot_info_ptr->mmap_addr));
     for (; (unsigned long)mmap < (low_physical_to_virtual(multiboot_info_ptr->mmap_addr)) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
         kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
-            (u32)(mmap->addr >> 32),
-            (u32)(mmap->addr & 0xffffffff),
-            (u32)(mmap->len >> 32),
-            (u32)(mmap->len & 0xffffffff),
-            (u32)mmap->type);
+            (uintptr_t)(mmap->addr >> 32),
+            (uintptr_t)(mmap->addr & 0xffffffff),
+            (uintptr_t)(mmap->len >> 32),
+            (uintptr_t)(mmap->len & 0xffffffff),
+            (uintptr_t)mmap->type);
 
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
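For context (an aside, not part of the commit): the multiboot memory map reports addr and len as 64-bit values even on a 32-bit machine, which is why the kprintf above prints each one as a high and a low 32-bit half. A tiny sketch of that split using standard types:

#include <cstdint>
#include <cstdio>

int main()
{
    // A region starting above the 4 GiB mark, so the high half is non-zero.
    uint64_t addr = 0x0000000100000000ull;

    uint32_t high = (uint32_t)(addr >> 32);       // upper 32 bits
    uint32_t low = (uint32_t)(addr & 0xffffffff); // lower 32 bits

    // Mirrors the "0x%x%08x" formatting used in the hunk above.
    printf("base_addr = 0x%x%08x\n", high, low);
    return 0;
}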
@@ -135,7 +135,7 @@ void MemoryManager::parse_memory_map()
         if ((mmap->addr + mmap->len) > 0xffffffff)
             continue;
 
-        auto diff = (u32)mmap->addr % PAGE_SIZE;
+        auto diff = (uintptr_t)mmap->addr % PAGE_SIZE;
         if (diff != 0) {
             kprintf("MM: got an unaligned region base from the bootloader; correcting %p by %d bytes\n", mmap->addr, diff);
             diff = PAGE_SIZE - diff;
@@ -153,7 +153,7 @@ void MemoryManager::parse_memory_map()
 
 #ifdef MM_DEBUG
         kprintf("MM: considering memory at %p - %p\n",
-            (u32)mmap->addr, (u32)(mmap->addr + mmap->len));
+            (uintptr_t)mmap->addr, (uintptr_t)(mmap->addr + mmap->len));
 #endif
 
         for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
@@ -219,7 +219,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
         page_directory.m_physical_pages.set(page_directory_index, move(page_table));
     }
 
-    return quickmap_pt(PhysicalAddress((u32)pde.page_table_base()))[page_table_index];
+    return quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
 }
 
 void MemoryManager::initialize()
@@ -410,7 +410,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
 #endif
 
     if (should_zero_fill == ShouldZeroFill::Yes) {
-        auto* ptr = (u32*)quickmap_page(*page);
+        auto* ptr = quickmap_page(*page);
         memset(ptr, 0, PAGE_SIZE);
         unquickmap_page();
     }
@@ -30,9 +30,9 @@
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/PageDirectory.h>
 
-static const u32 userspace_range_base = 0x00800000;
-static const u32 userspace_range_ceiling = 0xbe000000;
-static const u32 kernelspace_range_base = 0xc0800000;
+static const uintptr_t userspace_range_base = 0x00800000;
+static const uintptr_t userspace_range_ceiling = 0xbe000000;
+static const uintptr_t kernelspace_range_base = 0xc0800000;
 
 static HashMap<u32, PageDirectory*>& cr3_map()
 {
@@ -58,9 +58,9 @@ PageDirectory::PageDirectory()
     m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000);
 
     // Adopt the page tables already set up by boot.S
-    PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((u32)boot_pdpt));
-    PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((u32)boot_pd0));
-    PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((u32)boot_pd3));
+    PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((uintptr_t)boot_pdpt));
+    PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((uintptr_t)boot_pd0));
+    PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((uintptr_t)boot_pd3));
     kprintf("MM: boot_pdpt @ P%p\n", boot_pdpt_paddr.get());
     kprintf("MM: boot_pd0 @ P%p\n", boot_pd0_paddr.get());
     kprintf("MM: boot_pd3 @ P%p\n", boot_pd3_paddr.get());
@@ -32,22 +32,22 @@
 class PhysicalAddress {
 public:
     PhysicalAddress() {}
-    explicit PhysicalAddress(u32 address)
+    explicit PhysicalAddress(uintptr_t address)
         : m_address(address)
     {
     }
 
-    PhysicalAddress offset(u32 o) const { return PhysicalAddress(m_address + o); }
-    u32 get() const { return m_address; }
-    void set(u32 address) { m_address = address; }
-    void mask(u32 m) { m_address &= m; }
+    PhysicalAddress offset(uintptr_t o) const { return PhysicalAddress(m_address + o); }
+    uintptr_t get() const { return m_address; }
+    void set(uintptr_t address) { m_address = address; }
+    void mask(uintptr_t m) { m_address &= m; }
 
     bool is_null() const { return m_address == 0; }
 
     u8* as_ptr() { return reinterpret_cast<u8*>(m_address); }
     const u8* as_ptr() const { return reinterpret_cast<const u8*>(m_address); }
 
-    u32 page_base() const { return m_address & 0xfffff000; }
+    uintptr_t page_base() const { return m_address & 0xfffff000; }
 
     bool operator==(const PhysicalAddress& other) const { return m_address == other.m_address; }
     bool operator!=(const PhysicalAddress& other) const { return m_address != other.m_address; }
@@ -57,7 +57,7 @@ public:
     bool operator<=(const PhysicalAddress& other) const { return m_address <= other.m_address; }
 
 private:
-    u32 m_address { 0 };
+    uintptr_t m_address { 0 };
 };
 
 inline const LogStream& operator<<(const LogStream& stream, PhysicalAddress value)
@@ -101,11 +101,11 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr)
         ASSERT_NOT_REACHED();
     }
 
-    int local_offset = addr.get() - m_lower.get();
+    ptrdiff_t local_offset = addr.get() - m_lower.get();
     ASSERT(local_offset >= 0);
-    ASSERT((u32)local_offset < (u32)(m_pages * PAGE_SIZE));
+    ASSERT((uintptr_t)local_offset < (uintptr_t)(m_pages * PAGE_SIZE));
 
-    auto page = (unsigned)local_offset / PAGE_SIZE;
+    auto page = (uintptr_t)local_offset / PAGE_SIZE;
     if (page < m_last)
         m_last = page;
 
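The int-to-ptrdiff_t change above follows the same idea as the rest of the commit: ptrdiff_t is the signed type that pointer subtraction itself produces, so it widens with the address size instead of staying pinned at 32 bits. A small generic sketch (not the kernel's PhysicalRegion):

#include <cstddef>
#include <cstdio>

int main()
{
    static char region[4096];
    char* lower = region;
    char* addr = region + sizeof(region);

    // Same shape as addr.get() - m_lower.get(): a byte offset between two
    // addresses, kept in a pointer-width signed type.
    ptrdiff_t local_offset = addr - lower;
    printf("offset = %td bytes\n", local_offset);
    return 0;
}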
@@ -32,23 +32,23 @@
 class VirtualAddress {
 public:
     VirtualAddress() {}
-    explicit VirtualAddress(u32 address)
+    explicit VirtualAddress(uintptr_t address)
         : m_address(address)
     {
     }
 
     explicit VirtualAddress(const void* address)
-        : m_address((u32)address)
+        : m_address((uintptr_t)address)
     {
     }
 
     bool is_null() const { return m_address == 0; }
     bool is_page_aligned() const { return (m_address & 0xfff) == 0; }
 
-    VirtualAddress offset(u32 o) const { return VirtualAddress(m_address + o); }
-    u32 get() const { return m_address; }
-    void set(u32 address) { m_address = address; }
-    void mask(u32 m) { m_address &= m; }
+    VirtualAddress offset(uintptr_t o) const { return VirtualAddress(m_address + o); }
+    uintptr_t get() const { return m_address; }
+    void set(uintptr_t address) { m_address = address; }
+    void mask(uintptr_t m) { m_address &= m; }
 
     bool operator<=(const VirtualAddress& other) const { return m_address <= other.m_address; }
     bool operator>=(const VirtualAddress& other) const { return m_address >= other.m_address; }
@@ -63,7 +63,7 @@ public:
     VirtualAddress page_base() const { return VirtualAddress(m_address & 0xfffff000); }
 
 private:
-    u32 m_address { 0 };
+    uintptr_t m_address { 0 };
 };
 
 inline VirtualAddress operator-(const VirtualAddress& a, const VirtualAddress& b)