1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 11:58:12 +00:00

AK: Add global FlatPtr typedef. It's u32 or u64, based on sizeof(void*)

Use this instead of uintptr_t throughout the codebase. This makes it
possible to pass a FlatPtr to something that has u32 and u64 overloads.
This commit is contained in:
Andreas Kling 2020-03-08 10:36:51 +01:00
parent b98d8ad5b0
commit b1058b33fb
36 changed files with 164 additions and 161 deletions

View file

@@ -41,9 +41,9 @@
//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG
extern uintptr_t start_of_kernel_text;
extern uintptr_t start_of_kernel_data;
extern uintptr_t end_of_kernel_bss;
extern FlatPtr start_of_kernel_text;
extern FlatPtr start_of_kernel_data;
extern FlatPtr end_of_kernel_bss;
namespace Kernel {
@@ -72,14 +72,14 @@ MemoryManager::~MemoryManager()
void MemoryManager::protect_kernel_image()
{
// Disable writing to the kernel text and rodata segments.
for (size_t i = (uintptr_t)&start_of_kernel_text; i < (uintptr_t)&start_of_kernel_data; i += PAGE_SIZE) {
for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_writable(false);
}
if (g_cpu_supports_nx) {
// Disable execution of the kernel data and bss segments.
for (size_t i = (uintptr_t)&start_of_kernel_data; i < (uintptr_t)&end_of_kernel_bss; i += PAGE_SIZE) {
for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_execute_disabled(true);
}
@@ -104,7 +104,7 @@ void MemoryManager::setup_low_identity_mapping()
if (g_cpu_supports_nx)
pde_zero.set_execute_disabled(true);
for (uintptr_t offset = (1 * MB); offset < (2 * MB); offset += PAGE_SIZE) {
for (FlatPtr offset = (1 * MB); offset < (2 * MB); offset += PAGE_SIZE) {
auto& page_table_page = m_low_page_table;
auto& pte = quickmap_pt(page_table_page->paddr())[offset / PAGE_SIZE];
pte.set_physical_page_base(offset);
@@ -132,7 +132,7 @@ void MemoryManager::parse_memory_map()
if ((mmap->addr + mmap->len) > 0xffffffff)
continue;
auto diff = (uintptr_t)mmap->addr % PAGE_SIZE;
auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
if (diff != 0) {
klog() << "MM: got an unaligned region base from the bootloader; correcting " << String::format("%p", mmap->addr) << " by " << diff << " bytes";
diff = PAGE_SIZE - diff;
@@ -149,7 +149,7 @@ void MemoryManager::parse_memory_map()
}
#ifdef MM_DEBUG
klog() << "MM: considering memory at " << String::format("%p", (uintptr_t)mmap->addr) << " - " << String::format("%p", (uintptr_t)(mmap->addr + mmap->len));
klog() << "MM: considering memory at " << String::format("%p", (FlatPtr)mmap->addr) << " - " << String::format("%p", (FlatPtr)(mmap->addr + mmap->len));
#endif
for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
@@ -196,7 +196,7 @@ const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, Vi
if (!pde.is_present())
return nullptr;
return &quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
@@ -224,7 +224,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
page_directory.m_physical_pages.set(page_directory_index, move(page_table));
}
return quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
return quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
void MemoryManager::initialize()

View file

@@ -32,9 +32,9 @@
namespace Kernel {
static const uintptr_t userspace_range_base = 0x00800000;
static const uintptr_t userspace_range_ceiling = 0xbe000000;
static const uintptr_t kernelspace_range_base = 0xc0800000;
static const FlatPtr userspace_range_base = 0x00800000;
static const FlatPtr userspace_range_ceiling = 0xbe000000;
static const FlatPtr kernelspace_range_base = 0xc0800000;
static HashMap<u32, PageDirectory*>& cr3_map()
{
@@ -60,9 +60,9 @@ PageDirectory::PageDirectory()
m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000);
// Adopt the page tables already set up by boot.S
PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((uintptr_t)boot_pdpt));
PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((uintptr_t)boot_pd0));
PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((uintptr_t)boot_pd3));
PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
klog() << "MM: boot_pdpt @ " << boot_pdpt_paddr;
klog() << "MM: boot_pd0 @ " << boot_pd0_paddr;
klog() << "MM: boot_pd3 @ " << boot_pd3_paddr;

View file

@@ -105,9 +105,9 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr)
ptrdiff_t local_offset = addr.get() - m_lower.get();
ASSERT(local_offset >= 0);
ASSERT((uintptr_t)local_offset < (uintptr_t)(m_pages * PAGE_SIZE));
ASSERT((FlatPtr)local_offset < (FlatPtr)(m_pages * PAGE_SIZE));
auto page = (uintptr_t)local_offset / PAGE_SIZE;
auto page = (FlatPtr)local_offset / PAGE_SIZE;
if (page < m_last)
m_last = page;

View file

@@ -112,8 +112,8 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
if (available_range.size() < (effective_size + alignment))
continue;
uintptr_t initial_base = available_range.base().offset(offset_from_effective_base).get();
uintptr_t aligned_base = round_up_to_power_of_two(initial_base, alignment);
FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
Range allocated_range(VirtualAddress(aligned_base), size);
if (available_range == allocated_range) {