
Kernel: Add support for kernel addresses other than 3-4GB

Authored by Gunnar Beutner on 2021-07-17 02:42:59 +02:00; committed by Andreas Kling
parent 6c6b778e2e
commit b708b23b13
5 changed files with 28 additions and 29 deletions
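
The theme of the change: the kernel used to assume it lived in the 3-4 GiB slot of the virtual address space, so PDPT index 3 and two-bit index masks (& 0x3) were hard-coded throughout. This commit derives the slot from KERNEL_BASE and widens the masks to the full nine bits of an x86_64 PDPT index. A minimal sketch of the index math, not SerenityOS code (the example addresses are illustrative; the shift and mask are the ones used in the diff below):

    #include <cstddef>
    #include <cstdint>

    // Each PDPT entry covers one 1 GiB slot, so bits 30..38 of a virtual
    // address select the entry.
    constexpr std::size_t pdpt_index(std::uint64_t vaddr)
    {
        return (vaddr >> 30) & 0x1ff;
    }

    static_assert(pdpt_index(0xC0000000) == 3);  // the traditional 3 GiB base -> slot 3
    static_assert(pdpt_index(0x100000000) == 4); // a base above 4 GiB lands elsewhere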


@@ -141,10 +141,11 @@ class PageDirectoryPointerTable {
 public:
     PageDirectoryEntry* directory(size_t index)
     {
+        VERIFY(index <= (NumericLimits<size_t>::max() << 30));
         return (PageDirectoryEntry*)(PhysicalAddress::physical_page_base(raw[index]));
     }

-    u64 raw[4];
+    u64 raw[512];
 };

 }
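
Growing raw[] from 4 to 512 entries lets the same structure describe both layouts: a 32-bit PAE PDPT has exactly 4 entries (covering 4 GiB), while the corresponding x86_64 level has 512 (covering 512 GiB). Note that the new VERIFY is a very permissive bound as written, since NumericLimits<size_t>::max() << 30 sits near the top of the address space. As for what directory() computes, a hedged illustration (physical_page_base is the kernel's own helper; the explicit mask below is an assumption about PAE-style entries, which keep their flag bits in the low 12 bits):

    #include <cstdint>

    // Illustration only: a PDPT entry stores the page directory's
    // 4 KiB-aligned physical base, with flag bits in the low bits.
    constexpr std::uint64_t page_directory_base(std::uint64_t pdpte)
    {
        return pdpte & ~0xfffull;
    }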


@@ -390,10 +390,10 @@ pae_supported:
     movl $(boot_pdpt - KERNEL_BASE), %edi
 #if ARCH(X86_64)
     movl $((boot_pd0 - KERNEL_BASE) + 3), 0(%edi)
-    movl $((boot_pd3 - KERNEL_BASE) + 3), 24(%edi)
+    movl $((boot_pd3 - KERNEL_BASE) + 3), (8 * (KERNEL_BASE >> 30 & 0x1ff))(%edi)
 #else
     movl $((boot_pd0 - KERNEL_BASE) + 1), 0(%edi)
-    movl $((boot_pd3 - KERNEL_BASE) + 1), 24(%edi)
+    movl $((boot_pd3 - KERNEL_BASE) + 1), (8 * (KERNEL_BASE >> 30 & 0x1ff))(%edi)
 #endif

     /* clear pd0 */
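
Instead of hard-coding byte offset 24, the boot code now computes the offset of the kernel's PDPT entry: entries are 8 bytes wide and bits 30..38 of KERNEL_BASE select the slot. A quick check that the new expression reduces to the old constant under the old assumption (KERNEL_BASE == 0xC0000000, the historical 3 GiB base, is used here purely for illustration):

    #include <cstdint>

    // 8 bytes per PDPT entry; the slot is bits 30..38 of the base address.
    static_assert(8 * ((std::uint64_t(0xC0000000) >> 30) & 0x1ff) == 24,
        "a 3 GiB base lands in slot 3, i.e. byte offset 24, matching the old code");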


@@ -436,7 +436,6 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     unquickmap_page();

     // Hook the page table into the kernel page directory
-    VERIFY(((virtual_page_base_for_this_pt >> 30) & 0x3) == 3);
     PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
     u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;

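
The deleted assertion pinned the kernel's page tables to PDPT slot 3, which only holds for a 3-4 GiB kernel. The commit simply drops the check; a relocatable equivalent (hypothetical, not part of this commit) would compare against the slot derived from KERNEL_BASE instead:

    // Hypothetical replacement for the dropped check, reusing the kernel's
    // own VERIFY and KERNEL_BASE:
    VERIFY(((virtual_page_base_for_this_pt >> 30) & 0x1ff) == ((KERNEL_BASE >> 30) & 0x1ff));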
@@ -507,7 +506,7 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.own_lock());
     VERIFY(page_directory.get_lock().own_lock());
-    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

@@ -524,7 +523,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.own_lock());
     VERIFY(page_directory.get_lock().own_lock());
-    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

@@ -565,7 +564,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.own_lock());
     VERIFY(page_directory.get_lock().own_lock());
-    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
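
All three walkers split a virtual address the same way; only the top-level mask changes, from 0x3 to 0x1ff, so that addresses outside the first 4 GiB index correctly. A self-contained sketch of the decomposition (the variable names are illustrative; the shifts and masks are exactly those in the diff):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        std::uint64_t vaddr = 0xC0123456; // an address in the old 3-4 GiB kernel window
        std::uint32_t pdpt_index = std::uint32_t((vaddr >> 30) & 0x1ff); // was & 0x3
        std::uint32_t pd_index = std::uint32_t((vaddr >> 21) & 0x1ff);   // 512 x 2 MiB per directory
        std::uint32_t pt_index = std::uint32_t((vaddr >> 12) & 0x1ff);   // 512 x 4 KiB per table
        std::printf("pdpt=%u pd=%u pt=%u\n", pdpt_index, pd_index, pt_index); // pdpt=3 pd=0 pt=291
        return 0;
    }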


@@ -83,17 +83,16 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
     m_directory_table = MM.allocate_user_physical_page();
     if (!m_directory_table)
         return;
-    m_directory_pages[0] = MM.allocate_user_physical_page();
-    if (!m_directory_pages[0])
-        return;
-    m_directory_pages[1] = MM.allocate_user_physical_page();
-    if (!m_directory_pages[1])
-        return;
-    m_directory_pages[2] = MM.allocate_user_physical_page();
-    if (!m_directory_pages[2])
-        return;
-    // Share the top 1 GiB of kernel-only mappings (>=3GiB)
-    m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
+    auto kernel_pd_index = (KERNEL_BASE >> 30) & 0xffu;
+    for (size_t i = 0; i < 4; i++) {
+        if (i == kernel_pd_index)
+            continue;
+        m_directory_pages[i] = MM.allocate_user_physical_page();
+        if (!m_directory_pages[i])
+            return;
+    }
+    // Share the top 1 GiB of kernel-only mappings (>=KERNEL_BASE)
+    m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];

 #if ARCH(X86_64)
     {
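
The constructor now asks which 1 GiB slot the kernel occupies and shares only that page directory with every process, allocating fresh directories for the rest. One detail worth noting: the mask here is 0xffu rather than the 0x1ff used elsewhere; the two agree for any KERNEL_BASE below 256 GiB, which includes every 32-bit base. Under the historical base the loop reproduces the old hand-unrolled behavior exactly (the concrete value below assumes KERNEL_BASE == 0xC0000000):

    // Worked example, not kernel code: with a 3 GiB KERNEL_BASE the loop
    // allocates slots 0, 1 and 2 and aliases slot 3 from the kernel's own
    // page directory, just as the removed code spelled out by hand.
    static_assert(((0xC0000000u >> 30) & 0xffu) == 3);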
@@ -105,17 +104,15 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
     {
         auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_directory_table);

+        for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
+            if (m_directory_pages[i]) {
 #if ARCH(I386)
-        table.raw[0] = (FlatPtr)m_directory_pages[0]->paddr().as_ptr() | 1;
-        table.raw[1] = (FlatPtr)m_directory_pages[1]->paddr().as_ptr() | 1;
-        table.raw[2] = (FlatPtr)m_directory_pages[2]->paddr().as_ptr() | 1;
-        table.raw[3] = (FlatPtr)m_directory_pages[3]->paddr().as_ptr() | 1;
+                table.raw[i] = (FlatPtr)m_directory_pages[i]->paddr().as_ptr() | 1;
 #else
-        table.raw[0] = (FlatPtr)m_directory_pages[0]->paddr().as_ptr() | 7;
-        table.raw[1] = (FlatPtr)m_directory_pages[1]->paddr().as_ptr() | 7;
-        table.raw[2] = (FlatPtr)m_directory_pages[2]->paddr().as_ptr() | 7;
-        table.raw[3] = (FlatPtr)m_directory_pages[3]->paddr().as_ptr() | 7;
+                table.raw[i] = (FlatPtr)m_directory_pages[i]->paddr().as_ptr() | 7;
 #endif
+            }
+        }

         // 2 ** MAXPHYADDR - 1
         // Where MAXPHYADDR = physical_address_bit_width
@@ -137,10 +134,8 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
         // when writing out the PDPT pointer to CR3.
         // The reason we're not checking the page directory's physical address directly is because
         // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
-        VERIFY((table.raw[0] & ~pdpte_bit_flags) <= max_physical_address);
-        VERIFY((table.raw[1] & ~pdpte_bit_flags) <= max_physical_address);
-        VERIFY((table.raw[2] & ~pdpte_bit_flags) <= max_physical_address);
-        VERIFY((table.raw[3] & ~pdpte_bit_flags) <= max_physical_address);
+        for (auto table_entry : table.raw)
+            VERIFY((table_entry & ~pdpte_bit_flags) <= max_physical_address);

         MM.unquickmap_page();
     }
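
Two details are worth unpacking here. First, the flag values: on i686, PAE PDPTEs accept little beyond the Present bit, hence "| 1", while on x86_64 the same slot is an ordinary page-table entry, so "| 7" also sets Writable and User. Second, the bound: the loop's VERIFY rejects entries whose address bits exceed 2 ** MAXPHYADDR - 1, which would otherwise be sign-extended into reserved bits (issue #4584, cited in the context above). A sketch using the standard x86 bit definitions (pdpte_bit_flags and physical_address_bit_width are the kernel's; the helper below is an assumption matching the "2 ** MAXPHYADDR - 1" comment):

    #include <cstdint>

    constexpr std::uint64_t present = 1u << 0;  // P: entry is valid
    constexpr std::uint64_t writable = 1u << 1; // R/W
    constexpr std::uint64_t user = 1u << 2;     // U/S
    static_assert(present == 1, "i686 PAE PDPTE: Present only");
    static_assert((present | writable | user) == 7, "x86_64: Present | Writable | User");

    // The presumed bound: 2 ** MAXPHYADDR - 1.
    constexpr std::uint64_t max_physical_address(unsigned physical_address_bit_width)
    {
        return (std::uint64_t(1) << physical_address_bit_width) - 1;
    }
    static_assert(max_physical_address(36) == 0xFFFFFFFFFull);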


@@ -67,7 +67,11 @@ private:
     RefPtr<PhysicalPage> m_pml4t;
 #endif
     RefPtr<PhysicalPage> m_directory_table;
+#if ARCH(X86_64)
+    RefPtr<PhysicalPage> m_directory_pages[512];
+#else
     RefPtr<PhysicalPage> m_directory_pages[4];
+#endif
     HashMap<u32, RefPtr<PhysicalPage>> m_page_tables;
     RecursiveSpinLock m_lock;
     bool m_valid { false };
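
Finally, the backing array has to match the PDPT it shadows: 4 RefPtrs suffice for the 4-entry PAE PDPT, but x86_64 needs one per 1 GiB slot of the 512 GiB region a single PDPT can map. A one-line sanity check of that arithmetic:

    // 512 page directories x 1 GiB each = 2^39 bytes = 512 GiB.
    static_assert(512ull * (1ull << 30) == (1ull << 39));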