From 183205d51cefcb68b9fd8102fe2b566212a8e59e Mon Sep 17 00:00:00 2001
From: Andreas Kling <awesomekling@gmail.com>
Date: Wed, 26 Jun 2019 21:45:56 +0200
Subject: [PATCH] Kernel: Make the x86 paging code slightly less insane.

Instead of PDE's and PTE's being weird wrappers around dword*, just have
MemoryManager::ensure_pte() return a PageTableEntry&, and give
PageDirectory a PageDirectoryEntry* entries().

I've been trying to understand how things ended up this way, and I
suspect it was because I inadvertently invoked the PageDirectoryEntry
copy ctor in the original work on this, which must have made me very
confused..

Anyways, now things are a bit saner and we can move forward towards a
better future, etc. :^)
---
 Kernel/Arch/i386/CPU.h      | 105 +++++++++++++++++++++++++++++++++++
 Kernel/VM/MemoryManager.cpp |  36 ++++++------
 Kernel/VM/MemoryManager.h   | 106 +-----------------------------------
 Kernel/VM/PageDirectory.h   |   2 +-
 4 files changed, 126 insertions(+), 123 deletions(-)

diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index bb0f7a037b..4a90bab110 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -1,11 +1,16 @@
 #pragma once
 
+#include <AK/Badge.h>
+#include <AK/Noncopyable.h>
 #include <AK/Types.h>
 #include <Kernel/VirtualAddress.h>
 
 #define PAGE_SIZE 4096
 #define PAGE_MASK 0xfffff000
 
+class MemoryManager;
+class PageTableEntry;
+
 struct [[gnu::packed]] TSS32
 {
     word backlink, __blh;
@@ -79,6 +84,106 @@ union [[gnu::packed]] Descriptor
     }
 };
 
+class PageDirectoryEntry {
+    AK_MAKE_NONCOPYABLE(PageDirectoryEntry);
+
+public:
+    PageTableEntry* page_table_base() { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
+    void set_page_table_base(dword value)
+    {
+        m_raw &= 0xfff;
+        m_raw |= value & 0xfffff000;
+    }
+
+    dword raw() const { return m_raw; }
+    void copy_from(Badge<MemoryManager>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
+
+    enum Flags {
+        Present = 1 << 0,
+        ReadWrite = 1 << 1,
+        UserSupervisor = 1 << 2,
+        WriteThrough = 1 << 3,
+        CacheDisabled = 1 << 4,
+    };
+
+    bool is_present() const { return raw() & Present; }
+    void set_present(bool b) { set_bit(Present, b); }
+
+    bool is_user_allowed() const { return raw() & UserSupervisor; }
+    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
+
+    bool is_writable() const { return raw() & ReadWrite; }
+    void set_writable(bool b) { set_bit(ReadWrite, b); }
+
+    bool is_write_through() const { return raw() & WriteThrough; }
+    void set_write_through(bool b) { set_bit(WriteThrough, b); }
+
+    bool is_cache_disabled() const { return raw() & CacheDisabled; }
+    void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
+
+    void set_bit(byte bit, bool value)
+    {
+        if (value)
+            m_raw |= bit;
+        else
+            m_raw &= ~bit;
+    }
+
+private:
+    dword m_raw;
+};
+
+class PageTableEntry {
+    AK_MAKE_NONCOPYABLE(PageTableEntry);
+
+public:
+    void* physical_page_base() { return reinterpret_cast<void*>(m_raw & 0xfffff000u); }
+    void set_physical_page_base(dword value)
+    {
+        m_raw &= 0xfff;
+        m_raw |= value & 0xfffff000;
+    }
+
+    dword raw() const { return m_raw; }
+
+    enum Flags {
+        Present = 1 << 0,
+        ReadWrite = 1 << 1,
+        UserSupervisor = 1 << 2,
+        WriteThrough = 1 << 3,
+        CacheDisabled = 1 << 4,
+    };
+
+    bool is_present() const { return raw() & Present; }
+    void set_present(bool b) { set_bit(Present, b); }
+
+    bool is_user_allowed() const { return raw() & UserSupervisor; }
+    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
+
+    bool is_writable() const { return raw() & ReadWrite; }
+    void set_writable(bool b) { set_bit(ReadWrite, b); }
+
+    bool is_write_through() const { return raw() & WriteThrough; }
+    void set_write_through(bool b) { set_bit(WriteThrough, b); }
+
+    bool is_cache_disabled() const { return raw() & CacheDisabled; }
+    void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
+
+    void set_bit(byte bit, bool value)
+    {
+        if (value)
+            m_raw |= bit;
+        else
+            m_raw &= ~bit;
+    }
+
+private:
+    dword m_raw;
+};
+
+static_assert(sizeof(PageDirectoryEntry) == 4);
+static_assert(sizeof(PageTableEntry) == 4);
+
 class IRQHandler;
 
 void gdt_init();
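Both new classes wrap a single raw dword whose low 12 bits are flags and whose high 20 bits are a 4KB-aligned physical address, matching the hardware layout of x86 paging entries. A minimal standalone sketch (not part of the patch; plain <cstdint> types stand in for AK's dword/byte, and the names are simplified) of how the masking in set_page_table_base() and set_bit() composes:

    #include <cassert>
    #include <cstdint>

    // One 32-bit paging entry: bits 31:12 = physical base, bits 11:0 = flags.
    struct Entry {
        enum Flags { Present = 1 << 0, ReadWrite = 1 << 1 };

        bool is_present() const { return m_raw & Present; }
        void set_present(bool b) { set_bit(Present, b); }

        void set_base(uint32_t value)
        {
            m_raw &= 0xfff;              // keep the flag bits
            m_raw |= value & 0xfffff000; // install the page-aligned base
        }

        void set_bit(uint8_t bit, bool value)
        {
            if (value)
                m_raw |= bit;
            else
                m_raw &= ~uint32_t(bit);
        }

        uint32_t m_raw { 0 };
    };

    int main()
    {
        Entry e;
        e.set_base(0x12345000);
        e.set_present(true);
        assert(e.m_raw == (0x12345000u | Entry::Present));
        e.set_present(false);
        assert(!e.is_present());
    }

Because the flags live entirely in the low 12 bits that the address mask discards, setting the base and toggling flags never disturb each other.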
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index dd0f0f9ef0..2d01fda98e 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -21,8 +21,8 @@ MemoryManager& MM
 MemoryManager::MemoryManager()
 {
     m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(0x4000));
-    m_page_table_zero = (dword*)0x6000;
-    m_page_table_one = (dword*)0x7000;
+    m_page_table_zero = (PageTableEntry*)0x6000;
+    m_page_table_one = (PageTableEntry*)0x7000;
 
     initialize_paging();
 
@@ -36,17 +36,15 @@ MemoryManager::~MemoryManager()
 void MemoryManager::populate_page_directory(PageDirectory& page_directory)
 {
     page_directory.m_directory_page = allocate_supervisor_physical_page();
-    page_directory.entries()[0] = kernel_page_directory().entries()[0];
-    page_directory.entries()[1] = kernel_page_directory().entries()[1];
+    page_directory.entries()[0].copy_from({}, kernel_page_directory().entries()[0]);
+    page_directory.entries()[1].copy_from({}, kernel_page_directory().entries()[1]);
     // Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
     for (int i = 768; i < 1024; ++i)
-        page_directory.entries()[i] = kernel_page_directory().entries()[i];
+        page_directory.entries()[i].copy_from({}, kernel_page_directory().entries()[i]);
 }
 
 void MemoryManager::initialize_paging()
 {
-    static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
-    static_assert(sizeof(MemoryManager::PageTableEntry) == 4);
     memset(m_page_table_zero, 0, PAGE_SIZE);
     memset(m_page_table_one, 0, PAGE_SIZE);
 
@@ -167,7 +165,7 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Virtu
     // FIXME: ASSERT(vaddr is 4KB aligned);
     for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
-        auto pte = ensure_pte(page_directory, pte_address);
+        auto& pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(0);
         pte.set_user_allowed(false);
         pte.set_present(true);
@@ -176,13 +174,13 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Virtu
     }
 }
 
-auto MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr) -> PageTableEntry
+PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     dword page_directory_index = (vaddr.get() >> 22) & 0x3ff;
     dword page_table_index = (vaddr.get() >> 12) & 0x3ff;
-    PageDirectoryEntry pde = PageDirectoryEntry(&page_directory.entries()[page_directory_index]);
+    PageDirectoryEntry& pde = page_directory.entries()[page_directory_index];
     if (!pde.is_present()) {
 #ifdef MM_DEBUG
         dbgprintf("MM: PDE %u not present (requested for L%x), allocating\n", page_directory_index, vaddr.get());
 #endif
@@ -219,7 +217,7 @@ auto MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vad
             page_directory.m_physical_pages.set(page_directory_index, move(page_table));
         }
     }
-    return PageTableEntry(&pde.page_table_base()[page_table_index]);
+    return pde.page_table_base()[page_table_index];
 }
 
 void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
@@ -228,7 +226,7 @@ void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
     ASSERT(vaddr.is_page_aligned());
     for (dword offset = 0; offset < length; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
-        auto pte = ensure_pte(kernel_page_directory(), pte_address);
+        auto& pte = ensure_pte(kernel_page_directory(), pte_address);
         pte.set_physical_page_base(pte_address.get());
         pte.set_user_allowed(false);
         pte.set_present(false);
@@ -243,7 +241,7 @@ void MemoryManager::create_identity_mapping(PageDirectory& page_directory, Virtu
     ASSERT((vaddr.get() & ~PAGE_MASK) == 0);
     for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
-        auto pte = ensure_pte(page_directory, pte_address);
+        auto& pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(pte_address.get());
         pte.set_user_allowed(false);
         pte.set_present(true);
@@ -595,7 +593,7 @@ void MemoryManager::flush_tlb(VirtualAddress vaddr)
 
 void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr)
 {
-    auto pte = ensure_pte(kernel_page_directory(), vaddr);
+    auto& pte = ensure_pte(kernel_page_directory(), vaddr);
     pte.set_physical_page_base(paddr.get());
     pte.set_present(true);
     pte.set_writable(true);
@@ -609,7 +607,7 @@ byte* MemoryManager::quickmap_page(PhysicalPage& physical_page)
     ASSERT(!m_quickmap_in_use);
     m_quickmap_in_use = true;
     auto page_vaddr = m_quickmap_addr;
-    auto pte = ensure_pte(kernel_page_directory(), page_vaddr);
+    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
     pte.set_physical_page_base(physical_page.paddr().get());
     pte.set_present(true);
     pte.set_writable(true);
@@ -627,7 +625,7 @@ void MemoryManager::unquickmap_page()
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(m_quickmap_in_use);
     auto page_vaddr = m_quickmap_addr;
-    auto pte = ensure_pte(kernel_page_directory(), page_vaddr);
+    auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
 #ifdef MM_DEBUG
     auto old_physical_address = pte.physical_page_base();
 #endif
@@ -646,7 +644,7 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg
     ASSERT(region.page_directory());
     InterruptDisabler disabler;
     auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
-    auto pte = ensure_pte(*region.page_directory(), page_vaddr);
+    auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
     auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
@@ -681,7 +679,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
 #endif
     for (size_t i = 0; i < region.page_count(); ++i) {
         auto page_vaddr = vaddr.offset(i * PAGE_SIZE);
-        auto pte = ensure_pte(page_directory, page_vaddr);
+        auto& pte = ensure_pte(page_directory, page_vaddr);
         auto& physical_page = vmo.physical_pages()[region.first_page_index() + i];
         if (physical_page) {
             pte.set_physical_page_base(physical_page->paddr().get());
@@ -712,7 +710,7 @@ bool MemoryManager::unmap_region(Region& region)
     InterruptDisabler disabler;
     for (size_t i = 0; i < region.page_count(); ++i) {
         auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
-        auto pte = ensure_pte(*region.page_directory(), vaddr);
+        auto& pte = ensure_pte(*region.page_directory(), vaddr);
         pte.set_physical_page_base(0);
         pte.set_present(false);
         pte.set_writable(false);
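ensure_pte() walks the two-level x86 (non-PAE) structure by slicing the 32-bit virtual address into a 10-bit page directory index, a 10-bit page table index, and a 12-bit page offset. A small standalone worked example (not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint32_t vaddr = 0xc0123456;
        uint32_t pd_index = (vaddr >> 22) & 0x3ff; // top 10 bits: which PDE
        uint32_t pt_index = (vaddr >> 12) & 0x3ff; // next 10 bits: which PTE
        uint32_t offset = vaddr & 0xfff;           // low 12 bits: byte in the 4KB page

        // Prints: pd=768 pt=291 offset=0x456
        printf("pd=%u pt=%u offset=%#x\n", pd_index, pt_index, offset);
    }

Note that every address at or above 0xC0000000 lands in directory slot 768 or higher, which is exactly the range populate_page_directory() above shares from the kernel page directory.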
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 4475866259..7acf50c72b 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -112,111 +112,11 @@ private:
 
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
-    struct PageDirectoryEntry {
-        explicit PageDirectoryEntry(dword* pde)
-            : m_pde(pde)
-        {
-        }
-
-        dword* page_table_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
-        void set_page_table_base(dword value)
-        {
-            *m_pde &= 0xfff;
-            *m_pde |= value & 0xfffff000;
-        }
-
-        dword raw() const { return *m_pde; }
-        dword* ptr() { return m_pde; }
-
-        enum Flags {
-            Present = 1 << 0,
-            ReadWrite = 1 << 1,
-            UserSupervisor = 1 << 2,
-            WriteThrough = 1 << 3,
-            CacheDisabled = 1 << 4,
-        };
-
-        bool is_present() const { return raw() & Present; }
-        void set_present(bool b) { set_bit(Present, b); }
-
-        bool is_user_allowed() const { return raw() & UserSupervisor; }
-        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
-
-        bool is_writable() const { return raw() & ReadWrite; }
-        void set_writable(bool b) { set_bit(ReadWrite, b); }
-
-        bool is_write_through() const { return raw() & WriteThrough; }
-        void set_write_through(bool b) { set_bit(WriteThrough, b); }
-
-        bool is_cache_disabled() const { return raw() & CacheDisabled; }
-        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
-
-        void set_bit(byte bit, bool value)
-        {
-            if (value)
-                *m_pde |= bit;
-            else
-                *m_pde &= ~bit;
-        }
-
-        dword* m_pde;
-    };
-
-    struct PageTableEntry {
-        explicit PageTableEntry(dword* pte)
-            : m_pte(pte)
-        {
-        }
-
-        dword* physical_page_base() { return reinterpret_cast<dword*>(raw() & 0xfffff000u); }
-        void set_physical_page_base(dword value)
-        {
-            *m_pte &= 0xfffu;
-            *m_pte |= value & 0xfffff000u;
-        }
-
-        dword raw() const { return *m_pte; }
-        dword* ptr() { return m_pte; }
-
-        enum Flags {
-            Present = 1 << 0,
-            ReadWrite = 1 << 1,
-            UserSupervisor = 1 << 2,
-            WriteThrough = 1 << 3,
-            CacheDisabled = 1 << 4,
-        };
-
-        bool is_present() const { return raw() & Present; }
-        void set_present(bool b) { set_bit(Present, b); }
-
-        bool is_user_allowed() const { return raw() & UserSupervisor; }
-        void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }
-
-        bool is_writable() const { return raw() & ReadWrite; }
-        void set_writable(bool b) { set_bit(ReadWrite, b); }
-
-        bool is_write_through() const { return raw() & WriteThrough; }
-        void set_write_through(bool b) { set_bit(WriteThrough, b); }
-
-        bool is_cache_disabled() const { return raw() & CacheDisabled; }
-        void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }
-
-        void set_bit(byte bit, bool value)
-        {
-            if (value)
-                *m_pte |= bit;
-            else
-                *m_pte &= ~bit;
-        }
-
-        dword* m_pte;
-    };
-
-    PageTableEntry ensure_pte(PageDirectory&, VirtualAddress);
+    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
-    dword* m_page_table_zero { nullptr };
-    dword* m_page_table_one { nullptr };
+    PageTableEntry* m_page_table_zero { nullptr };
+    PageTableEntry* m_page_table_one { nullptr };
 
     VirtualAddress m_quickmap_addr;
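The signature change above is the heart of the fix: ensure_pte() now returns a reference to the 4-byte entry the MMU actually reads, not a copyable wrapper object. A standalone sketch (simplified types, not SerenityOS code) of the bug class this removes, and why AK_MAKE_NONCOPYABLE plus `auto&` at every call site matter:

    #include <cassert>
    #include <cstdint>

    // A copyable stand-in for PageTableEntry, to show the failure mode.
    struct Entry {
        void set_present(bool b)
        {
            if (b)
                m_raw |= 1;
            else
                m_raw &= ~1u;
        }
        uint32_t m_raw { 0 };
    };

    Entry page_table[1024];

    Entry& ensure_entry(int index) { return page_table[index]; }

    int main()
    {
        auto copy = ensure_entry(5); // deduces Entry: copies the 4-byte entry
        copy.set_present(true);      // updates the temporary copy...
        assert(page_table[5].m_raw == 0); // ...so the real entry never changes

        auto& ref = ensure_entry(5); // deduces Entry&: aliases the real entry
        ref.set_present(true);
        assert(page_table[5].m_raw == 1);
        // With AK_MAKE_NONCOPYABLE(PageTableEntry), the `auto copy` line
        // would not compile, turning this silent bug into a build error.
    }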
diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h
index 06698f75c5..b7917aa9f1 100644
--- a/Kernel/VM/PageDirectory.h
+++ b/Kernel/VM/PageDirectory.h
@@ -15,7 +15,7 @@ public:
     ~PageDirectory();
 
     dword cr3() const { return m_directory_page->paddr().get(); }
-    dword* entries() { return reinterpret_cast<dword*>(cr3()); }
+    PageDirectoryEntry* entries() { return reinterpret_cast<PageDirectoryEntry*>(cr3()); }
 
     void flush(VirtualAddress);
 
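With entries() typed as PageDirectoryEntry*, the copy_from(Badge<MemoryManager>, ...) calls earlier in the patch become the only sanctioned way to duplicate an entry. A minimal sketch of the Badge idiom (assuming AK's Badge is essentially a token whose private constructor befriends its template argument; simplified here, not the actual AK implementation):

    #include <cassert>

    template<typename T>
    class Badge {
        friend T;
        Badge() {}
    };

    class MemoryManager;

    class PageDirectoryEntry {
    public:
        void copy_from(Badge<MemoryManager>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
        unsigned m_raw { 0 };
    };

    class MemoryManager {
    public:
        static void clone(PageDirectoryEntry& dst, const PageDirectoryEntry& src)
        {
            // `{}` compiles here because only MemoryManager, as the friend
            // of Badge<MemoryManager>, may construct the badge token.
            dst.copy_from({}, src);
        }
    };

    int main()
    {
        PageDirectoryEntry a, b;
        a.m_raw = 0x1234;
        MemoryManager::clone(b, a);
        assert(b.m_raw == 0x1234);
        // Anywhere else, `b.copy_from({}, a)` fails to compile: Badge's
        // constructor is private, so arbitrary code cannot forge the token.
    }

The badge costs nothing at runtime but documents, in the signature itself, who is allowed to copy an otherwise noncopyable paging entry.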