From 1d43544e080ef69c076c4220f79f30bf2106e8de Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Mon, 13 Apr 2020 22:35:37 +0200
Subject: [PATCH] Kernel: Switch the first-8MB-of-upper-3GB pseudo mappings to
 4KB pages

This memory range was set up using 2MB pages by the code in boot.S.
Because of that, the kernel image protection code didn't work, since
it assumed 4KB pages.

We now switch to 4KB pages during MemoryManager initialization.
This makes the kernel image protection code work correctly again. :^)
---
 Kernel/VM/MemoryManager.cpp | 31 +++++++++++++++++++++++++++++++
 Kernel/VM/MemoryManager.h   |  3 +++
 2 files changed, 34 insertions(+)

diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 4ee2a19e58..24b3902bc8 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -63,6 +63,7 @@ MemoryManager::MemoryManager()
     parse_memory_map();
     write_cr3(kernel_page_directory().cr3());
     setup_low_identity_mapping();
+    setup_low_pseudo_identity_mapping();
     protect_kernel_image();
 
     m_shared_zero_page = allocate_user_physical_page();
@@ -72,6 +73,36 @@ MemoryManager::~MemoryManager()
 {
 }
 
+void MemoryManager::setup_low_pseudo_identity_mapping()
+{
+    // This code switches the pseudo-identity mapping (first 8 MB above the 3G mark) from 2MB pages to 4KB pages.
+    // The boot code sets it up as 2MB huge pages for convenience. But we need 4KB pages to be able to protect
+    // the kernel soon!
+
+    for (size_t i = 0; i < 4; ++i) {
+        m_low_pseudo_identity_mapping_pages[i] = allocate_supervisor_physical_page();
+        FlatPtr base = i * (2 * MB);
+        auto* page_table = (PageTableEntry*)quickmap_page(*m_low_pseudo_identity_mapping_pages[i]);
+        for (size_t j = 0; j < 512; ++j) {
+            auto& pte = page_table[j];
+            pte.set_physical_page_base(base + j * PAGE_SIZE);
+            pte.set_writable(true);
+            pte.set_present(true);
+            pte.set_execute_disabled(false);
+            pte.set_user_allowed(false);
+        }
+        unquickmap_page();
+    }
+
+    auto* pd = quickmap_pd(*m_kernel_page_directory, 3);
+    for (size_t i = 0; i < 4; ++i) {
+        pd[i].set_huge(false);
+        pd[i].set_page_table_base(m_low_pseudo_identity_mapping_pages[i]->paddr().get());
+    }
+
+    flush_entire_tlb();
+}
+
 void MemoryManager::protect_kernel_image()
 {
     // Disable writing to the kernel text and rodata segments.
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 02d3f64f37..88ae2b26a3 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -150,6 +150,7 @@ private:
 
     void detect_cpu_features();
     void setup_low_identity_mapping();
+    void setup_low_pseudo_identity_mapping();
     void protect_kernel_image();
     void parse_memory_map();
     void flush_entire_tlb();
@@ -191,6 +192,8 @@ private:
     InlineLinkedList<VMObject> m_vmobjects;
 
     bool m_quickmap_in_use { false };
+
+    RefPtr<PhysicalPage> m_low_pseudo_identity_mapping_pages[4];
 };
 
 template
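
Editor's note (illustration, not part of the patch): below is a minimal standalone C++ sketch of the arithmetic the new setup_low_pseudo_identity_mapping() relies on. Only the 3GB base (0xC0000000), the 4 page tables, the 512 entries per table, and the 4KB/2MB sizes come from the diff above; the constant names, the main() harness, and the printed output are illustrative assumptions, not kernel code.

// pseudo_identity_math.cpp: illustrative only.
#include <cstdint>
#include <cstdio>

static constexpr uint32_t KB = 1024;
static constexpr uint32_t MB = 1024 * KB;
static constexpr uint32_t PAGE_SIZE = 4 * KB;       // 4KB pages, as in the new PTEs
static constexpr uint32_t kernel_base = 0xC0000000; // the 3GB mark

// One page table holds 512 PTEs of 4KB each, i.e. exactly one former 2MB huge page.
static_assert(512u * PAGE_SIZE == 2 * MB, "one table spans one former huge page");

int main()
{
    // Same loop shape as the patch: 4 page tables replace 4 huge-page PDEs.
    for (uint32_t i = 0; i < 4; ++i) {
        uint32_t base = i * (2 * MB);              // physical base covered by table i
        for (uint32_t j = 0; j < 512; ++j) {
            uint32_t paddr = base + j * PAGE_SIZE; // what set_physical_page_base() receives
            uint32_t vaddr = kernel_base + paddr;  // pseudo-identity: virtual = physical + 3GB
            (void)vaddr;
        }
    }
    // Total coverage: 4 tables * 2MB = the first 8MB, mapped at and above 3GB.
    std::printf("virtual 0x%08x..0x%08x -> physical 0x%08x..0x%08x\n",
                (unsigned)kernel_base, (unsigned)(kernel_base + 8 * MB - 1),
                0u, (unsigned)(8 * MB - 1));
    return 0;
}

Running the sketch just confirms the ranges: virtual 0xC0000000..0xC07FFFFF covers physical 0x00000000..0x007FFFFF, the same region that boot.S previously mapped with four 2MB huge pages, now split into 4KB pages so protect_kernel_image() can adjust per-page permissions.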