From 83ddf3d8502014ab95fe6dc8ee2f1b8101a0d6dc Mon Sep 17 00:00:00 2001
From: Tom
Date: Tue, 1 Sep 2020 13:40:34 -0600
Subject: [PATCH] Kernel: Fix memory purge clobbering mapped page directory in
 ensure_pte

If allocating a page table triggers purging memory, we need to call
quickmap_pd again to make sure the underlying physical page is
remapped to the correct one. This is needed because purging itself
may trigger calls to ensure_pte as well.

Fixes #3370
---
 Kernel/VM/MemoryManager.cpp | 18 ++++++++++++++++--
 Kernel/VM/MemoryManager.h   |  2 +-
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 15fdc65b75..79d37784bc 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -223,7 +223,16 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
 #ifdef MM_DEBUG
         dbg() << "MM: PDE " << page_directory_index << " not present (requested for " << vaddr << "), allocating";
 #endif
-        auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes);
+        bool did_purge = false;
+        auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
+        if (did_purge) {
+            // If any memory had to be purged, ensure_pte may have been called as part
+            // of the purging process. So we need to re-map the pd in this case to ensure
+            // we're writing to the correct underlying physical page
+            pd = quickmap_pd(page_directory, page_directory_table_index);
+            ASSERT(&pde == &pd[page_directory_index]); // Sanity check
+            ASSERT(!pde.is_present()); // Should have not changed
+        }
 #ifdef MM_DEBUG
         dbg() << "MM: PD K" << &page_directory << " (" << (&page_directory == m_kernel_page_directory ? "Kernel" : "User") << ") at " << PhysicalAddress(page_directory.cr3()) << " allocated page table #" << page_directory_index << " (for " << vaddr << ") at " << page_table->paddr();
 #endif
@@ -482,10 +491,11 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page()
     return page;
 }
 
-RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
+RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
     ScopedSpinLock lock(s_mm_lock);
     auto page = find_free_user_physical_page();
+    bool purged_pages = false;
 
     if (!page) {
         // We didn't have a single free physical page. Let's try to free something up!
@@ -495,6 +505,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
             if (purged_page_count) {
                 klog() << "MM: Purge saved the day! Purged " << purged_page_count << " pages from PurgeableVMObject{" << &vmobject << "}";
                 page = find_free_user_physical_page();
+                purged_pages = true;
                 ASSERT(page);
                 return IterationDecision::Break;
             }
@@ -517,6 +528,9 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
         unquickmap_page();
     }
 
+    if (did_purge)
+        *did_purge = purged_pages;
+
     ++m_user_physical_pages_used;
     return page;
 }
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 1da2c9edee..f30fda1c33 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -112,7 +112,7 @@ public:
         Yes
     };
 
-    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
+    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_user_physical_page(const PhysicalPage&);
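
Note, not part of the patch: to make the hazard concrete outside the kernel
sources, here is a minimal standalone C++ sketch of the same pattern. All
names in it (quickmap, allocate_page, s_window, s_phys) are hypothetical
stand-ins, not the real MemoryManager API. It only models the core idea:
a single shared mapping window can be re-pointed by a re-entrant purge
during allocation, so the caller must re-map before writing through its
pointer, exactly what the did_purge flag enables above.

#include <cassert>
#include <cstdio>
#include <cstring>

// Toy model of the single shared "quickmap" window: one fixed address
// that gets re-pointed at different physical pages.
constexpr int PAGE_WORDS = 4;
static int s_phys[8][PAGE_WORDS] {}; // toy "physical memory": 8 pages
static int s_window[PAGE_WORDS] {};  // the one shared mapping window
static int s_window_page = -1;       // which physical page it currently maps

static int* quickmap(int page)
{
    if (s_window_page >= 0) // write the old contents back before re-pointing
        memcpy(s_phys[s_window_page], s_window, sizeof(s_window));
    s_window_page = page;
    memcpy(s_window, s_phys[page], sizeof(s_window)); // expose the new page
    return s_window; // same address every time, like a real quickmap slot
}

// Stand-in for allocate_user_physical_page(): under memory pressure the
// purge path runs, and purging itself re-uses the shared window (just as
// purging can re-enter ensure_pte in the real kernel).
static int allocate_page(bool memory_tight, bool* did_purge)
{
    if (memory_tight) {
        int* other = quickmap(7); // the purge path clobbers the window
        other[0] = 1234;
        if (did_purge)
            *did_purge = true;
    }
    return 3; // index of the freshly "allocated" page
}

int main()
{
    bool did_purge = false;

    int* pd = quickmap(0); // map our "page directory" (physical page 0)
    int page_table = allocate_page(true, &did_purge);

    // The fix from this patch: if anything was purged, the window may now
    // show a different physical page, so re-map before writing through pd.
    if (did_purge)
        pd = quickmap(0);

    assert(s_window_page == 0); // the window shows our page directory again
    pd[0] = page_table;         // this write lands in page 0, not page 7
    quickmap(0);                // flush the toy write-back
    printf("page 0, word 0 = %d\n", s_phys[0][0]); // prints 3
    assert(s_phys[7][0] == 1234); // the purge's own write stayed in page 7
    return 0;
}

Without the re-map, pd would still reference whatever page the purge left
in the window, and the page directory update would silently land in the
wrong physical page, which is the clobbering the commit title describes.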