1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-07 08:07:35 +00:00

Kernel: Fix memory purge clobbering mapped page directory in ensure_pte

If allocating a page table triggers purging memory, we need to call
quickmap_pd again to make sure the underlying physical page is
remapped to the correct one. This is needed because purging itself
may trigger calls to ensure_pte as well.

Fixes #3370
This commit is contained in:
Tom 2020-09-01 13:40:34 -06:00 committed by Andreas Kling
parent 30d36a3ad1
commit 83ddf3d850
2 changed files with 17 additions and 3 deletions

View file

@@ -223,7 +223,16 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
#ifdef MM_DEBUG
dbg() << "MM: PDE " << page_directory_index << " not present (requested for " << vaddr << "), allocating";
#endif
auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes);
bool did_purge = false;
auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
if (did_purge) {
// If any memory had to be purged, ensure_pte may have been called as part
// of the purging process. So we need to re-map the pd in this case to ensure
// we're writing to the correct underlying physical page
pd = quickmap_pd(page_directory, page_directory_table_index);
ASSERT(&pde == &pd[page_directory_index]); // Sanity check
ASSERT(!pde.is_present()); // Should have not changed
}
#ifdef MM_DEBUG
dbg() << "MM: PD K" << &page_directory << " (" << (&page_directory == m_kernel_page_directory ? "Kernel" : "User") << ") at " << PhysicalAddress(page_directory.cr3()) << " allocated page table #" << page_directory_index << " (for " << vaddr << ") at " << page_table->paddr();
#endif
@@ -482,10 +491,11 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page()
return page;
}
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
ScopedSpinLock lock(s_mm_lock);
auto page = find_free_user_physical_page();
bool purged_pages = false;
if (!page) {
// We didn't have a single free physical page. Let's try to free something up!
@@ -495,6 +505,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
if (purged_page_count) {
klog() << "MM: Purge saved the day! Purged " << purged_page_count << " pages from PurgeableVMObject{" << &vmobject << "}";
page = find_free_user_physical_page();
purged_pages = true;
ASSERT(page);
return IterationDecision::Break;
}
@@ -517,6 +528,9 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
unquickmap_page();
}
if (did_purge)
*did_purge = purged_pages;
++m_user_physical_pages_used;
return page;
}