From c1006a368916e2070eba55937765d5bd0a1a6807 Mon Sep 17 00:00:00 2001
From: Tom <tomut@yahoo.com>
Date: Wed, 7 Jul 2021 20:28:51 -0600
Subject: [PATCH] Kernel: Return an already destructed PhysicalPage to the
 allocators

By making sure the PhysicalPage instance is fully destructed, the
allocators will have a chance to reclaim the PhysicalPageEntry for
free-list purposes. Just pass them the physical address of the page
that was freed, which is enough to look up the PhysicalPageEntry later.
---
 Kernel/VM/MemoryManager.cpp  | 18 +++++++++---------
 Kernel/VM/MemoryManager.h    |  4 ++--
 Kernel/VM/PhysicalPage.cpp   | 19 +++++++++++++------
 Kernel/VM/PhysicalPage.h     |  9 +++------
 Kernel/VM/PhysicalRegion.cpp |  6 +++---
 Kernel/VM/PhysicalRegion.h   |  4 ++--
 6 files changed, 32 insertions(+), 28 deletions(-)
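The ordering that free_this() establishes (see Kernel/VM/PhysicalPage.cpp
below) is easier to see in isolation. Here is a minimal, self-contained
sketch of the same pattern; Page and deallocate_physical_page are
illustrative stand-ins for PhysicalPage and
MemoryManager::deallocate_*_physical_page, not the kernel's actual API.
The object copies out what it needs, destructs itself in place, and only
then hands the bare physical address to the allocator, which is at that
point free to reuse the object's storage:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <new>

using PhysicalAddress = std::uintptr_t;

// Stand-in for MemoryManager::deallocate_*_physical_page(PhysicalAddress).
// By the time this runs the refcounted object is already gone, so the
// allocator could reuse its storage (e.g. for free-list links) right away.
static void deallocate_physical_page(PhysicalAddress paddr)
{
    std::printf("allocator reclaims entry for page @ 0x%zx\n", static_cast<size_t>(paddr));
}

class Page {
public:
    explicit Page(PhysicalAddress paddr)
        : m_paddr(paddr)
    {
    }

    void ref() { m_ref_count.fetch_add(1, std::memory_order_relaxed); }

    void unref()
    {
        // Same shape as PhysicalPage::unref() after this patch.
        if (m_ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
            free_this();
    }

private:
    void free_this()
    {
        auto paddr = m_paddr;            // copy everything needed out of the object,
        this->~Page();                   // destruct in place,
        deallocate_physical_page(paddr); // and only then tell the allocator.
    }

    std::atomic<uint32_t> m_ref_count { 1 };
    PhysicalAddress m_paddr { 0 };
};

int main()
{
    // Construct into caller-owned storage, as the kernel does with its
    // per-page entries; the storage outlives the Page object itself.
    alignas(Page) unsigned char storage[sizeof(Page)];
    auto* page = new (storage) Page(0x100000);
    page->ref();   // refcount 1 -> 2
    page->unref(); // refcount 2 -> 1
    page->unref(); // refcount 1 -> 0: destructs in place, then deallocates
}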
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 2d8754b7b5..fb3b0a5451 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -728,14 +728,14 @@ void MemoryManager::uncommit_user_physical_pages(size_t page_count)
     m_system_memory_info.user_physical_pages_committed -= page_count;
 }
 
-void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
+void MemoryManager::deallocate_user_physical_page(PhysicalAddress paddr)
 {
     ScopedSpinLock lock(s_mm_lock);
     for (auto& region : m_user_physical_regions) {
-        if (!region.contains(page))
+        if (!region.contains(paddr))
             continue;
 
-        region.return_page(page);
+        region.return_page(paddr);
         --m_system_memory_info.user_physical_pages_used;
 
         // Always return pages to the uncommitted pool. Pages that were
@@ -745,7 +745,7 @@ void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
         return;
     }
 
-    dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", page.paddr());
+    dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", paddr);
     VERIFY_NOT_REACHED();
 }
 
@@ -825,21 +825,21 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
     return page;
 }
 
-void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page)
+void MemoryManager::deallocate_supervisor_physical_page(PhysicalAddress paddr)
 {
     ScopedSpinLock lock(s_mm_lock);
     for (auto& region : m_super_physical_regions) {
-        if (!region.contains(page)) {
-            dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", page.paddr(), region.lower(), region.upper());
+        if (!region.contains(paddr)) {
+            dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", paddr, region.lower(), region.upper());
             continue;
         }
 
-        region.return_page(page);
+        region.return_page(paddr);
         --m_system_memory_info.super_physical_pages_used;
         return;
     }
 
-    dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", page.paddr());
+    dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", paddr);
     VERIFY_NOT_REACHED();
 }
 
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index bc96051448..03a5b29fa8 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -143,8 +143,8 @@ public:
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
-    void deallocate_user_physical_page(const PhysicalPage&);
-    void deallocate_supervisor_physical_page(const PhysicalPage&);
+    void deallocate_user_physical_page(PhysicalAddress);
+    void deallocate_supervisor_physical_page(PhysicalAddress);
 
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
 
diff --git a/Kernel/VM/PhysicalPage.cpp b/Kernel/VM/PhysicalPage.cpp
index d9faf4e87e..a1c565dc0f 100644
--- a/Kernel/VM/PhysicalPage.cpp
+++ b/Kernel/VM/PhysicalPage.cpp
@@ -27,14 +27,21 @@ PhysicalAddress PhysicalPage::paddr() const
     return MM.get_physical_address(*this);
 }
 
-void PhysicalPage::return_to_freelist() const
+void PhysicalPage::free_this()
 {
-    VERIFY((paddr().get() & ~PAGE_MASK) == 0);
+    if (m_may_return_to_freelist) {
+        auto paddr = MM.get_physical_address(*this);
+        bool is_supervisor = m_supervisor;
 
-    if (m_supervisor)
-        MM.deallocate_supervisor_physical_page(*this);
-    else
-        MM.deallocate_user_physical_page(*this);
+        this->~PhysicalPage(); // delete in place
+
+        if (is_supervisor)
+            MM.deallocate_supervisor_physical_page(paddr);
+        else
+            MM.deallocate_user_physical_page(paddr);
+    } else {
+        this->~PhysicalPage(); // delete in place
+    }
 }
 
 }
diff --git a/Kernel/VM/PhysicalPage.h b/Kernel/VM/PhysicalPage.h
index bd17a5de7d..af68660538 100644
--- a/Kernel/VM/PhysicalPage.h
+++ b/Kernel/VM/PhysicalPage.h
@@ -28,11 +28,8 @@ public:
 
     void unref()
     {
-        if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1) {
-            if (m_may_return_to_freelist)
-                return_to_freelist();
-            this->~PhysicalPage(); // delete in place
-        }
+        if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1)
+            free_this();
     }
 
     static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
@@ -46,7 +43,7 @@ private:
     PhysicalPage(bool supervisor, bool may_return_to_freelist = true);
     ~PhysicalPage() = default;
 
-    void return_to_freelist() const;
+    void free_this();
 
     Atomic<u32> m_ref_count { 1 };
     bool m_may_return_to_freelist { true };
diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
index 4b492cacf6..9dd32dcbcd 100644
--- a/Kernel/VM/PhysicalRegion.cpp
+++ b/Kernel/VM/PhysicalRegion.cpp
@@ -172,7 +172,7 @@ void PhysicalRegion::free_page_at(PhysicalAddress addr)
     m_used--;
 }
 
-void PhysicalRegion::return_page(const PhysicalPage& page)
+void PhysicalRegion::return_page(PhysicalAddress paddr)
 {
     auto returned_count = m_recently_returned.size();
     if (returned_count >= m_recently_returned.capacity()) {
@@ -180,10 +180,10 @@ void PhysicalRegion::return_page(const PhysicalPage& page)
         // and replace the entry with this page
         auto& entry = m_recently_returned[get_fast_random<u8>()];
         free_page_at(entry);
-        entry = page.paddr();
+        entry = paddr;
     } else {
         // Still filling the return queue, just append it
-        m_recently_returned.append(page.paddr());
+        m_recently_returned.append(paddr);
     }
 }
 
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
index e2f95da0d5..4d7191c22d 100644
--- a/Kernel/VM/PhysicalRegion.h
+++ b/Kernel/VM/PhysicalRegion.h
@@ -29,13 +29,13 @@ public:
     unsigned size() const { return m_pages; }
     unsigned used() const { return m_used - m_recently_returned.size(); }
     unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
-    bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
+    bool contains(PhysicalAddress paddr) const { return paddr >= m_lower && paddr <= m_upper; }
 
     NonnullRefPtr<PhysicalPage> take_pages_from_beginning(unsigned);
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
 
-    void return_page(const PhysicalPage& page);
+    void return_page(PhysicalAddress);
 
 private:
     unsigned find_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
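-- 

Why the full destruction matters before the deallocate call: the commit
message's "reclaim the PhysicalPageEntry for free-list purposes" implies
an entry whose bytes hold either a live PhysicalPage or free-list
bookkeeping, never both. The layout below is a hypothetical illustration
of that idea, not SerenityOS's actual PhysicalPageEntry definition:
reusing the union's bytes for links only becomes legal once the
PhysicalPage's lifetime has ended, which free_this() now guarantees
before deallocate_*_physical_page(PhysicalAddress) runs.

#include <atomic>
#include <cstdint>
#include <new>

// Simplified stand-in for the real PhysicalPage (nontrivial members).
class PhysicalPage {
public:
    PhysicalPage() = default;
    ~PhysicalPage() = default;

private:
    std::atomic<uint32_t> m_ref_count { 1 };
    bool m_may_return_to_freelist { true };
};

// Hypothetical per-page entry: the same bytes serve as a live page or as
// free-list links, depending on the page's current state.
struct PhysicalPageEntry {
    PhysicalPageEntry() {}  // members are constructed and destructed manually
    ~PhysicalPageEntry() {}

    struct FreeListLinks {
        int32_t next_index;
        int32_t prev_index;
    };

    union {
        PhysicalPage physical_page;
        FreeListLinks freelist;
    };
};

int main()
{
    PhysicalPageEntry entry;
    new (&entry.physical_page) PhysicalPage(); // page is allocated
    entry.physical_page.~PhysicalPage();       // free_this() ends its lifetime...
    entry.freelist = { -1, -1 };               // ...then the allocator may take the bytes
}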