
Kernel: Return an already destructed PhysicalPage to the allocators

By making sure the PhysicalPage instance is fully destructed, the
allocators will have a chance to reclaim the PhysicalPageEntry for
free-list purposes. Just pass them the physical address of the page
that was freed, which is enough to look up the PhysicalPageEntry later.
Tom 2021-07-07 20:28:51 -06:00 committed by Andreas Kling
parent 87dc4c3d2c
commit c1006a3689
6 changed files with 32 additions and 28 deletions
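
The "free-list purposes" above refer to the entry doing double duty: while a page is allocated, its PhysicalPageEntry holds the live PhysicalPage object, and once that object has been destructed, the same storage is free to hold the allocator's free-list link. A minimal user-space sketch of that idea, assuming a simplified entry layout (the kernel's actual definitions differ):

#include <cstdint>
#include <new>

// Simplified sketch, not the kernel's definitions: one entry per physical
// page, reused either as live page metadata or as a free-list link.
struct PhysicalPage {
    uint32_t ref_count;
    bool supervisor;
};

union PhysicalPageEntry {
    PhysicalPage physical_page; // valid while the page is allocated
    int32_t next_free_index;    // valid while the page is on the free list
};

int main()
{
    PhysicalPageEntry entry;

    // Allocate: construct the PhysicalPage in place inside the entry.
    new (&entry.physical_page) PhysicalPage { 1, false };

    // Free: end the PhysicalPage's lifetime first...
    entry.physical_page.~PhysicalPage();

    // ...so the allocator may now overwrite the storage with a free-list
    // link; the physical address alone is enough to find the entry again.
    entry.next_free_index = -1;
}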

MemoryManager.cpp

@@ -728,14 +728,14 @@ void MemoryManager::uncommit_user_physical_pages(size_t page_count)
     m_system_memory_info.user_physical_pages_committed -= page_count;
 }
 
-void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
+void MemoryManager::deallocate_user_physical_page(PhysicalAddress paddr)
 {
     ScopedSpinLock lock(s_mm_lock);
     for (auto& region : m_user_physical_regions) {
-        if (!region.contains(page))
+        if (!region.contains(paddr))
             continue;
 
-        region.return_page(page);
+        region.return_page(paddr);
         --m_system_memory_info.user_physical_pages_used;
 
         // Always return pages to the uncommitted pool. Pages that were
@@ -745,7 +745,7 @@ void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
         return;
     }
 
-    dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", page.paddr());
+    dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", paddr);
 
     VERIFY_NOT_REACHED();
 }
@@ -825,21 +825,21 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
     return page;
 }
 
-void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page)
+void MemoryManager::deallocate_supervisor_physical_page(PhysicalAddress paddr)
 {
     ScopedSpinLock lock(s_mm_lock);
     for (auto& region : m_super_physical_regions) {
-        if (!region.contains(page)) {
-            dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", page.paddr(), region.lower(), region.upper());
+        if (!region.contains(paddr)) {
+            dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", paddr, region.lower(), region.upper());
             continue;
         }
 
-        region.return_page(page);
+        region.return_page(paddr);
         --m_system_memory_info.super_physical_pages_used;
         return;
     }
 
-    dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", page.paddr());
+    dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", paddr);
 
     VERIFY_NOT_REACHED();
 }

MemoryManager.h

@@ -143,8 +143,8 @@ public:
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
-    void deallocate_user_physical_page(const PhysicalPage&);
-    void deallocate_supervisor_physical_page(const PhysicalPage&);
+    void deallocate_user_physical_page(PhysicalAddress);
+    void deallocate_supervisor_physical_page(PhysicalAddress);
 
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);

PhysicalPage.cpp

@@ -27,14 +27,21 @@ PhysicalAddress PhysicalPage::paddr() const
     return MM.get_physical_address(*this);
 }
 
-void PhysicalPage::return_to_freelist() const
+void PhysicalPage::free_this()
 {
-    VERIFY((paddr().get() & ~PAGE_MASK) == 0);
+    if (m_may_return_to_freelist) {
+        auto paddr = MM.get_physical_address(*this);
+        bool is_supervisor = m_supervisor;
 
-    if (m_supervisor)
-        MM.deallocate_supervisor_physical_page(*this);
-    else
-        MM.deallocate_user_physical_page(*this);
+        this->~PhysicalPage(); // delete in place
+
+        if (is_supervisor)
+            MM.deallocate_supervisor_physical_page(paddr);
+        else
+            MM.deallocate_user_physical_page(paddr);
+    } else {
+        this->~PhysicalPage(); // delete in place
+    }
 }
 
 }
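
Note the ordering in free_this() above: the physical address and the supervisor flag are copied into locals before the in-place destructor call, because once ~PhysicalPage() has run, the allocator may reuse the object's storage at any time. A standalone sketch of that pattern, with hypothetical names (Page, deallocate) standing in for the kernel's:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>

// Stand-in for MM.deallocate_user/supervisor_physical_page(paddr).
static void deallocate(uintptr_t paddr, bool supervisor)
{
    std::printf("returning %s page @ 0x%zx\n", supervisor ? "supervisor" : "user", (size_t)paddr);
}

struct Page {
    uintptr_t paddr;
    bool supervisor;

    void free_this()
    {
        auto saved_paddr = paddr;         // copy out before destruction
        bool was_supervisor = supervisor; // ditto

        this->~Page(); // delete in place; the storage is up for grabs from here on

        deallocate(saved_paddr, was_supervisor); // safe: only the locals are used
    }
};

int main()
{
    void* storage = ::operator new(sizeof(Page));
    auto* page = new (storage) Page { 0x1000, false };
    page->free_this();          // destructs in place, then reports the address
    ::operator delete(storage); // release the raw storage separately
}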

PhysicalPage.h

@@ -28,11 +28,8 @@ public:
 
     void unref()
     {
-        if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1) {
-            if (m_may_return_to_freelist)
-                return_to_freelist();
-            this->~PhysicalPage(); // delete in place
-        }
+        if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1)
+            free_this();
     }
 
     static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
@@ -46,7 +43,7 @@ private:
     PhysicalPage(bool supervisor, bool may_return_to_freelist = true);
     ~PhysicalPage() = default;
 
-    void return_to_freelist() const;
+    void free_this();
 
     Atomic<u32> m_ref_count { 1 };
     bool m_may_return_to_freelist { true };
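
The simplified unref() above is the usual intrusive refcount release: fetch_sub returns the count's previous value, so a result of 1 means this call dropped the last reference, and acq_rel ordering makes earlier writes to the object visible to the thread that frees it. The same shape in isolation, using std::atomic in place of AK's Atomic:

#include <atomic>

struct Counted {
    std::atomic<unsigned> ref_count { 1 };

    void unref()
    {
        // fetch_sub returns the value *before* subtracting, so seeing 1
        // means this thread just dropped the last reference.
        if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
            free_this();
    }

    void free_this()
    {
        // Stand-in for "destruct in place and return the page to the
        // allocator"; here we simply reclaim the heap allocation.
        delete this;
    }
};

int main()
{
    auto* counted = new Counted;
    counted->unref(); // drops the last reference; free_this() runs
}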

PhysicalRegion.cpp

@@ -172,7 +172,7 @@ void PhysicalRegion::free_page_at(PhysicalAddress addr)
     m_used--;
 }
 
-void PhysicalRegion::return_page(const PhysicalPage& page)
+void PhysicalRegion::return_page(PhysicalAddress paddr)
 {
     auto returned_count = m_recently_returned.size();
     if (returned_count >= m_recently_returned.capacity()) {
@@ -180,10 +180,10 @@ void PhysicalRegion::return_page(const PhysicalPage& page)
         // and replace the entry with this page
         auto& entry = m_recently_returned[get_fast_random<u8>()];
         free_page_at(entry);
-        entry = page.paddr();
+        entry = paddr;
     } else {
         // Still filling the return queue, just append it
-        m_recently_returned.append(page.paddr());
+        m_recently_returned.append(paddr);
     }
 }
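
For context, return_page() parks freed pages in the small fixed-capacity m_recently_returned buffer; only once it is full does a page actually get freed, with a randomly chosen entry evicted and its slot recycled. A user-space sketch of the same structure, with std::rand standing in for the kernel's get_fast_random<u8>():

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>

class ReturnQueue {
public:
    explicit ReturnQueue(std::size_t capacity)
        : m_capacity(capacity)
    {
        m_recently_returned.reserve(capacity);
    }

    void return_page(uintptr_t paddr)
    {
        if (m_recently_returned.size() >= m_capacity) {
            // Queue is full: free a randomly picked entry for real and
            // recycle its slot for the page being returned now.
            auto& entry = m_recently_returned[std::rand() % m_recently_returned.size()];
            free_page_at(entry);
            entry = paddr;
        } else {
            // Still filling the queue, just append.
            m_recently_returned.push_back(paddr);
        }
    }

private:
    static void free_page_at(uintptr_t) { /* hand back to the underlying allocator */ }

    std::size_t m_capacity { 0 };
    std::vector<uintptr_t> m_recently_returned;
};

int main()
{
    ReturnQueue queue(4);
    for (uintptr_t paddr = 0x1000; paddr <= 0x6000; paddr += 0x1000)
        queue.return_page(paddr); // once full, each return evicts a random entry
}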

PhysicalRegion.h

@@ -29,13 +29,13 @@ public:
     unsigned size() const { return m_pages; }
     unsigned used() const { return m_used - m_recently_returned.size(); }
     unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
-    bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
+    bool contains(PhysicalAddress paddr) const { return paddr >= m_lower && paddr <= m_upper; }
 
     NonnullRefPtr<PhysicalRegion> take_pages_from_beginning(unsigned);
 
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
-    void return_page(const PhysicalPage& page);
+    void return_page(PhysicalAddress);
 
 private:
     unsigned find_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);