
Kernel: Release page tables when no longer needed

When unmapping regions, check if page tables can be freed.

This is a follow-up change for #3254.

Tom authored on 2020-08-27 21:29:17 -06:00; committed by Andreas Kling
parent 88319b188e
commit 67dbb56444
5 changed files with 50 additions and 7 deletions
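
The bookkeeping idea, in miniature: each PageDirectory keeps the page tables it has allocated in a map keyed by the 2 MiB-aligned base of the virtual range a table covers, and once an unmap clears a PTE and leaves every one of the table's 512 entries null, the table (and the physical page backing it) can be dropped from the map. Below is a rough standalone sketch of that scheme, using standard containers instead of the kernel's AK::HashMap and RefPtr, and checking emptiness on every release rather than only on a region's last PTE as the actual change does; all names here are illustrative.

// Illustrative model only, not SerenityOS code.
#include <array>
#include <cstdint>
#include <map>
#include <memory>

struct ToyPageTable {
    std::array<uint32_t, 512> entries {}; // 0 means "not mapped"
};

struct ToyPageDirectory {
    // Key: vaddr & ~0x1fffff, the base of the 2 MiB region a table covers.
    std::map<uint32_t, std::unique_ptr<ToyPageTable>> page_tables;
};

// Clear one PTE; if its page table is now completely empty, free the table too.
inline void toy_release_pte(ToyPageDirectory& directory, uint32_t vaddr)
{
    auto it = directory.page_tables.find(vaddr & ~0x1fffffu);
    if (it == directory.page_tables.end())
        return; // nothing was ever mapped in this 2 MiB region
    auto& table = *it->second;
    table.entries[(vaddr >> 12) & 0x1ff] = 0;

    bool all_clear = true;
    for (uint32_t entry : table.entries) {
        if (entry != 0) {
            all_clear = false;
            break;
        }
    }
    if (all_clear)
        directory.page_tables.erase(it); // the page table's memory is returned here
}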

@@ -140,6 +140,7 @@ public:
         m_raw |= value & 0xfffff000;
     }
 
+    bool is_null() const { return m_raw == 0; }
     void clear() { m_raw = 0; }
 
     u64 raw() const { return m_raw; }
@@ -234,6 +235,7 @@ public:
     bool is_execute_disabled() const { return raw() & NoExecute; }
     void set_execute_disabled(bool b) { set_bit(NoExecute, b); }
 
+    bool is_null() const { return m_raw == 0; }
     void clear() { m_raw = 0; }
 
     void set_bit(u64 bit, bool value)

@@ -188,7 +188,7 @@ void MemoryManager::parse_memory_map()
     ASSERT(m_user_physical_pages > 0);
 }
 
-const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
@@ -227,13 +227,53 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
         pde.set_present(true);
         pde.set_writable(true);
         pde.set_global(&page_directory == m_kernel_page_directory.ptr());
-        auto result = page_directory.m_physical_pages.set(move(page_table));
+        // Use page_directory_table_index and page_directory_index as key
+        // This allows us to release the page table entry when no longer needed
+        auto result = page_directory.m_page_tables.set(vaddr.get() & ~0x1fffff, move(page_table));
         ASSERT(result == AK::HashSetResult::InsertedNewEntry);
     }
 
     return quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
 }
 
+void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
+{
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(s_mm_lock.own_lock());
+
+    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
+    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
+
+    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
+    PageDirectoryEntry& pde = pd[page_directory_index];
+    if (pde.is_present()) {
+        auto* page_table = quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()));
+        auto& pte = page_table[page_table_index];
+        pte.clear();
+
+        if (is_last_release || page_table_index == 0x1ff) {
+            // If this is the last PTE in a region or the last PTE in a page table then
+            // check if we can also release the page table
+            bool all_clear = true;
+            for (u32 i = 0; i <= 0x1ff; i++) {
+                if (!page_table[i].is_null()) {
+                    all_clear = false;
+                    break;
+                }
+            }
+            if (all_clear) {
+                pde.clear();
+                auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
+                ASSERT(result);
+#ifdef MM_DEBUG
+                dbg() << "MM: Released page table for " << VirtualAddress(vaddr.get() & ~0x1fffff);
+#endif
+            }
+        }
+    }
+}
+
 void MemoryManager::initialize(u32 cpu)
 {
     auto mm_data = new MemoryManagerData;
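
The shifts and masks in release_pte above follow the 32-bit PAE paging layout: bits 31:30 select one of four page directories, bits 29:21 one of 512 page directory entries, bits 20:12 one of 512 page table entries, and bits 11:0 the offset within a 4 KiB page. A single page table therefore maps 512 * 4 KiB = 2 MiB of virtual address space, which is why vaddr.get() & ~0x1fffff can serve as the HashMap key for the table. A small standalone helper showing the same decomposition (the names are illustrative, not part of the kernel):

#include <cstdint>

struct PagingIndices {
    uint32_t pdpt_index; // bits 31:30, one of 4 page directories
    uint32_t pd_index;   // bits 29:21, one of 512 page directory entries
    uint32_t pt_index;   // bits 20:12, one of 512 page table entries
    uint32_t table_base; // vaddr & ~0x1fffff, base of the 2 MiB region the table maps
};

inline PagingIndices decompose(uint32_t vaddr)
{
    return {
        (vaddr >> 30) & 0x3,
        (vaddr >> 21) & 0x1ff,
        (vaddr >> 12) & 0x1ff,
        vaddr & ~0x1fffffu,
    };
}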

@@ -193,8 +193,9 @@ private:
     PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
     PageTableEntry* quickmap_pt(PhysicalAddress);
 
-    const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
+    PageTableEntry* pte(const PageDirectory&, VirtualAddress);
     PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
+    void release_pte(PageDirectory&, VirtualAddress, bool);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
     RefPtr<PhysicalPage> m_low_page_table;

@@ -66,7 +66,7 @@ private:
     RangeAllocator m_identity_range_allocator;
     RefPtr<PhysicalPage> m_directory_table;
     RefPtr<PhysicalPage> m_directory_pages[4];
-    HashTable<RefPtr<PhysicalPage>> m_physical_pages;
+    HashMap<u32, RefPtr<PhysicalPage>> m_page_tables;
 };
 
 }

@@ -266,10 +266,10 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
-    for (size_t i = 0; i < page_count(); ++i) {
+    size_t count = page_count();
+    for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
-        auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
-        pte.clear();
+        MM.release_pte(*m_page_directory, vaddr, i == count - 1);
 #ifdef MM_DEBUG
         auto* page = physical_page(i);
         dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
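
Passing i == count - 1 from Region::unmap means the 512-entry emptiness scan normally only runs for a region's final page; any table the region merely passes through is checked when the loop clears that table's last slot (index 0x1ff), so every page table a contiguous region touches still gets considered. A tiny helper, again only an illustration and not SerenityOS code, for how many tables a given range can span:

#include <cstdint>

// Number of distinct page tables the range [base, base + size) touches, given
// that each table covers one 2 MiB-aligned window. Assumes the range does not
// wrap around the 32-bit address space.
inline uint32_t page_tables_touched(uint32_t base, uint32_t size)
{
    if (size == 0)
        return 0;
    uint32_t first = base & ~0x1fffffu;
    uint32_t last = (base + size - 1) & ~0x1fffffu;
    return (last - first) / 0x200000u + 1;
}

For example, page_tables_touched(0x1ff000, 0x2000) is 2: a two-page region straddling a 2 MiB boundary clears entries in two different tables, and each of them becomes a candidate for release during that one unmap.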