diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h index bb933eea46..f159f01a5a 100644 --- a/Kernel/Arch/i386/CPU.h +++ b/Kernel/Arch/i386/CPU.h @@ -140,6 +140,7 @@ public: m_raw |= value & 0xfffff000; } + bool is_null() const { return m_raw == 0; } void clear() { m_raw = 0; } u64 raw() const { return m_raw; } @@ -234,6 +235,7 @@ public: bool is_execute_disabled() const { return raw() & NoExecute; } void set_execute_disabled(bool b) { set_bit(NoExecute, b); } + bool is_null() const { return m_raw == 0; } void clear() { m_raw = 0; } void set_bit(u64 bit, bool value) diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp index 3856a72b1d..fc8f8da12a 100644 --- a/Kernel/VM/MemoryManager.cpp +++ b/Kernel/VM/MemoryManager.cpp @@ -188,7 +188,7 @@ void MemoryManager::parse_memory_map() ASSERT(m_user_physical_pages > 0); } -const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr) +PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr) { ASSERT_INTERRUPTS_DISABLED(); ASSERT(s_mm_lock.own_lock()); @@ -227,13 +227,53 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual pde.set_present(true); pde.set_writable(true); pde.set_global(&page_directory == m_kernel_page_directory.ptr()); - auto result = page_directory.m_physical_pages.set(move(page_table)); + // Use page_directory_table_index and page_directory_index as key + // This allows us to release the page table entry when no longer needed + auto result = page_directory.m_page_tables.set(vaddr.get() & ~0x1fffff, move(page_table)); ASSERT(result == AK::HashSetResult::InsertedNewEntry); } return quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index]; } +void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release) +{ + ASSERT_INTERRUPTS_DISABLED(); + ASSERT(s_mm_lock.own_lock()); + u32 page_directory_table_index = 
(vaddr.get() >> 30) & 0x3; + u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff; + u32 page_table_index = (vaddr.get() >> 12) & 0x1ff; + + auto* pd = quickmap_pd(page_directory, page_directory_table_index); + PageDirectoryEntry& pde = pd[page_directory_index]; + if (pde.is_present()) { + auto* page_table = quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base())); + auto& pte = page_table[page_table_index]; + pte.clear(); + + if (is_last_release || page_table_index == 0x1ff) { + // If this is the last PTE in a region or the last PTE in a page table then + // check if we can also release the page table + bool all_clear = true; + for (u32 i = 0; i <= 0x1ff; i++) { + if (!page_table[i].is_null()) { + all_clear = false; + break; + } + } + if (all_clear) { + pde.clear(); + + auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff); + ASSERT(result); +#ifdef MM_DEBUG + dbg() << "MM: Released page table for " << VirtualAddress(vaddr.get() & ~0x1fffff); +#endif + } + } + } +} + void MemoryManager::initialize(u32 cpu) { auto mm_data = new MemoryManagerData; diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h index 586caa5d0d..a66bb551d4 100644 --- a/Kernel/VM/MemoryManager.h +++ b/Kernel/VM/MemoryManager.h @@ -193,8 +193,9 @@ private: PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index); PageTableEntry* quickmap_pt(PhysicalAddress); - const PageTableEntry* pte(const PageDirectory&, VirtualAddress); + PageTableEntry* pte(const PageDirectory&, VirtualAddress); PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress); + void release_pte(PageDirectory&, VirtualAddress, bool); RefPtr<PageDirectory> m_kernel_page_directory; RefPtr<PhysicalPage> m_low_page_table; diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h index d6de42b570..98c256b920 100644 --- a/Kernel/VM/PageDirectory.h +++ b/Kernel/VM/PageDirectory.h @@ -66,7 +66,7 @@ private: RangeAllocator m_identity_range_allocator; RefPtr<PhysicalPage> m_directory_table; RefPtr<PhysicalPage> m_directory_pages[4]; - 
HashTable<RefPtr<PhysicalPage>> m_physical_pages; + HashMap<u32, RefPtr<PhysicalPage>> m_page_tables; }; } diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp index 1bb452e090..f0a103d6f5 100644 --- a/Kernel/VM/Region.cpp +++ b/Kernel/VM/Region.cpp @@ -266,10 +266,10 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range) { ScopedSpinLock lock(s_mm_lock); ASSERT(m_page_directory); - for (size_t i = 0; i < page_count(); ++i) { + size_t count = page_count(); + for (size_t i = 0; i < count; ++i) { auto vaddr = vaddr_from_page_index(i); - auto& pte = MM.ensure_pte(*m_page_directory, vaddr); - pte.clear(); + MM.release_pte(*m_page_directory, vaddr, i == count - 1); #ifdef MM_DEBUG auto* page = physical_page(i); dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";