
x86: Simplify region unmapping a bit

Add PageTableEntry::clear() to zero out a whole PTE, and use that for
unmapping instead of clearing individual fields.
commit a9d7902bb7
parent 3b95d61b22
Author: Andreas Kling
Date:   2020-02-08 12:49:00 +01:00

3 changed files with 5 additions and 8 deletions


@@ -215,6 +215,8 @@ public:
     bool is_execute_disabled() const { return raw() & NoExecute; }
     void set_execute_disabled(bool b) { set_bit(NoExecute, b); }
 
+    void clear() { m_raw = 0; }
+
     void set_bit(u64 bit, bool value)
     {
         if (value)
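The helper works because a PageTableEntry is ultimately one raw 64-bit word: the physical page base and every flag (Present, Writable, UserAllowed, ...) live in that word, so m_raw = 0 unmaps and drops all permissions in a single store. A self-contained sketch of the idea (class and accessor names mirror the diff; the bit positions shown are the architectural x86 ones, and main() is added here purely for illustration):

#include <cstdint>
#include <cstdio>

// Minimal model of a 64-bit x86 page table entry, echoing the
// accessors visible in the diff above.
class PageTableEntry {
public:
    enum Flags : uint64_t {
        Present = 1ull << 0,        // architectural bit 0
        ReadWrite = 1ull << 1,      // architectural bit 1
        UserSupervisor = 1ull << 2, // architectural bit 2
    };

    uint64_t raw() const { return m_raw; }

    void set_bit(uint64_t bit, bool value)
    {
        if (value)
            m_raw |= bit;
        else
            m_raw &= ~bit;
    }

    void set_present(bool b) { set_bit(Present, b); }
    void set_writable(bool b) { set_bit(ReadWrite, b); }
    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

    // The new helper from this commit: one store instead of several
    // read-modify-write cycles on individual flags.
    void clear() { m_raw = 0; }

private:
    uint64_t m_raw { 0 };
};

int main()
{
    PageTableEntry pte;
    pte.set_present(true);
    pte.set_writable(true);
    pte.set_user_allowed(true);
    std::printf("before clear: %#llx\n", (unsigned long long)pte.raw());
    pte.clear(); // subsumes set_present(false), set_writable(false), ...
    std::printf("after clear:  %#llx\n", (unsigned long long)pte.raw());
}

Because the Present bit is part of the zeroed word, the MMU treats a cleared entry as not-present, so any later access faults instead of using stale flags.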


@@ -559,8 +559,7 @@ void MemoryManager::unquickmap_page()
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(m_quickmap_in_use);
     auto& pte = boot_pd3_pde1023_pt[0];
-    pte.set_physical_page_base(0);
-    pte.set_present(false);
+    pte.clear();
     flush_tlb(VirtualAddress(0xffe00000));
     m_quickmap_in_use = false;
 }
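Zeroing the PTE alone does not finish the job: the CPU may still hold the old translation in its TLB, which is why each clear() is paired with a flush_tlb() for the affected virtual address. On x86 a per-page flush is typically a single invlpg; a minimal sketch of such a wrapper (the real SerenityOS helper takes a VirtualAddress and may differ in detail):

#include <cstdint>

// Invalidate the TLB entry caching the translation for one virtual
// address, so the next access re-walks the (now cleared) page tables.
inline void flush_tlb(uintptr_t vaddr)
{
    asm volatile("invlpg (%0)" ::"r"(vaddr) : "memory");
}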


@@ -255,8 +255,7 @@ void Region::map_individual_page_impl(size_t page_index)
     auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     auto& physical_page = vmobject().physical_pages()[first_page_index() + page_index];
     if (!physical_page) {
-        pte.set_physical_page_base(0);
-        pte.set_present(false);
+        pte.clear();
     } else {
         pte.set_cache_disabled(!m_cacheable);
         pte.set_physical_page_base(physical_page->paddr().get());
@@ -290,10 +289,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
     for (size_t i = 0; i < page_count(); ++i) {
         auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
         auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
-        pte.set_physical_page_base(0);
-        pte.set_present(false);
-        pte.set_writable(false);
-        pte.set_user_allowed(false);
+        pte.clear();
         MM.flush_tlb(vaddr);
 #ifdef MM_DEBUG
         auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
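With clear() in place, unmapping reduces to a two-step loop per page: zero the PTE (one store replacing the four flag writes removed above), then flush its TLB entry. A toy, self-contained rendering of that shape (unmap_region(), the map-based PageTable, and the stubbed flush_tlb() are stand-ins invented here; the real code walks hardware paging structures via MM.ensure_pte()):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <unordered_map>

static constexpr size_t PAGE_SIZE = 4096;

// Toy PTE: just the raw word, as in the sketch above.
struct PageTableEntry {
    uint64_t raw { 0 };
    void clear() { raw = 0; }
};

// Toy "page table": maps page-aligned vaddrs to entries. The real code
// reaches the hardware entry via MM.ensure_pte(*m_page_directory, vaddr).
using PageTable = std::unordered_map<uintptr_t, PageTableEntry>;

static void flush_tlb(uintptr_t) { /* would be invlpg on real hardware */ }

// Shape of Region::unmap() after this commit: clear, then flush, per page.
static void unmap_region(PageTable& table, uintptr_t base, size_t page_count)
{
    for (size_t i = 0; i < page_count; ++i) {
        uintptr_t vaddr = base + i * PAGE_SIZE;
        table[vaddr].clear(); // address and all flags go to zero at once
        flush_tlb(vaddr);     // evict the stale translation
    }
}

int main()
{
    PageTable table;
    table[0x10000].raw = 0x10067; // pretend-mapped page: base | flag bits
    unmap_region(table, 0x10000, 1);
    std::printf("pte after unmap: %#llx\n", (unsigned long long)table[0x10000].raw);
}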