diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp index 0f7f2cbfa2..082666ea69 100644 --- a/Kernel/VM/MemoryManager.cpp +++ b/Kernel/VM/MemoryManager.cpp @@ -302,7 +302,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region) #ifdef PAGE_FAULT_DEBUG dbgprintf("MM: zero_page() but page already present. Fine with me!\n"); #endif - remap_region_page(region, page_index_in_region); + region.remap_page(page_index_in_region); return true; } @@ -314,7 +314,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region) dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get()); #endif vmo.physical_pages()[page_index_in_region] = move(physical_page); - remap_region_page(region, page_index_in_region); + region.remap_page(page_index_in_region); return true; } @@ -327,7 +327,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region) dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n"); #endif region.set_should_cow(page_index_in_region, false); - remap_region_page(region, page_index_in_region); + region.remap_page(page_index_in_region); return true; } @@ -348,7 +348,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region) vmo.physical_pages()[page_index_in_region] = move(physical_page); unquickmap_page(); region.set_should_cow(page_index_in_region, false); - remap_region_page(region, page_index_in_region); + region.remap_page(page_index_in_region); return true; } @@ -372,7 +372,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re #ifdef PAGE_FAULT_DEBUG dbgprintf("MM: page_in_from_inode() but page already present. 
Fine with me!\n"); #endif - remap_region_page(region, page_index_in_region); + region.remap_page(page_index_in_region); return true; } @@ -400,7 +400,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re kprintf("MM: page_in_from_inode was unable to allocate a physical page\n"); return false; } - remap_region_page(region, page_index_in_region); + region.remap_page(page_index_in_region); u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); memcpy(dest_ptr, page_buffer, PAGE_SIZE); return true; @@ -672,27 +672,6 @@ void MemoryManager::unquickmap_page() m_quickmap_in_use = false; } -void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region) -{ - ASSERT(region.page_directory()); - InterruptDisabler disabler; - auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE); - auto& pte = ensure_pte(*region.page_directory(), page_vaddr); - auto& physical_page = region.vmobject().physical_pages()[page_index_in_region]; - ASSERT(physical_page); - pte.set_physical_page_base(physical_page->paddr().get()); - pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here? 
- if (region.should_cow(page_index_in_region)) - pte.set_writable(false); - else - pte.set_writable(region.is_writable()); - pte.set_user_allowed(region.is_user_accessible()); - region.page_directory()->flush(page_vaddr); -#ifdef MM_DEBUG - dbg() << "MM: >> remap_region_page (PD=" << region.page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << region.name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")"; -#endif -} - void MemoryManager::remap_region(PageDirectory& page_directory, Region& region) { InterruptDisabler disabler; diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h index 683a75e5ab..5c6b53bd03 100644 --- a/Kernel/VM/MemoryManager.h +++ b/Kernel/VM/MemoryManager.h @@ -98,8 +98,6 @@ private: void register_region(Region&); void unregister_region(Region&); - void remap_region_page(Region&, unsigned page_index_in_region); - void initialize_paging(); void flush_entire_tlb(); void flush_tlb(VirtualAddress); diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp index 1fae206797..097c698c74 100644 --- a/Kernel/VM/Region.cpp +++ b/Kernel/VM/Region.cpp @@ -96,7 +96,7 @@ int Region::commit() return -ENOMEM; } vmobject().physical_pages()[i] = move(physical_page); - MM.remap_region_page(*this, i); + remap_page(i - first_page_index()); } return 0; } @@ -169,3 +169,25 @@ Bitmap& Region::ensure_cow_map() const m_cow_map = make(page_count(), true); return *m_cow_map; } + +void Region::remap_page(size_t index) +{ + ASSERT(page_directory()); + InterruptDisabler disabler; + auto page_vaddr = vaddr().offset(index * PAGE_SIZE); + auto& pte = MM.ensure_pte(*page_directory(), page_vaddr); + auto& physical_page = vmobject().physical_pages()[first_page_index() + index]; + ASSERT(physical_page); + pte.set_physical_page_base(physical_page->paddr().get()); + pte.set_present(true); + if (should_cow(index)) + pte.set_writable(false); + else + pte.set_writable(is_writable()); + 
pte.set_user_allowed(is_user_accessible()); + page_directory()->flush(page_vaddr); +#ifdef MM_DEBUG + dbg() << "MM: >> region.remap_page (PD=" << page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")"; +#endif + +} diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h index 82b626cf11..f7244b1e9c 100644 --- a/Kernel/VM/Region.h +++ b/Kernel/VM/Region.h @@ -114,6 +114,8 @@ public: m_access &= ~Access::Write; } + void remap_page(size_t index); + // For InlineLinkedListNode Region* m_next { nullptr }; Region* m_prev { nullptr };