mirror of
https://github.com/RGBCube/serenity
synced 2025-05-31 00:58:12 +00:00
Kernel: Move page remapping into Region::remap_page(index)
Let Region deal with this, instead of everyone calling MemoryManager.
This commit is contained in:
parent
dc35b1d647
commit
fe455c5ac4
4 changed files with 31 additions and 30 deletions
|
@ -302,7 +302,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
|
||||||
#ifdef PAGE_FAULT_DEBUG
|
#ifdef PAGE_FAULT_DEBUG
|
||||||
dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
|
dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
|
||||||
#endif
|
#endif
|
||||||
remap_region_page(region, page_index_in_region);
|
region.remap_page(page_index_in_region);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -314,7 +314,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
|
||||||
dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get());
|
dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get());
|
||||||
#endif
|
#endif
|
||||||
vmo.physical_pages()[page_index_in_region] = move(physical_page);
|
vmo.physical_pages()[page_index_in_region] = move(physical_page);
|
||||||
remap_region_page(region, page_index_in_region);
|
region.remap_page(page_index_in_region);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -327,7 +327,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
|
||||||
dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
|
dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
|
||||||
#endif
|
#endif
|
||||||
region.set_should_cow(page_index_in_region, false);
|
region.set_should_cow(page_index_in_region, false);
|
||||||
remap_region_page(region, page_index_in_region);
|
region.remap_page(page_index_in_region);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -348,7 +348,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
|
||||||
vmo.physical_pages()[page_index_in_region] = move(physical_page);
|
vmo.physical_pages()[page_index_in_region] = move(physical_page);
|
||||||
unquickmap_page();
|
unquickmap_page();
|
||||||
region.set_should_cow(page_index_in_region, false);
|
region.set_should_cow(page_index_in_region, false);
|
||||||
remap_region_page(region, page_index_in_region);
|
region.remap_page(page_index_in_region);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -372,7 +372,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
|
||||||
#ifdef PAGE_FAULT_DEBUG
|
#ifdef PAGE_FAULT_DEBUG
|
||||||
dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
|
dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
|
||||||
#endif
|
#endif
|
||||||
remap_region_page(region, page_index_in_region);
|
region.remap_page(page_index_in_region);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -400,7 +400,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
|
||||||
kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
|
kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
remap_region_page(region, page_index_in_region);
|
region.remap_page(page_index_in_region);
|
||||||
u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
|
u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
|
||||||
memcpy(dest_ptr, page_buffer, PAGE_SIZE);
|
memcpy(dest_ptr, page_buffer, PAGE_SIZE);
|
||||||
return true;
|
return true;
|
||||||
|
@ -672,27 +672,6 @@ void MemoryManager::unquickmap_page()
|
||||||
m_quickmap_in_use = false;
|
m_quickmap_in_use = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region)
|
|
||||||
{
|
|
||||||
ASSERT(region.page_directory());
|
|
||||||
InterruptDisabler disabler;
|
|
||||||
auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
|
|
||||||
auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
|
|
||||||
auto& physical_page = region.vmobject().physical_pages()[page_index_in_region];
|
|
||||||
ASSERT(physical_page);
|
|
||||||
pte.set_physical_page_base(physical_page->paddr().get());
|
|
||||||
pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
|
|
||||||
if (region.should_cow(page_index_in_region))
|
|
||||||
pte.set_writable(false);
|
|
||||||
else
|
|
||||||
pte.set_writable(region.is_writable());
|
|
||||||
pte.set_user_allowed(region.is_user_accessible());
|
|
||||||
region.page_directory()->flush(page_vaddr);
|
|
||||||
#ifdef MM_DEBUG
|
|
||||||
dbg() << "MM: >> remap_region_page (PD=" << region.page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << region.name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
|
void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
|
||||||
{
|
{
|
||||||
InterruptDisabler disabler;
|
InterruptDisabler disabler;
|
||||||
|
|
|
@ -98,8 +98,6 @@ private:
|
||||||
void register_region(Region&);
|
void register_region(Region&);
|
||||||
void unregister_region(Region&);
|
void unregister_region(Region&);
|
||||||
|
|
||||||
void remap_region_page(Region&, unsigned page_index_in_region);
|
|
||||||
|
|
||||||
void initialize_paging();
|
void initialize_paging();
|
||||||
void flush_entire_tlb();
|
void flush_entire_tlb();
|
||||||
void flush_tlb(VirtualAddress);
|
void flush_tlb(VirtualAddress);
|
||||||
|
|
|
@ -96,7 +96,7 @@ int Region::commit()
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
vmobject().physical_pages()[i] = move(physical_page);
|
vmobject().physical_pages()[i] = move(physical_page);
|
||||||
MM.remap_region_page(*this, i);
|
remap_page(i - first_page_index());
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -169,3 +169,25 @@ Bitmap& Region::ensure_cow_map() const
|
||||||
m_cow_map = make<Bitmap>(page_count(), true);
|
m_cow_map = make<Bitmap>(page_count(), true);
|
||||||
return *m_cow_map;
|
return *m_cow_map;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Region::remap_page(size_t index)
|
||||||
|
{
|
||||||
|
ASSERT(page_directory());
|
||||||
|
InterruptDisabler disabler;
|
||||||
|
auto page_vaddr = vaddr().offset(index * PAGE_SIZE);
|
||||||
|
auto& pte = MM.ensure_pte(*page_directory(), page_vaddr);
|
||||||
|
auto& physical_page = vmobject().physical_pages()[first_page_index() + index];
|
||||||
|
ASSERT(physical_page);
|
||||||
|
pte.set_physical_page_base(physical_page->paddr().get());
|
||||||
|
pte.set_present(true);
|
||||||
|
if (should_cow(index))
|
||||||
|
pte.set_writable(false);
|
||||||
|
else
|
||||||
|
pte.set_writable(is_writable());
|
||||||
|
pte.set_user_allowed(is_user_accessible());
|
||||||
|
page_directory()->flush(page_vaddr);
|
||||||
|
#ifdef MM_DEBUG
|
||||||
|
dbg() << "MM: >> region.remap_page (PD=" << page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
|
||||||
|
#endif
|
||||||
|
|
||||||
|
}
|
||||||
|
|
|
@ -114,6 +114,8 @@ public:
|
||||||
m_access &= ~Access::Write;
|
m_access &= ~Access::Write;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void remap_page(size_t index);
|
||||||
|
|
||||||
// For InlineLinkedListNode
|
// For InlineLinkedListNode
|
||||||
Region* m_next { nullptr };
|
Region* m_next { nullptr };
|
||||||
Region* m_prev { nullptr };
|
Region* m_prev { nullptr };
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue