
Kernel: Clean up a bunch of wrong-looking Region/VMObject code

Since a Region is merely a "window" onto a VMObject, it can both begin
and end at a distance from the VMObject's boundaries.
Therefore, we should always be computing indices into a VMObject's
physical page array by adding the Region's "first_page_index()".

There was a whole bunch of code that forgot to do that. This fixes
many wrong behaviors for Regions that start part-way into a VMObject.
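
As a rough, standalone illustration of the indexing rule (a minimal sketch, not code from this commit; the page counts and variable names are made up for the example):

    #include <cstdio>

    // Sketch: a "region" that maps a 4-page window starting 4 pages into a
    // 16-page "vmobject". Any Region-relative page index must be offset by the
    // region's first page index before indexing the VMObject's page array.
    int main()
    {
        const unsigned vmobject_page_count = 16;
        const unsigned region_first_page_index = 4; // Region begins part-way into the VMObject
        const unsigned region_page_count = 4;

        for (unsigned page_index_in_region = 0; page_index_in_region < region_page_count; ++page_index_in_region) {
            unsigned page_index_in_vmobject = region_first_page_index + page_index_in_region;
            // Using page_index_in_region directly here would address the wrong
            // physical page whenever region_first_page_index != 0.
            printf("region page %u -> vmobject page %u (of %u)\n",
                page_index_in_region, page_index_in_vmobject, vmobject_page_count);
        }
        return 0;
    }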
Author: Andreas Kling, 2019-11-03 15:41:49 +01:00
Parent: fe455c5ac4
Commit: a221cddeec

2 changed files with 25 additions and 23 deletions


@@ -291,14 +291,15 @@ const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
 bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmobject();
-    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
-    ASSERT(vmo.is_anonymous());
+    ASSERT(region.vmobject().is_anonymous());
+
+    auto& vmobject = region.vmobject();
+    auto& vmobject_physical_page_entry = vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
+
     // NOTE: We don't need to acquire the VMObject's lock.
     // This function is already exclusive due to interrupts being blocked.
-    if (!vmo_page.is_null()) {
+    if (!vmobject_physical_page_entry.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
 #endif
@@ -313,7 +314,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get());
 #endif
-    vmo.physical_pages()[page_index_in_region] = move(physical_page);
+    vmobject_physical_page_entry = move(physical_page);
     region.remap_page(page_index_in_region);
     return true;
 }
@@ -321,8 +322,9 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmobject();
-    if (vmo.physical_pages()[page_index_in_region]->ref_count() == 1) {
+    auto& vmobject = region.vmobject();
+    auto& vmobject_physical_page_entry = vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
+    if (vmobject_physical_page_entry->ref_count() == 1) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
 #endif
@@ -337,7 +339,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> It's a COW page and it's time to COW!\n");
 #endif
-    auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
+    auto physical_page_to_copy = move(vmobject_physical_page_entry);
     auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
     u8* dest_ptr = quickmap_page(*physical_page);
     const u8* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
@@ -345,7 +347,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
     dbgprintf(" >> COW P%p <- P%p\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
 #endif
     memcpy(dest_ptr, src_ptr, PAGE_SIZE);
-    vmo.physical_pages()[page_index_in_region] = move(physical_page);
+    vmobject_physical_page_entry = move(physical_page);
     unquickmap_page();
     region.set_should_cow(page_index_in_region, false);
     region.remap_page(page_index_in_region);
@@ -355,24 +357,23 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
     ASSERT(region.page_directory());
-    auto& vmo = region.vmobject();
-    ASSERT(vmo.is_inode());
+    ASSERT(region.vmobject().is_inode());
 
-    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
-    auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
+    auto& vmobject = region.vmobject();
+    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject);
+    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
 
     InterruptFlagSaver saver;
     sti();
-    LOCKER(vmo.m_paging_lock);
+    LOCKER(vmobject.m_paging_lock);
     cli();
 
-    if (!vmo_page.is_null()) {
+    if (!vmobject_physical_page_entry.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
 #endif
-        region.remap_page( page_index_in_region);
+        region.remap_page(page_index_in_region);
         return true;
     }
@@ -395,8 +396,8 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
         memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
     cli();
-    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
-    if (vmo_page.is_null()) {
+    vmobject_physical_page_entry = allocate_user_physical_page(ShouldZeroFill::No);
+    if (vmobject_physical_page_entry.is_null()) {
         kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
         return false;
     }


@@ -87,16 +87,17 @@ int Region::commit()
 #ifdef MM_DEBUG
     dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
 #endif
-    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmobject().physical_pages()[i].is_null())
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + i];
+        if (!vmobject_physical_page_entry.is_null())
             continue;
         auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (!physical_page) {
             kprintf("MM: commit was unable to allocate a physical page\n");
             return -ENOMEM;
         }
-        vmobject().physical_pages()[i] = move(physical_page);
-        remap_page(i - first_page_index());
+        vmobject_physical_page_entry = move(physical_page);
+        remap_page(i);
     }
     return 0;
 }
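
For reference, a small standalone sketch (not kernel code) checking that the rewritten Region::commit() loop touches the same VMObject page slots as before while passing Region-relative indices to remap_page(); it assumes page_count() equals last_page_index() - first_page_index() + 1, as the diff implies:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main()
    {
        const size_t first_page_index = 4;
        const size_t last_page_index = 7;
        const size_t page_count = last_page_index - first_page_index + 1;

        std::vector<size_t> old_slots, new_slots, old_remaps, new_remaps;

        // Old loop: VMObject-relative indices.
        for (size_t i = first_page_index; i <= last_page_index; ++i) {
            old_slots.push_back(i);                     // vmobject().physical_pages()[i]
            old_remaps.push_back(i - first_page_index); // remap_page(i - first_page_index())
        }

        // New loop: Region-relative indices.
        for (size_t i = 0; i < page_count; ++i) {
            new_slots.push_back(first_page_index + i);  // physical_pages()[first_page_index() + i]
            new_remaps.push_back(i);                    // remap_page(i)
        }

        assert(old_slots == new_slots && old_remaps == new_remaps);
        return 0;
    }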