1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 15:57:45 +00:00

Kernel: Reduce time under VMObject lock while handling zero faults

We only need to hold the VMObject lock while inspecting and/or updating
the physical page array in the VMObject.
This commit is contained in:
Andreas Kling 2022-08-19 12:51:52 +02:00
parent a84d893af8
commit 5ada38f9c3
2 changed files with 27 additions and 20 deletions

View file

@@ -392,7 +392,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
auto phys_page = physical_page(page_index_in_region); auto phys_page = physical_page(page_index_in_region);
if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) { if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr()); dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
return handle_zero_fault(page_index_in_region); return handle_zero_fault(page_index_in_region, *phys_page);
} }
return handle_cow_fault(page_index_in_region); return handle_cow_fault(page_index_in_region);
} }
@@ -400,42 +400,49 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
return PageFaultResponse::ShouldCrash; return PageFaultResponse::ShouldCrash;
} }
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region) PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, PhysicalPage& page_in_slot_at_time_of_fault)
{ {
VERIFY(vmobject().is_anonymous()); VERIFY(vmobject().is_anonymous());
SpinlockLocker locker(vmobject().m_lock);
auto& page_slot = physical_page_slot(page_index_in_region);
auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region); auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
if (!remap_vmobject_page(page_index_in_vmobject, *page_slot))
return PageFaultResponse::OutOfMemory;
return PageFaultResponse::Continue;
}
auto current_thread = Thread::current(); auto current_thread = Thread::current();
if (current_thread != nullptr) if (current_thread != nullptr)
current_thread->did_zero_fault(); current_thread->did_zero_fault();
if (page_slot->is_lazy_committed_page()) { RefPtr<PhysicalPage> new_physical_page;
if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
VERIFY(m_vmobject->is_anonymous()); VERIFY(m_vmobject->is_anonymous());
page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({}); new_physical_page = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr()); dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", new_physical_page->paddr());
} else { } else {
auto page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes); auto page_or_error = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes);
if (page_or_error.is_error()) { if (page_or_error.is_error()) {
dmesgln("MM: handle_zero_fault was unable to allocate a physical page"); dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
return PageFaultResponse::OutOfMemory; return PageFaultResponse::OutOfMemory;
} }
page_slot = page_or_error.release_value(); new_physical_page = page_or_error.release_value();
dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", page_slot->paddr()); dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", new_physical_page->paddr());
} }
if (!remap_vmobject_page(page_index_in_vmobject, *page_slot)) { bool already_handled = false;
dmesgln("MM: handle_zero_fault was unable to allocate a page table to map {}", page_slot);
{
SpinlockLocker locker(vmobject().m_lock);
auto& page_slot = physical_page_slot(page_index_in_region);
already_handled = !page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page();
if (already_handled) {
// Someone else already faulted in a new page in this slot. That's fine, we'll just remap with their page.
new_physical_page = page_slot;
} else {
// Install the newly allocated page into the VMObject.
page_slot = new_physical_page;
}
}
if (!remap_vmobject_page(page_index_in_vmobject, *new_physical_page)) {
dmesgln("MM: handle_zero_fault was unable to allocate a page table to map {}", new_physical_page);
return PageFaultResponse::OutOfMemory; return PageFaultResponse::OutOfMemory;
} }
return PageFaultResponse::Continue; return PageFaultResponse::Continue;

View file

@@ -211,7 +211,7 @@ private:
[[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index); [[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index);
[[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index); [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index);
[[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index); [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
[[nodiscard]] bool map_individual_page_impl(size_t page_index); [[nodiscard]] bool map_individual_page_impl(size_t page_index);
[[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>); [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);