
Kernel: Don't remap all regions from Region::remap_vmobject_page()

When handling a page fault, we only need to remap the faulting region in
the current process. There's no need to traverse *all* regions that map
the same VMObject and remap them cross-process as well.

Those other regions will get remapped lazily by their own page fault
handlers eventually. Or maybe they won't and we avoided some work. :^)
Author: Andreas Kling, 2022-08-18 15:23:56 +02:00
parent 45e6123de8
commit 27c1135d30
2 changed files with 8 additions and 19 deletions
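To make the lazy-remap behaviour described in the commit message concrete, here is a small user-space sketch. It is not kernel code: VMObject and Region below are toy stand-ins with invented members, and handle_fault is a hypothetical fault handler. The point it illustrates is that a fault refreshes only the faulting region's own mapping, while a sibling region sharing the same VMObject stays unmapped until it takes its own fault.

// Illustrative user-space model only -- not SerenityOS kernel code.
// It mimics the behaviour described in the commit message: when one
// region faults, only that region's mapping is refreshed; sibling
// regions mapping the same VMObject stay unmapped until they fault
// themselves. All names and members here are simplified stand-ins.
#include <cstdio>
#include <memory>
#include <optional>
#include <vector>

struct VMObject {
    // "Physical pages" backing the object; nullopt = not yet allocated.
    std::vector<std::optional<int>> physical_pages;
    explicit VMObject(size_t page_count) : physical_pages(page_count) {}
};

struct Region {
    std::shared_ptr<VMObject> vmobject;
    std::vector<bool> mapped; // this region's own "page table" entries
    char const* name;

    Region(std::shared_ptr<VMObject> vmo, char const* n)
        : vmobject(std::move(vmo))
        , mapped(vmobject->physical_pages.size(), false)
        , name(n)
    {
    }

    // Post-commit behaviour: refresh only this region's mapping.
    void remap_vmobject_page(size_t page_index)
    {
        mapped[page_index] = true;
        std::printf("[%s] remapped page %zu (other regions untouched)\n", name, page_index);
    }

    // A page fault against this region: make sure the backing page
    // exists, then map it for *this* region only.
    int handle_fault(size_t page_index)
    {
        if (!vmobject->physical_pages[page_index])
            vmobject->physical_pages[page_index] = 42; // "allocate" the backing page
        if (!mapped[page_index])
            remap_vmobject_page(page_index);
        return *vmobject->physical_pages[page_index];
    }

    bool is_mapped(size_t page_index) const { return mapped[page_index]; }
};

int main()
{
    auto vmo = std::make_shared<VMObject>(4);
    Region a(vmo, "region A");
    Region b(vmo, "region B");

    a.handle_fault(2); // A faults: only A maps page 2
    std::printf("B has page 2 mapped? %s\n", b.is_mapped(2) ? "yes" : "no");

    b.handle_fault(2); // B maps page 2 lazily, when it faults itself
    std::printf("B has page 2 mapped? %s\n", b.is_mapped(2) ? "yes" : "no");
}

In this model, after region A faults on page 2, region B still reports the page as unmapped; B maps it only when it faults on the page itself, which is exactly the work the commit defers (or avoids entirely).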

Kernel/Memory/Region.cpp

@@ -237,14 +237,16 @@ bool Region::map_individual_page_impl(size_t page_index)
     return true;
 }
-bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
+bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
 {
+    if (!m_page_directory)
+        return true; // not an error, region may have not yet mapped it
+    if (!translate_vmobject_page(page_index))
+        return true; // not an error, region doesn't map this page
     SpinlockLocker page_lock(m_page_directory->get_lock());
-    SpinlockLocker lock(s_mm_lock);
-    // NOTE: `page_index` is a VMObject page index, so first we convert it to a Region page index.
-    if (!translate_vmobject_page(page_index))
-        return false;
+    SpinlockLocker mm_lock(s_mm_lock);
+    SpinlockLocker lock(m_vmobject->m_lock);
     VERIFY(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
@@ -252,18 +254,6 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
-bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
-{
-    auto& vmobject = this->vmobject();
-    bool success = true;
-    SpinlockLocker lock(vmobject.m_lock);
-    vmobject.for_each_region([&](auto& region) {
-        if (!region.do_remap_vmobject_page(page_index, with_flush))
-            success = false;
-    });
-    return success;
-}
 void Region::unmap(ShouldFlushTLB should_flush_tlb)
 {
     if (!m_page_directory)
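One detail of the new hunk above: the MM lock's guard is renamed from lock to mm_lock, presumably because a second guard, for m_vmobject->m_lock, now lives in the same scope and two locals cannot share a name. These SpinlockLocker objects are scoped (RAII) guards, released in reverse order of construction when the function returns. Below is a minimal stand-alone sketch of that pattern, with std::mutex and std::scoped_lock standing in for the kernel's Spinlock and SpinlockLocker; the mutex names are invented for illustration.

// Stand-alone sketch of the scoped (RAII) locking pattern used above.
// std::mutex and std::scoped_lock stand in for the kernel's Spinlock
// and SpinlockLocker; only the scoping and naming are the point here.
#include <cstdio>
#include <mutex>

std::mutex page_directory_lock; // stand-in for m_page_directory->get_lock()
std::mutex mm_lock_impl;        // stand-in for s_mm_lock
std::mutex vmobject_lock;       // stand-in for m_vmobject->m_lock

bool remap_one_page()
{
    // Three guards in one scope need three distinct names -- hence the
    // rename of `lock` to `mm_lock` once the VMObject guard was added.
    std::scoped_lock page_lock(page_directory_lock);
    std::scoped_lock mm_lock(mm_lock_impl);
    std::scoped_lock lock(vmobject_lock);

    std::puts("all three locks held; doing the per-region remap");
    return true;
    // Guards unlock in reverse order of construction as they go out of scope.
}

int main()
{
    remap_one_page();
}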

Kernel/Memory/Region.h

@@ -200,7 +200,6 @@ private:
     Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
     [[nodiscard]] bool remap_vmobject_page(size_t page_index, bool with_flush = true);
-    [[nodiscard]] bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);
     void set_access_bit(Access access, bool b)
     {