
Kernel: Simplify VMObject locking & page fault handlers

This patch greatly simplifies VMObject locking by doing two things:

1. Giving VMObject an IntrusiveList of all its mapping Region objects.
2. Removing VMObject::m_paging_lock in favor of VMObject::m_lock

Before (1), VMObject::for_each_region() was forced to acquire the
global MM lock, since it worked by walking MemoryManager's list of
all regions and picking out the ones that pointed back at this
VMObject.

With each VMObject having its own list of Regions, VMObject's own
m_lock is all we need.
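
To illustrate the shape of change (1), here is a simplified sketch
using standard-library stand-ins; it is not the actual kernel
declarations, which use AK::IntrusiveList and the kernel's spinlock
type. Each VMObject tracks its own mapping regions and guards them
with its own lock, so iterating mappings no longer touches any
global state:

#include <list>
#include <mutex>

// Simplified stand-ins: the real kernel code uses AK::IntrusiveList
// (no heap allocation) and a spinlock rather than std::list/std::mutex.
class Region { };

class VMObject {
public:
    // Regions register themselves when mapped and unregister when unmapped.
    void add_region(Region& region)
    {
        std::scoped_lock locker(m_lock);
        m_regions.push_back(&region);
    }

    void remove_region(Region& region)
    {
        std::scoped_lock locker(m_lock);
        m_regions.remove(&region);
    }

    // Iterating the mappings now only takes this VMObject's own lock,
    // not the global MemoryManager lock.
    template<typename Callback>
    void for_each_region(Callback callback)
    {
        std::scoped_lock locker(m_lock);
        for (auto* region : m_regions)
            callback(*region);
    }

private:
    std::mutex m_lock;
    std::list<Region*> m_regions;
};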

Before (2), page fault handlers used a separate mutex to prevent
overlapping work. This design required multiple temporary unlocks
and was generally extremely hard to reason about.

Instead, page fault handlers now use VMObject's own m_lock as well.
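
A condensed, hypothetical sketch of change (2) follows; the names,
types, and allocation helper are illustrative stand-ins, not the
kernel's actual fault-handling API. The point is that the VMObject's
single lock both protects its page slots and serializes concurrent
faults, so no separate paging lock and no temporary unlock/relock
dance is needed:

#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

// Illustrative stand-ins; the kernel has its own spinlock and physical
// page types rather than std::mutex and std::shared_ptr.
struct PhysicalPage { };
enum class PageFaultResponse { Continue, OutOfMemory };

struct VMObject {
    std::mutex lock;                                  // the single m_lock
    std::vector<std::shared_ptr<PhysicalPage>> pages; // physical page slots
};

// A zero-fill fault handler after the change: it simply takes the
// VMObject's own lock for the duration of the fault.
PageFaultResponse handle_zero_fault(VMObject& vmobject, size_t page_index)
{
    std::scoped_lock locker(vmobject.lock);

    // Re-check under the lock: another CPU may have resolved an
    // overlapping fault on the same page while we were waiting.
    if (vmobject.pages[page_index])
        return PageFaultResponse::Continue;

    // Stands in for allocating and mapping a zero-filled physical page.
    // (In the kernel the allocation can fail and return null; std::make_shared
    // would throw instead, so this check only mirrors the kernel pattern.)
    auto page = std::make_shared<PhysicalPage>();
    if (!page)
        return PageFaultResponse::OutOfMemory;

    vmobject.pages[page_index] = std::move(page);
    return PageFaultResponse::Continue;
}
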
commit 082ed6f417 (parent 64babcaa83)
Andreas Kling, 2021-07-23 02:40:16 +02:00
10 changed files with 116 additions and 155 deletions


@@ -153,27 +153,6 @@ AnonymousVMObject::~AnonymousVMObject()
}
int AnonymousVMObject::purge()
{
MutexLocker locker(m_paging_lock);
return purge_impl();
}
int AnonymousVMObject::purge_with_interrupts_disabled(Badge<MemoryManager>)
{
VERIFY_INTERRUPTS_DISABLED();
if (m_paging_lock.is_locked())
return 0;
return purge_impl();
}
void AnonymousVMObject::set_was_purged(VolatilePageRange const& range)
{
VERIFY(m_lock.is_locked());
for (auto* purgeable_ranges : m_purgeable_ranges)
purgeable_ranges->set_was_purged(range);
}
int AnonymousVMObject::purge_impl()
{
int purged_page_count = 0;
ScopedSpinLock lock(m_lock);
@@ -193,30 +172,35 @@ int AnonymousVMObject::purge_impl()
purged_page_count += purged_in_range;
set_was_purged(range);
for_each_region([&](auto& region) {
if (&region.vmobject() == this) {
if (auto owner = region.get_owner()) {
// we need to hold a reference to the process here (if there is one) as we may not own this region
dmesgln("Purged {} pages from region {} owned by {} at {} - {}",
purged_in_range,
region.name(),
*owner,
region.vaddr_from_page_index(range.base),
region.vaddr_from_page_index(range.base + range.count));
} else {
dmesgln("Purged {} pages from region {} (no ownership) at {} - {}",
purged_in_range,
region.name(),
region.vaddr_from_page_index(range.base),
region.vaddr_from_page_index(range.base + range.count));
}
region.remap_vmobject_page_range(range.base, range.count);
if (auto owner = region.get_owner()) {
// we need to hold a reference to the process here (if there is one) as we may not own this region
dmesgln("Purged {} pages from region {} owned by {} at {} - {}",
purged_in_range,
region.name(),
*owner,
region.vaddr_from_page_index(range.base),
region.vaddr_from_page_index(range.base + range.count));
} else {
dmesgln("Purged {} pages from region {} (no ownership) at {} - {}",
purged_in_range,
region.name(),
region.vaddr_from_page_index(range.base),
region.vaddr_from_page_index(range.base + range.count));
}
region.remap_vmobject_page_range(range.base, range.count);
});
}
});
return purged_page_count;
}
void AnonymousVMObject::set_was_purged(VolatilePageRange const& range)
{
VERIFY(m_lock.is_locked());
for (auto* purgeable_ranges : m_purgeable_ranges)
purgeable_ranges->set_was_purged(range);
}
void AnonymousVMObject::register_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
{
ScopedSpinLock lock(m_lock);