mirror of
https://github.com/RGBCube/serenity
synced 2025-05-31 13:38:11 +00:00

This patch greatly simplifies VMObject locking by doing two things:

1. Giving VMObject an IntrusiveList of all its mapping Region objects.
2. Removing VMObject::m_paging_lock in favor of VMObject::m_lock.

Before (1), VMObject::for_each_region() was forced to acquire the global MM lock, since it worked by walking MemoryManager's list of all regions and checking which ones pointed back at the VMObject. With each VMObject keeping its own list of Regions, VMObject's own m_lock is all we need.

Before (2), page fault handlers used a separate mutex to prevent overlapping work. That design required multiple temporary unlocks and was extremely hard to reason about. Page fault handlers now use VMObject's own m_lock as well.
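To make the shape of this change concrete, here is a minimal, self-contained sketch of the two ideas. It is illustrative only, not the kernel's code: std::mutex stands in for the kernel's lock type, and hand-rolled prev/next pointers stand in for AK::IntrusiveList; apart from the VMObject, Region, m_lock, and for_each_region() names taken from the commit message, everything below is hypothetical.

#include <mutex>

class VMObject;

// Each Region carries its own list links, so a VMObject can track its
// mappings without any allocation (the intrusive-list idea).
class Region {
public:
    explicit Region(VMObject&);
    ~Region();

private:
    friend class VMObject;
    VMObject& m_vmobject;
    Region* m_prev { nullptr };
    Region* m_next { nullptr };
};

class VMObject {
public:
    // With the list stored on the VMObject itself, iterating its regions
    // only needs m_lock -- no global MemoryManager lock.
    template<typename Callback>
    void for_each_region(Callback callback)
    {
        std::lock_guard guard(m_lock);
        for (auto* region = m_regions_head; region; region = region->m_next)
            callback(*region);
    }

private:
    friend class Region;

    void add_region(Region& region)
    {
        std::lock_guard guard(m_lock);
        region.m_next = m_regions_head;
        if (m_regions_head)
            m_regions_head->m_prev = &region;
        m_regions_head = &region;
    }

    void remove_region(Region& region)
    {
        std::lock_guard guard(m_lock);
        if (region.m_prev)
            region.m_prev->m_next = region.m_next;
        else
            m_regions_head = region.m_next;
        if (region.m_next)
            region.m_next->m_prev = region.m_prev;
    }

    // One lock now guards both the region list and the paging state that
    // the old m_paging_lock used to protect.
    std::mutex m_lock;
    Region* m_regions_head { nullptr };
};

inline Region::Region(VMObject& vmobject)
    : m_vmobject(vmobject)
{
    m_vmobject.add_region(*this);
}

inline Region::~Region()
{
    m_vmobject.remove_region(*this);
}

Regions register and unregister themselves on construction and destruction, so walking a VMObject's mappings never has to consult global MemoryManager state.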
72 lines · 2.4 KiB · C++
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/NonnullRefPtrVector.h>
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Process.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>

namespace Kernel {

KResultOr<FlatPtr> Process::sys$purge(int mode)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    REQUIRE_NO_PROMISES;
    if (!is_superuser())
        return EPERM;
    int purged_page_count = 0;
    if (mode & PURGE_ALL_VOLATILE) {
        // Snapshot the anonymous VMObjects first so the actual purging
        // happens outside the MemoryManager's VMObject iteration.
        NonnullRefPtrVector<AnonymousVMObject> vmobjects;
        {
            KResult result(KSuccess);
            MM.for_each_vmobject([&](auto& vmobject) {
                if (vmobject.is_anonymous()) {
                    // In the event that the append fails, only attempt to continue
                    // the purge if we have already appended something successfully.
                    if (!vmobjects.try_append(static_cast<AnonymousVMObject&>(vmobject)) && vmobjects.is_empty()) {
                        result = ENOMEM;
                        return IterationDecision::Break;
                    }
                }
                return IterationDecision::Continue;
            });

            if (result.is_error())
                return result.error();
        }
        for (auto& vmobject : vmobjects) {
            purged_page_count += vmobject.purge();
        }
    }
    if (mode & PURGE_ALL_CLEAN_INODE) {
        // Same pattern for inode-backed VMObjects: collect first, then release.
        NonnullRefPtrVector<InodeVMObject> vmobjects;
        {
            KResult result(KSuccess);
            MM.for_each_vmobject([&](auto& vmobject) {
                if (vmobject.is_inode()) {
                    // In the event that the append fails, only attempt to continue
                    // the purge if we have already appended something successfully.
                    if (!vmobjects.try_append(static_cast<InodeVMObject&>(vmobject)) && vmobjects.is_empty()) {
                        result = ENOMEM;
                        return IterationDecision::Break;
                    }
                }
                return IterationDecision::Continue;
            });

            if (result.is_error())
                return result.error();
        }
        for (auto& vmobject : vmobjects) {
            purged_page_count += vmobject.release_all_clean_pages();
        }
    }
    return purged_page_count;
}

}
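For context on how this syscall is reached from userspace, here is a minimal caller. It is a sketch, assuming the purge() LibC wrapper and the PURGE_ALL_VOLATILE / PURGE_ALL_CLEAN_INODE flags from <serenity.h>; Serenity's own purge utility works along these lines.

#include <serenity.h>
#include <stdio.h>

int main()
{
    // sys$purge returns the number of pages reclaimed; the LibC wrapper
    // returns -1 with errno set on failure (e.g. EPERM for non-superusers).
    int purged = purge(PURGE_ALL_VOLATILE | PURGE_ALL_CLEAN_INODE);
    if (purged < 0) {
        perror("purge");
        return 1;
    }
    printf("Purged %d pages!\n", purged);
    return 0;
}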