
This patch greatly simplifies VMObject locking by doing two things:

1. Giving VMObject an IntrusiveList of all its mapping Region objects.
2. Removing VMObject::m_paging_lock in favor of VMObject::m_lock.

Before (1), VMObject::for_each_region() was forced to acquire the global MM lock, since it worked by walking MemoryManager's list of all regions and checking for regions that pointed back to the VMObject. With each VMObject keeping its own list of Regions, VMObject's own m_lock is all we need.

Before (2), page fault handlers used a separate mutex to prevent overlapping work. That design required multiple temporary unlocks and was extremely hard to reason about. Page fault handlers now use VMObject's own m_lock as well.
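A minimal sketch (not the actual kernel code) of what change (1) buys us. The "before" body is reconstructed from the description above; MemoryManager::all_regions() and s_mm_lock are illustrative assumptions, not real accessors from this file:

// Before: finding this VMObject's mappings meant walking every Region in the
// system under the global MM lock. (Reconstruction; the region accessor and
// lock name are assumed.)
template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock mm_locker(s_mm_lock); // global lock, contended by everyone
    for (auto& region : MemoryManager::all_regions()) {
        if (&region.vmobject() == this)
            callback(region);
    }
}

// After: each VMObject carries its own intrusive list of Regions, so its own
// per-object lock suffices (this is the for_each_region() defined below).
template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock locker(m_lock); // per-object lock only
    for (auto& region : m_regions)
        callback(region);
}

Change (2) is analogous: a page fault handler that previously took a separate m_paging_lock mutex, and had to temporarily drop it in places, now simply holds m_lock for the duration of its work.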
125 lines | 3.3 KiB | C++
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/FixedArray.h>
#include <AK/HashTable.h>
#include <AK/IntrusiveList.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Forward.h>
#include <Kernel/Mutex.h>
#include <Kernel/VM/Region.h>

namespace Kernel {

class VMObjectDeletedHandler {
public:
    virtual ~VMObjectDeletedHandler() = default;
    virtual void vmobject_deleted(VMObject&) = 0;
};

class VMObject : public RefCounted<VMObject>
    , public Weakable<VMObject> {
    friend class MemoryManager;
    friend class Region;

public:
    virtual ~VMObject();

    virtual RefPtr<VMObject> try_clone() = 0;

    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
    virtual bool is_shared_inode() const { return false; }
    virtual bool is_private_inode() const { return false; }
    virtual bool is_contiguous() const { return false; }

    size_t page_count() const { return m_physical_pages.size(); }
    Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
    Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }

    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }

    virtual StringView class_name() const = 0;

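    // Regions register themselves here when they are mapped and unregister in
    // remove_region() when they are torn down; both paths take the VMObject's
    // own m_lock, so no global lock is involved.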
    ALWAYS_INLINE void add_region(Region& region)
    {
        ScopedSpinLock locker(m_lock);
        m_regions_count++;
        m_regions.append(region);
    }

    ALWAYS_INLINE void remove_region(Region& region)
    {
        ScopedSpinLock locker(m_lock);
        m_regions_count--;
        m_regions.remove(region);
    }

    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }

    void register_on_deleted_handler(VMObjectDeletedHandler& handler)
    {
        m_on_deleted.set(&handler);
    }
    void unregister_on_deleted_handler(VMObjectDeletedHandler& handler)
    {
        m_on_deleted.remove(&handler);
    }

protected:
    explicit VMObject(size_t);
    explicit VMObject(VMObject const&);

    template<typename Callback>
    void for_each_region(Callback);

    IntrusiveListNode<VMObject> m_list_node;
    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;

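    // Protects m_regions; per the patch description above, page fault handlers
    // now take this lock as well, instead of the old m_paging_lock.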
    mutable RecursiveSpinLock m_lock;

private:
    VMObject& operator=(VMObject const&) = delete;
    VMObject& operator=(VMObject&&) = delete;
    VMObject(VMObject&&) = delete;

    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
    HashTable<VMObjectDeletedHandler*> m_on_deleted;
    SpinLock<u8> m_on_deleted_lock;

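    // All Region objects currently mapping this VMObject, kept as an intrusive
    // list so membership costs no extra allocations.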
    Region::ListInVMObject m_regions;

public:
    using List = IntrusiveList<VMObject, RawPtr<VMObject>, &VMObject::m_list_node>;
};

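// Iterating a VMObject's regions now only needs the object's own m_lock;
// previously this required taking the global MM lock.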
template<typename Callback>
inline void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock lock(m_lock);
    for (auto& region : m_regions) {
        callback(region);
    }
}

inline PhysicalPage const* Region::physical_page(size_t index) const
{
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

inline RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
{
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

}