
This step would ideally not have been necessary (it increases the amount of refactoring and templates needed, which in turn increases build times), but it gives us a couple of nice properties:

- SpinlockProtected inside Singleton (a very common combination) can now obtain any lock rank just via the template parameter (see the sketch after this list). This was not previously possible with the SingletonInstanceCreator magic.
- SpinlockProtected's lock rank is now mandatory; this covers the majority of cases and lets us see where we're still missing proper ranks.
- The type alone tells us what lock rank a lock has, which aids code readability and (possibly, if gdb cooperates) lock-mismatch debugging.
- The rank of a lock can no longer be dynamic, which is not something we wanted in the first place (or ever made use of). Locks randomly changing their rank sounds like a disaster waiting to happen.
- In some places we might be able to statically check that locks are taken in the right order (given the right lock rank checking implementation), since rank information is now fully statically known.

This refactoring further exposes the fact that Mutex has no lock rank capabilities, which is not fixed here.
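As a rough illustration of the first point (a minimal sketch, not code from this commit; Thing and LockRank::Process are placeholder names), the rank is now spelled out in the type of the protected member, matching the SpinlockProtected<..., LockRank::None> declaration in the header below, and access goes through the usual with() callback:

// Hypothetical member; the lock rank is part of the type, so it is
// statically known and can no longer change at runtime.
SpinlockProtected<Thing, LockRank::Process> m_thing;

// with() acquires the spinlock (with rank tracking) for the duration
// of the callback, then releases it on return.
m_thing.with([](Thing& thing) {
    thing.do_something();
});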
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/FixedArray.h>
#include <AK/IntrusiveList.h>
#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/Region.h>

namespace Kernel::Memory {

class VMObject
    : public ListedRefCounted<VMObject, LockType::Spinlock>
    , public LockWeakable<VMObject> {
    friend class MemoryManager;
    friend class Region;

public:
    virtual ~VMObject();

    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() = 0;

    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
    virtual bool is_shared_inode() const { return false; }
    virtual bool is_private_inode() const { return false; }

    size_t page_count() const { return m_physical_pages.size(); }

    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
    virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }

    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }

    virtual StringView class_name() const = 0;

    ALWAYS_INLINE void add_region(Region& region)
    {
        SpinlockLocker locker(m_lock);
        m_regions.append(region);
    }

    ALWAYS_INLINE void remove_region(Region& region)
    {
        SpinlockLocker locker(m_lock);
        m_regions.remove(region);
    }

protected:
    static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
    ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
    explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);

    template<typename Callback>
    void for_each_region(Callback);

    IntrusiveListNode<VMObject> m_list_node;
    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;

    mutable RecursiveSpinlock<LockRank::None> m_lock {};

private:
    VMObject& operator=(VMObject const&) = delete;
    VMObject& operator=(VMObject&&) = delete;
    VMObject(VMObject&&) = delete;

    Region::ListInVMObject m_regions;

public:
    using AllInstancesList = IntrusiveList<&VMObject::m_list_node>;
    static SpinlockProtected<VMObject::AllInstancesList, LockRank::None>& all_instances();
};

template<typename Callback>
inline void VMObject::for_each_region(Callback callback)
{
    SpinlockLocker lock(m_lock);
    for (auto& region : m_regions) {
        callback(region);
    }
}

}
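For context, a hedged usage sketch (hypothetical caller; `vmobject` and the dbgln output are illustrative and not part of this file): for_each_region runs the callback for every Region mapped over this VMObject while m_lock is held, so callbacks should stay short, and all_instances() being SpinlockProtected means iterating every VMObject also goes through a with() callback:

// Hypothetical usage; `vmobject` stands in for some VMObject reference.
// The callback runs with m_lock (a recursive spinlock) held, so keep it brief.
vmobject.for_each_region([](Region& region) {
    dbgln("VMObject region: {}", region.name());
});

// Walking the global list of VMObjects through the SpinlockProtected wrapper:
VMObject::all_instances().with([](auto& list) {
    for (auto& vmobject : list)
        dbgln("{}", vmobject.class_name());
});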