Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 03:17:35 +00:00
Kernel: Turn lock ranks into template parameters
This step would ideally not have been necessary (it increases the amount of refactoring and template machinery required, which in turn increases build times), but it gives us a couple of nice properties:

- SpinlockProtected inside Singleton (a very common combination) can now obtain any lock rank just via the template parameter. It was not previously possible to do this with SingletonInstanceCreator magic.
- SpinlockProtected's lock rank is now mandatory; this covers the majority of cases and lets us see where we're still missing proper ranks.
- The type already tells us what lock rank a lock has, which aids code readability and (possibly, if gdb cooperates) lock mismatch debugging.
- The rank of a lock can no longer be dynamic, which is not something we wanted in the first place (or ever made use of). Locks randomly changing their rank sounds like a disaster waiting to happen.
- In some places, we might be able to statically check that locks are taken in the right order (given the right lock rank checking implementation), since the rank information is fully statically known.

This refactoring further exposes the fact that Mutex has no lock rank capabilities, which is not fixed here.
This commit is contained in:
parent 363cc12146
commit a6a439243f

94 changed files with 235 additions and 259 deletions
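As a quick orientation before the diff hunks, here is a minimal, hypothetical sketch of the pattern the commit moves to. It is not the kernel's actual Spinlock implementation (only the names Spinlock, LockRank, and LockRank::None come from the diff; everything else is an illustrative assumption): the rank changes from a constructor argument into a non-type template parameter, so it becomes part of the lock's type and is known statically.

```cpp
// Minimal sketch, not the actual SerenityOS implementation.
// LockRank::Process is a purely illustrative extra value.
enum class LockRank {
    None,
    Process,
};

// Before: the rank is a runtime value passed to the constructor, so each
// instance could, in principle, carry a different rank.
class RuntimeRankedSpinlock {
public:
    explicit constexpr RuntimeRankedSpinlock(LockRank rank)
        : m_rank(rank)
    {
    }
    constexpr LockRank rank() const { return m_rank; }

private:
    LockRank m_rank;
};

// After: the rank is a non-type template parameter. It is fixed at compile
// time, part of the type, and visible wherever the declaration is visible.
template<LockRank Rank>
class Spinlock {
public:
    constexpr Spinlock() = default;
    static constexpr LockRank rank() { return Rank; }
};

// The declaration itself now documents the rank, and rank information is
// available for static checking because it never depends on runtime state.
static Spinlock<LockRank::None> s_example_lock {};
static_assert(decltype(s_example_lock)::rank() == LockRank::None);
```

With the rank baked into the type, a declaration like Spinlock<LockRank::None> documents its rank at the use site, and an individual instance can no longer end up with a different rank at runtime.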
@@ -78,7 +78,7 @@ private:
     void uncommit_one();
 
 private:
-    Spinlock m_lock { LockRank::None };
+    Spinlock<LockRank::None> m_lock {};
     CommittedPhysicalPageSet m_committed_pages;
 };
 
@@ -88,7 +88,6 @@ MemoryManager::GlobalData::GlobalData()
 }
 
 UNMAP_AFTER_INIT MemoryManager::MemoryManager()
-    : m_global_data(LockRank::None)
 {
     s_the = this;
 
@@ -89,7 +89,7 @@ struct PhysicalMemoryRange {
 struct MemoryManagerData {
     static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
 
-    Spinlock m_quickmap_in_use { LockRank::None };
+    Spinlock<LockRank::None> m_quickmap_in_use {};
     InterruptsState m_quickmap_previous_interrupts_state;
 };
 
@@ -304,7 +304,7 @@ private:
         Vector<ContiguousReservedMemoryRange> reserved_memory_ranges;
     };
 
-    SpinlockProtected<GlobalData> m_global_data;
+    SpinlockProtected<GlobalData, LockRank::None> m_global_data;
 };
 
 inline bool is_user_address(VirtualAddress vaddr)
@@ -52,7 +52,7 @@ public:
 
     void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }
 
-    RecursiveSpinlock& get_lock() { return m_lock; }
+    RecursiveSpinlock<LockRank::None>& get_lock() { return m_lock; }
 
     // This has to be public to let the global singleton access the member pointer
     IntrusiveRedBlackTreeNode<FlatPtr, PageDirectory, RawPtr<PageDirectory>> m_tree_node;
@@ -72,7 +72,7 @@ private:
 #else
     RefPtr<PhysicalPage> m_directory_pages[4];
 #endif
-    RecursiveSpinlock m_lock { LockRank::None };
+    RecursiveSpinlock<LockRank::None> m_lock {};
 };
 
 void activate_kernel_page_directory(PageDirectory const& pgd);
@@ -270,7 +270,7 @@ void Region::unmap(ShouldFlushTLB should_flush_tlb)
     unmap_with_locks_held(should_flush_tlb, pd_locker);
 }
 
-void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&)
+void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock<LockRank::None>>&)
 {
     if (!m_page_directory)
         return;
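The signature change above shows a knock-on effect of templating the rank: a lock guard that is itself templated on the lock type now carries the rank in its type, so functions that accept an already-held lock spell the rank out in their parameter list. Below is a simplified, hypothetical sketch of that relationship; it is not the kernel's real SpinlockLocker or RecursiveSpinlock, just the minimal shape needed to show why these declarations change.

```cpp
// Simplified sketch; the real kernel types carry much more machinery.
enum class LockRank { None };

template<LockRank Rank>
class RecursiveSpinlock {
    // Locking/unlocking omitted; only the type structure matters here.
};

// A guard templated on the lock type: once the rank is part of the lock's
// type, it automatically becomes part of the guard's type as well.
template<typename LockType>
class SpinlockLocker {
public:
    explicit SpinlockLocker(LockType& lock)
        : m_lock(lock)
    {
    }

private:
    LockType& m_lock;
};

// Mirrors the Region::unmap_with_locks_held() change: callers that pass a
// held page-directory lock now name the rank via the lock's type.
void unmap_with_locks_held(SpinlockLocker<RecursiveSpinlock<LockRank::None>>& pd_locker);
```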
@@ -13,6 +13,7 @@
 #include <Kernel/Forward.h>
 #include <Kernel/KString.h>
 #include <Kernel/Library/LockWeakable.h>
+#include <Kernel/Locking/LockRank.h>
 #include <Kernel/Memory/PageFaultResponse.h>
 #include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Sections.h>
@@ -192,7 +193,7 @@ public:
     void set_page_directory(PageDirectory&);
     ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
     void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes);
-    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker);
+    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock<LockRank::None>>& pd_locker);
 
     void remap();
 
@@ -22,7 +22,7 @@ public:
     void reclaim_space(PhysicalAddress chunk_start, size_t chunk_size);
     PhysicalAddress start_of_used() const;
 
-    Spinlock& lock() { return m_lock; }
+    Spinlock<LockRank::None>& lock() { return m_lock; }
     size_t used_bytes() const { return m_num_used_bytes; }
     PhysicalAddress start_of_region() const { return m_region->physical_page(0)->paddr(); }
     VirtualAddress vaddr() const { return m_region->vaddr(); }
@@ -32,7 +32,7 @@ private:
     RingBuffer(NonnullOwnPtr<Memory::Region> region, size_t capacity);
 
     NonnullOwnPtr<Memory::Region> m_region;
-    Spinlock m_lock { LockRank::None };
+    Spinlock<LockRank::None> m_lock {};
     size_t m_start_of_used {};
     size_t m_num_used_bytes {};
     size_t m_capacity_in_bytes {};
@@ -87,7 +87,7 @@ private:
     LockRefPtr<FakeWritesFramebufferVMObject> m_fake_writes_framebuffer_vmobject;
     LockRefPtr<RealWritesFramebufferVMObject> m_real_writes_framebuffer_vmobject;
     bool m_writes_are_faked { false };
-    mutable RecursiveSpinlock m_writes_state_lock { LockRank::None };
+    mutable RecursiveSpinlock<LockRank::None> m_writes_state_lock {};
     CommittedPhysicalPageSet m_committed_pages;
 };
 
@@ -10,9 +10,9 @@
 
 namespace Kernel::Memory {
 
-static Singleton<SpinlockProtected<VMObject::AllInstancesList>> s_all_instances;
+static Singleton<SpinlockProtected<VMObject::AllInstancesList, LockRank::None>> s_all_instances;
 
-SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
+SpinlockProtected<VMObject::AllInstancesList, LockRank::None>& VMObject::all_instances()
 {
     return s_all_instances;
 }
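The s_all_instances change above is the "SpinlockProtected inside Singleton" case from the commit message. Here is a simplified, hypothetical sketch of why the template parameter helps (none of these definitions are the kernel's real Singleton, SpinlockProtected, or Spinlock): a Singleton that only knows how to default-construct its instance can now produce a SpinlockProtected with the desired rank, because the rank no longer has to be passed as a constructor argument via a SingletonInstanceCreator-style hook.

```cpp
// Simplified sketch; not the kernel's real types.
enum class LockRank { None };

template<LockRank Rank>
class Spinlock { };

// The protected value and its lock are both default-constructible, since the
// rank rides along in the type instead of the constructor argument list.
template<typename T, LockRank Rank>
class SpinlockProtected {
private:
    T m_value {};
    Spinlock<Rank> m_lock {};
};

// A singleton that can only default-construct its instance.
template<typename T>
class Singleton {
public:
    T& get()
    {
        static T s_instance {}; // no custom instance-creator hook required
        return s_instance;
    }
};

struct AllInstancesList { };

// Mirrors the VMObject change: the desired rank is expressed entirely in the
// template arguments, so the plain Singleton works as-is.
static Singleton<SpinlockProtected<AllInstancesList, LockRank::None>> s_all_instances;
```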
@@ -65,7 +65,7 @@ protected:
     IntrusiveListNode<VMObject> m_list_node;
     FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
 
-    mutable RecursiveSpinlock m_lock { LockRank::None };
+    mutable RecursiveSpinlock<LockRank::None> m_lock {};
 
 private:
     VMObject& operator=(VMObject const&) = delete;
@@ -76,7 +76,7 @@ private:
 
 public:
     using AllInstancesList = IntrusiveList<&VMObject::m_list_node>;
-    static SpinlockProtected<VMObject::AllInstancesList>& all_instances();
+    static SpinlockProtected<VMObject::AllInstancesList, LockRank::None>& all_instances();
 };
 
 template<typename Callback>