mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 05:37:34 +00:00
Kernel: Turn lock ranks into template parameters
This step would ideally not have been necessary (it increases the amount of refactoring and templates necessary, which in turn increases build times), but it gives us a couple of nice properties:

- SpinlockProtected inside Singleton (a very common combination) can now obtain any lock rank just via the template parameter. It was not previously possible to do this with SingletonInstanceCreator magic.
- SpinlockProtected's lock rank is now mandatory; this is the majority of cases and allows us to see where we're still missing proper ranks.
- The type already informs us what lock rank a lock has, which aids code readability and (possibly, if gdb cooperates) lock mismatch debugging.
- The rank of a lock can no longer be dynamic, which is not something we wanted in the first place (or made use of). Locks randomly changing their rank sounds like a disaster waiting to happen.
- In some places, we might be able to statically check that locks are taken in the right order (with the right lock rank checking implementation), as rank information is fully statically known.

This refactoring further exposes the fact that Mutex has no lock rank capabilities, which is not fixed here.
This commit is contained in:
parent
363cc12146
commit
a6a439243f
94 changed files with 235 additions and 259 deletions
|
@ -211,7 +211,7 @@ void Mutex::unlock()
|
|||
}
|
||||
}
|
||||
|
||||
void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& lock, u32 requested_locks)
|
||||
void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock<LockRank::None>>& lock, u32 requested_locks)
|
||||
{
|
||||
if constexpr (LOCK_IN_CRITICAL_DEBUG) {
|
||||
// There are no interrupts enabled in early boot.
|
||||
|
|
|
@ -82,7 +82,8 @@ private:
|
|||
// FIXME: remove this after annihilating Process::m_big_lock
|
||||
using BigLockBlockedThreadList = IntrusiveList<&Thread::m_big_lock_blocked_threads_list_node>;
|
||||
|
||||
void block(Thread&, Mode, SpinlockLocker<Spinlock>&, u32);
|
||||
// FIXME: Allow any lock rank.
|
||||
void block(Thread&, Mode, SpinlockLocker<Spinlock<LockRank::None>>&, u32);
|
||||
void unblock_waiters(Mode);
|
||||
|
||||
StringView m_name;
|
||||
|
@ -117,10 +118,10 @@ private:
|
|||
}
|
||||
};
|
||||
// FIXME: Use a specific lock rank passed by constructor.
|
||||
SpinlockProtected<BlockedThreadLists> m_blocked_thread_lists { LockRank::None };
|
||||
SpinlockProtected<BlockedThreadLists, LockRank::None> m_blocked_thread_lists {};
|
||||
|
||||
// FIXME: See above.
|
||||
mutable Spinlock m_lock { LockRank::None };
|
||||
mutable Spinlock<LockRank::None> m_lock {};
|
||||
|
||||
#if LOCK_SHARED_UPGRADE_DEBUG
|
||||
HashMap<Thread*, u32> m_shared_holders_map;
|
||||
|
|
|
@ -1,66 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*/
|
||||
|
||||
#include <Kernel/Locking/Spinlock.h>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
// Acquires the spinlock. Saves the current interrupt state, enters a
// critical section, disables interrupts, then spins until the lock byte
// transitions 0 -> 1. Returns the saved interrupt state so the matching
// unlock() can restore it.
InterruptsState Spinlock::lock()
|
||||
{
|
||||
InterruptsState previous_interrupts_state = processor_interrupts_state();
|
||||
Processor::enter_critical();
|
||||
Processor::disable_interrupts();
|
||||
// Acquire ordering here pairs with the release store in unlock(), so all
// writes made by the previous holder are visible once we own the lock.
while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
|
||||
Processor::wait_check();
|
||||
// Record the acquisition for lock-rank checking.
track_lock_acquire(m_rank);
|
||||
return previous_interrupts_state;
|
||||
}
|
||||
|
||||
// Releases the spinlock and restores the interrupt state captured by the
// matching lock() call. Must only be called while the lock is held.
void Spinlock::unlock(InterruptsState previous_interrupts_state)
|
||||
{
|
||||
VERIFY(is_locked());
|
||||
// Record the release for lock-rank checking before publishing the unlock.
track_lock_release(m_rank);
|
||||
// Release store pairs with the acquire exchange in lock().
m_lock.store(0, AK::memory_order_release);
|
||||
|
||||
Processor::leave_critical();
|
||||
restore_processor_interrupts_state(previous_interrupts_state);
|
||||
}
|
||||
|
||||
// Acquires the recursive spinlock. The lock word stores the address of the
// owning Processor, so re-acquisition by the same CPU is detected and only
// bumps m_recursions instead of deadlocking. Returns the saved interrupt
// state for the matching unlock() to restore.
InterruptsState RecursiveSpinlock::lock()
|
||||
{
|
||||
InterruptsState previous_interrupts_state = processor_interrupts_state();
|
||||
Processor::disable_interrupts();
|
||||
Processor::enter_critical();
|
||||
auto& proc = Processor::current();
|
||||
// Use this CPU's Processor address as the ownership token.
FlatPtr cpu = FlatPtr(&proc);
|
||||
FlatPtr expected = 0;
|
||||
while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
|
||||
// CAS failed but the holder is us: recursive acquisition, proceed.
if (expected == cpu)
|
||||
break;
|
||||
Processor::wait_check();
|
||||
// compare_exchange_strong updated `expected` with the observed owner;
// reset it so the next attempt again expects an unlocked (0) state.
expected = 0;
|
||||
}
|
||||
// Only the outermost acquisition is tracked for lock-rank checking.
if (m_recursions == 0)
|
||||
track_lock_acquire(m_rank);
|
||||
m_recursions++;
|
||||
return previous_interrupts_state;
|
||||
}
|
||||
|
||||
// Releases one level of the recursive spinlock; the lock word is only
// cleared when the outermost acquisition is released. Must be called by
// the owning CPU with interrupts disabled.
void RecursiveSpinlock::unlock(InterruptsState previous_interrupts_state)
|
||||
{
|
||||
VERIFY_INTERRUPTS_DISABLED();
|
||||
VERIFY(m_recursions > 0);
|
||||
// The lock word must still name this CPU's Processor as the owner.
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
|
||||
if (--m_recursions == 0) {
|
||||
// Outermost release: record it, then publish with a release store that
// pairs with the acquire CAS in lock().
track_lock_release(m_rank);
|
||||
m_lock.store(0, AK::memory_order_release);
|
||||
}
|
||||
|
||||
Processor::leave_critical();
|
||||
restore_processor_interrupts_state(previous_interrupts_state);
|
||||
}
|
||||
|
||||
}
|
|
@ -13,18 +13,34 @@
|
|||
|
||||
namespace Kernel {
|
||||
|
||||
template<LockRank Rank>
|
||||
class Spinlock {
|
||||
AK_MAKE_NONCOPYABLE(Spinlock);
|
||||
AK_MAKE_NONMOVABLE(Spinlock);
|
||||
|
||||
public:
|
||||
Spinlock(LockRank rank)
|
||||
: m_rank(rank)
|
||||
Spinlock() = default;
|
||||
|
||||
InterruptsState lock()
|
||||
{
|
||||
InterruptsState previous_interrupts_state = processor_interrupts_state();
|
||||
Processor::enter_critical();
|
||||
Processor::disable_interrupts();
|
||||
while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
|
||||
Processor::wait_check();
|
||||
track_lock_acquire(m_rank);
|
||||
return previous_interrupts_state;
|
||||
}
|
||||
|
||||
InterruptsState lock();
|
||||
void unlock(InterruptsState);
|
||||
void unlock(InterruptsState previous_interrupts_state)
|
||||
{
|
||||
VERIFY(is_locked());
|
||||
track_lock_release(m_rank);
|
||||
m_lock.store(0, AK::memory_order_release);
|
||||
|
||||
Processor::leave_critical();
|
||||
restore_processor_interrupts_state(previous_interrupts_state);
|
||||
}
|
||||
|
||||
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
|
||||
{
|
||||
|
@ -38,21 +54,50 @@ public:
|
|||
|
||||
private:
|
||||
Atomic<u8> m_lock { 0 };
|
||||
const LockRank m_rank;
|
||||
static constexpr LockRank const m_rank { Rank };
|
||||
};
|
||||
|
||||
template<LockRank Rank>
|
||||
class RecursiveSpinlock {
|
||||
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
|
||||
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
|
||||
|
||||
public:
|
||||
RecursiveSpinlock(LockRank rank)
|
||||
: m_rank(rank)
|
||||
RecursiveSpinlock() = default;
|
||||
|
||||
InterruptsState lock()
|
||||
{
|
||||
InterruptsState previous_interrupts_state = processor_interrupts_state();
|
||||
Processor::disable_interrupts();
|
||||
Processor::enter_critical();
|
||||
auto& proc = Processor::current();
|
||||
FlatPtr cpu = FlatPtr(&proc);
|
||||
FlatPtr expected = 0;
|
||||
while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
|
||||
if (expected == cpu)
|
||||
break;
|
||||
Processor::wait_check();
|
||||
expected = 0;
|
||||
}
|
||||
if (m_recursions == 0)
|
||||
track_lock_acquire(m_rank);
|
||||
m_recursions++;
|
||||
return previous_interrupts_state;
|
||||
}
|
||||
|
||||
InterruptsState lock();
|
||||
void unlock(InterruptsState);
|
||||
void unlock(InterruptsState previous_interrupts_state)
|
||||
{
|
||||
VERIFY_INTERRUPTS_DISABLED();
|
||||
VERIFY(m_recursions > 0);
|
||||
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
|
||||
if (--m_recursions == 0) {
|
||||
track_lock_release(m_rank);
|
||||
m_lock.store(0, AK::memory_order_release);
|
||||
}
|
||||
|
||||
Processor::leave_critical();
|
||||
restore_processor_interrupts_state(previous_interrupts_state);
|
||||
}
|
||||
|
||||
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
|
||||
{
|
||||
|
@ -72,7 +117,7 @@ public:
|
|||
private:
|
||||
Atomic<FlatPtr> m_lock { 0 };
|
||||
u32 m_recursions { 0 };
|
||||
const LockRank m_rank;
|
||||
static constexpr LockRank const m_rank { Rank };
|
||||
};
|
||||
|
||||
template<typename LockType>
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
|
||||
namespace Kernel {
|
||||
|
||||
template<typename T>
|
||||
template<typename T, LockRank Rank>
|
||||
class SpinlockProtected {
|
||||
AK_MAKE_NONCOPYABLE(SpinlockProtected);
|
||||
AK_MAKE_NONMOVABLE(SpinlockProtected);
|
||||
|
@ -22,7 +22,7 @@ private:
|
|||
AK_MAKE_NONMOVABLE(Locked);
|
||||
|
||||
public:
|
||||
Locked(U& value, RecursiveSpinlock& spinlock)
|
||||
Locked(U& value, RecursiveSpinlock<Rank>& spinlock)
|
||||
: m_value(value)
|
||||
, m_locker(spinlock)
|
||||
{
|
||||
|
@ -39,7 +39,7 @@ private:
|
|||
|
||||
private:
|
||||
U& m_value;
|
||||
SpinlockLocker<RecursiveSpinlock> m_locker;
|
||||
SpinlockLocker<RecursiveSpinlock<Rank>> m_locker;
|
||||
};
|
||||
|
||||
auto lock_const() const { return Locked<T const>(m_value, m_spinlock); }
|
||||
|
@ -47,9 +47,8 @@ private:
|
|||
|
||||
public:
|
||||
template<typename... Args>
|
||||
SpinlockProtected(LockRank rank, Args&&... args)
|
||||
SpinlockProtected(Args&&... args)
|
||||
: m_value(forward<Args>(args)...)
|
||||
, m_spinlock(rank)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -87,7 +86,8 @@ public:
|
|||
|
||||
private:
|
||||
T m_value;
|
||||
RecursiveSpinlock mutable m_spinlock;
|
||||
RecursiveSpinlock<Rank> mutable m_spinlock;
|
||||
static constexpr LockRank const m_rank { Rank };
|
||||
};
|
||||
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue