
Kernel: Move Spinlock functions back to arch independent Locking folder

Now that the Spinlock code no longer depends on architecture-specific
code, we can move it back to the Locking folder. This also means that
the Spinlock implementation is now used for the aarch64 kernel.
Timon Kruiper 2022-08-23 22:14:07 +02:00, committed by Andreas Kling
parent c9118de5a6
commit 026f37b031
6 changed files with 75 additions and 116 deletions
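The point of the commit message is that lock() and unlock() (declared but not defined in the header below) can be written entirely against the generic Processor interface, which is why one implementation can now serve both x86_64 and aarch64. As a rough sketch of what such an arch-independent implementation could look like — the real Spinlock.cpp is not shown on this page, and the interrupt-state helpers processor_interrupts_state() / restore_processor_interrupts_state() as well as the lock-rank trackers are assumptions based on the API visible in the header:

InterruptsState Spinlock::lock()
{
    // Sketch: save the caller's interrupt state, then disable interrupts so
    // we cannot be preempted while spinning or while holding the lock.
    InterruptsState previous_interrupts_state = processor_interrupts_state(); // assumed helper
    Processor::enter_critical();
    Processor::disable_interrupts();

    // Atomically flip the lock byte from 0 to 1; spin until we succeed.
    while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
        Processor::wait_check();

    track_lock_acquire(m_rank); // assumed LockRank hook
    return previous_interrupts_state;
}

void Spinlock::unlock(InterruptsState previous_interrupts_state)
{
    VERIFY(is_locked());
    track_lock_release(m_rank); // assumed LockRank hook

    // Release the lock, then restore whatever interrupt state the caller had.
    m_lock.store(0, AK::memory_order_release);
    Processor::leave_critical();
    restore_processor_interrupts_state(previous_interrupts_state); // assumed helper
}

Nothing in this sketch touches inline assembly or architecture-specific registers directly; everything architecture-dependent is hidden behind Processor, which is exactly what makes the move into Kernel/Locking possible.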

Kernel/Locking/Spinlock.h

@@ -8,11 +8,73 @@
 #include <AK/Atomic.h>
 #include <AK/Types.h>
-#include <Kernel/Arch/Spinlock.h>
+#include <Kernel/Arch/Processor.h>
 #include <Kernel/Locking/LockRank.h>

 namespace Kernel {

+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank)
+        : m_rank(rank)
+    {
+    }
+
+    InterruptsState lock();
+    void unlock(InterruptsState);
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<u8> m_lock { 0 };
+    const LockRank m_rank;
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank)
+        : m_rank(rank)
+    {
+    }
+
+    InterruptsState lock();
+    void unlock(InterruptsState);
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<FlatPtr> m_lock { 0 };
+    u32 m_recursions { 0 };
+    const LockRank m_rank;
+};
+
 template<typename LockType>
 class [[nodiscard]] SpinlockLocker {
     AK_MAKE_NONCOPYABLE(SpinlockLocker);
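The hunk ends at the start of SpinlockLocker, the RAII guard built on top of these classes: its constructor calls lock() on the wrapped lock and stores the returned InterruptsState, and its destructor passes that state back to unlock(). A short usage sketch (the member names here are illustrative, not from this diff):

class Scheduler {
    Spinlock m_queue_lock { LockRank::None }; // illustrative member
    Vector<Thread*> m_queue;                  // illustrative member

    void enqueue(Thread& thread)
    {
        // lock() runs here; the returned InterruptsState is kept by the locker...
        SpinlockLocker locker(m_queue_lock);
        m_queue.append(&thread);
        // ...and unlock() runs with that state when locker goes out of scope.
    }
};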