
Kernel: Move spinlock into Arch

Spinlocks are tied to the platform they are built for, which is why they
have been moved into the Arch folder. They are still available via
"Locking/Spinlock.h".

An AArch64 stub has been created.
James Mintram 2021-10-14 20:18:56 +01:00 committed by Linus Groh
parent dfe4810c3a
commit e8f09279d3
4 changed files with 229 additions and 118 deletions
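For reference, callers are unaffected by this move: kernel code keeps including <Kernel/Locking/Spinlock.h>, which now pulls in the architecture-specific implementation through <Kernel/Arch/Spinlock.h>. A minimal usage sketch (the lock and function names below are illustrative, not part of this commit):

#include <Kernel/Locking/Spinlock.h>

// Hypothetical example, not part of the diff below.
static Kernel::Spinlock s_example_lock;

void example_critical_section()
{
    // SpinlockLocker calls lock() on construction (saving the interrupt
    // flags it returns) and unlock() on destruction, restoring them.
    Kernel::SpinlockLocker locker(s_example_lock);
    // ... work that must not run concurrently on another processor ...
}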

Kernel/Arch/Spinlock.h (new file, 17 lines)

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
#if ARCH(X86_64) || ARCH(I386)
# include <Kernel/Arch/x86/Spinlock.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/Spinlock.h>
#else
# error "Unknown architecture"
#endif

Kernel/Arch/aarch64/Spinlock.h (new file, 78 lines)

@@ -0,0 +1,78 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Noncopyable.h>
#include <AK/Types.h>
#include <Kernel/Locking/LockRank.h>
namespace Kernel {
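// AArch64 stub: mirrors the x86 Spinlock interface so generic kernel code
// compiles, but performs no actual locking yet.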
class Spinlock {
AK_MAKE_NONCOPYABLE(Spinlock);
AK_MAKE_NONMOVABLE(Spinlock);
public:
Spinlock(LockRank rank = LockRank::None)
{
(void)rank;
}
ALWAYS_INLINE u32 lock()
{
return 0;
}
ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
{
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return false;
}
ALWAYS_INLINE void initialize()
{
}
};
class RecursiveSpinlock {
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
public:
RecursiveSpinlock(LockRank rank = LockRank::None)
{
(void)rank;
}
ALWAYS_INLINE u32 lock()
{
return 0;
}
ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
{
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return false;
}
[[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
{
return false;
}
ALWAYS_INLINE void initialize()
{
}
};
}

Kernel/Arch/x86/Spinlock.h (new file, 133 lines)

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/Processor.h>
#include <Kernel/Locking/LockRank.h>
namespace Kernel {
class Spinlock {
AK_MAKE_NONCOPYABLE(Spinlock);
AK_MAKE_NONMOVABLE(Spinlock);
public:
Spinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
ALWAYS_INLINE u32 lock()
{
u32 prev_flags = cpu_flags();
Processor::enter_critical();
cli();
while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
Processor::wait_check();
}
track_lock_acquire(m_rank);
return prev_flags;
}
ALWAYS_INLINE void unlock(u32 prev_flags)
{
VERIFY(is_locked());
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
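// Bit 0x200 of EFLAGS is IF (interrupt enable); restore the caller's interrupt state.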
if (prev_flags & 0x200)
sti();
else
cli();
Processor::leave_critical();
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<u8> m_lock { 0 };
const LockRank m_rank;
};
class RecursiveSpinlock {
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
public:
RecursiveSpinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
ALWAYS_INLINE u32 lock()
{
u32 prev_flags = cpu_flags();
cli();
Processor::enter_critical();
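// m_lock holds the address of the owning Processor, so a nested lock() on
// the same CPU breaks out of the loop below instead of spinning on itself.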
auto& proc = Processor::current();
FlatPtr cpu = FlatPtr(&proc);
FlatPtr expected = 0;
while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
if (expected == cpu)
break;
Processor::wait_check();
expected = 0;
}
if (m_recursions == 0)
track_lock_acquire(m_rank);
m_recursions++;
return prev_flags;
}
ALWAYS_INLINE void unlock(u32 prev_flags)
{
VERIFY(m_recursions > 0);
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
if (--m_recursions == 0) {
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
}
if (prev_flags & 0x200)
sti();
else
cli();
Processor::leave_critical();
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
[[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
{
return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
}
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<FlatPtr> m_lock { 0 };
u32 m_recursions { 0 };
const LockRank m_rank;
};
}
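Taken together, lock() spins until it owns the lock, disables interrupts, and returns the caller's previous EFLAGS; unlock() releases the lock and re-enables interrupts only if the IF bit (0x200) was set beforehand. RecursiveSpinlock additionally lets the same processor re-acquire the lock by storing the owner's Processor address in m_lock. A bare-bones sketch of manual use (illustrative names; real call sites normally go through SpinlockLocker):

// Illustrative only; names are not from this commit.
static Kernel::RecursiveSpinlock s_state_lock;

void update_shared_state()
{
    u32 prev_flags = s_state_lock.lock(); // spins, disables interrupts
    // A nested lock() on this same processor would only bump m_recursions
    // rather than deadlock, because m_lock already holds this CPU's address.
    s_state_lock.unlock(prev_flags);      // restores IF if it was previously set
}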

Kernel/Locking/Spinlock.h (modified)

@@ -8,128 +8,11 @@
#include <AK/Atomic.h>
#include <AK/Types.h>
#include <Kernel/Arch/Spinlock.h> // was <Kernel/Arch/Processor.h>
#include <Kernel/Locking/LockRank.h>
namespace Kernel {
class Spinlock {
AK_MAKE_NONCOPYABLE(Spinlock);
AK_MAKE_NONMOVABLE(Spinlock);
public:
Spinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
ALWAYS_INLINE u32 lock()
{
u32 prev_flags = cpu_flags();
Processor::enter_critical();
cli();
while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
Processor::wait_check();
}
track_lock_acquire(m_rank);
return prev_flags;
}
ALWAYS_INLINE void unlock(u32 prev_flags)
{
VERIFY(is_locked());
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
if (prev_flags & 0x200)
sti();
else
cli();
Processor::leave_critical();
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<u8> m_lock { 0 };
const LockRank m_rank;
};
class RecursiveSpinlock {
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
public:
RecursiveSpinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
ALWAYS_INLINE u32 lock()
{
u32 prev_flags = cpu_flags();
cli();
Processor::enter_critical();
auto& proc = Processor::current();
FlatPtr cpu = FlatPtr(&proc);
FlatPtr expected = 0;
while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
if (expected == cpu)
break;
Processor::wait_check();
expected = 0;
}
if (m_recursions == 0)
track_lock_acquire(m_rank);
m_recursions++;
return prev_flags;
}
ALWAYS_INLINE void unlock(u32 prev_flags)
{
VERIFY(m_recursions > 0);
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
if (--m_recursions == 0) {
track_lock_release(m_rank);
m_lock.store(0, AK::memory_order_release);
}
if (prev_flags & 0x200)
sti();
else
cli();
Processor::leave_critical();
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
[[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
{
return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
}
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<FlatPtr> m_lock { 0 };
u32 m_recursions { 0 };
const LockRank m_rank;
};
template<typename LockType>
class [[nodiscard]] SpinlockLocker {
AK_MAKE_NONCOPYABLE(SpinlockLocker);