diff --git a/Kernel/Arch/Spinlock.h b/Kernel/Arch/Spinlock.h
new file mode 100644
index 0000000000..ac084e5e68
--- /dev/null
+++ b/Kernel/Arch/Spinlock.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Platform.h>
+
+#if ARCH(X86_64) || ARCH(I386)
+#    include <Kernel/Arch/x86/Spinlock.h>
+#elif ARCH(AARCH64)
+#    include <Kernel/Arch/aarch64/Spinlock.h>
+#else
+#    error "Unknown architecture"
+#endif
diff --git a/Kernel/Arch/aarch64/Spinlock.h b/Kernel/Arch/aarch64/Spinlock.h
new file mode 100644
index 0000000000..5dd711b0d5
--- /dev/null
+++ b/Kernel/Arch/aarch64/Spinlock.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Atomic.h>
+#include <AK/Types.h>
+#include <Kernel/Locking/LockRank.h>
+
+namespace Kernel {
+
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank = LockRank::None)
+    {
+        (void)rank;
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        return 0;
+    }
+
+    ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
+    {
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return false;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+    }
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank = LockRank::None)
+    {
+        (void)rank;
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        return 0;
+    }
+
+    ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
+    {
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return false;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return false;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+    }
+};
+
+}
diff --git a/Kernel/Arch/x86/Spinlock.h b/Kernel/Arch/x86/Spinlock.h
new file mode 100644
index 0000000000..f396e53c12
--- /dev/null
+++ b/Kernel/Arch/x86/Spinlock.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <Kernel/Arch/x86/Processor.h>
+#include <Kernel/Locking/LockRank.h>
+
+namespace Kernel {
+
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank = LockRank::None)
+        : m_rank(rank)
+    {
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        u32 prev_flags = cpu_flags();
+        Processor::enter_critical();
+        cli();
+        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
+            Processor::wait_check();
+        }
+        track_lock_acquire(m_rank);
+        return prev_flags;
+    }
+
+    ALWAYS_INLINE void unlock(u32 prev_flags)
+    {
+        VERIFY(is_locked());
+        track_lock_release(m_rank);
+        m_lock.store(0, AK::memory_order_release);
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+
+        Processor::leave_critical();
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<u8> m_lock { 0 };
+    const LockRank m_rank;
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank = LockRank::None)
+        : m_rank(rank)
+    {
+    }
+
+    ALWAYS_INLINE u32 lock()
+    {
+        u32 prev_flags = cpu_flags();
+        cli();
+        Processor::enter_critical();
+        auto& proc = Processor::current();
+        FlatPtr cpu = FlatPtr(&proc);
+        FlatPtr expected = 0;
+        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
+            if (expected == cpu)
+                break;
+            Processor::wait_check();
+            expected = 0;
+        }
+        if (m_recursions == 0)
+            track_lock_acquire(m_rank);
+        m_recursions++;
+        return prev_flags;
+    }
+
+    ALWAYS_INLINE void unlock(u32 prev_flags)
+    {
+        VERIFY(m_recursions > 0);
+        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
+        if (--m_recursions == 0) {
+            track_lock_release(m_rank);
+            m_lock.store(0, AK::memory_order_release);
+        }
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+
+        Processor::leave_critical();
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<FlatPtr> m_lock { 0 };
+    u32 m_recursions { 0 };
+    const LockRank m_rank;
+};
+
+}
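
The x86 lock() above saves RFLAGS before executing cli() and returns the saved value; unlock() takes it back and runs sti() only if bit 0x200 (the IF interrupt-enable flag) was set beforehand. A minimal call-site sketch of that handshake follows; the lock name and critical section are hypothetical, and in-tree callers normally go through the SpinlockLocker RAII helper that stays behind in Kernel/Locking/Spinlock.h (see the remaining hunk below):

    static Kernel::Spinlock g_state_lock; // hypothetical lock guarding some shared state

    void update_shared_state()
    {
        // lock() runs cli(), spins on the atomic exchange, and hands back the
        // pre-cli() RFLAGS value.
        u32 prev_flags = g_state_lock.lock();

        // ... mutate the state guarded by g_state_lock ...

        // unlock() releases the lock, then executes sti() only if bit 0x200
        // (IF) was set in prev_flags, restoring the caller's interrupt state.
        g_state_lock.unlock(prev_flags);
    }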
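
RecursiveSpinlock records the owner as the address of the current Processor, so the same CPU can re-enter the lock while other CPUs keep spinning on the compare-exchange. The following self-contained model mirrors that algorithm in userspace; RecursiveSpinlockModel and owner_token() are invented for illustration, std::thread identity stands in for Processor::current(), and the interrupt and critical-section bookkeeping is omitted:

    // Userspace model of the RecursiveSpinlock algorithm above. Illustrative only.
    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <thread>

    class RecursiveSpinlockModel {
    public:
        void lock()
        {
            uintptr_t self = owner_token();
            uintptr_t expected = 0;
            // Same loop shape as the patch: CAS from "unlocked" (0) to our own
            // identity, but fall through if we are already the owner.
            while (!m_owner.compare_exchange_strong(expected, self, std::memory_order_acq_rel)) {
                if (expected == self)
                    break;
                // (The kernel calls Processor::wait_check() here while spinning.)
                expected = 0;
            }
            m_recursions++;
        }

        void unlock()
        {
            assert(m_owner.load(std::memory_order_relaxed) == owner_token());
            if (--m_recursions == 0)
                m_owner.store(0, std::memory_order_release);
        }

    private:
        static uintptr_t owner_token()
        {
            // Stand-in for FlatPtr(&Processor::current()).
            return std::hash<std::thread::id> {}(std::this_thread::get_id());
        }

        std::atomic<uintptr_t> m_owner { 0 };
        unsigned m_recursions { 0 };
    };

    int main()
    {
        RecursiveSpinlockModel lock;
        lock.lock();
        lock.lock(); // re-entry by the same holder does not deadlock
        lock.unlock();
        lock.unlock();
    }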
diff --git a/Kernel/Locking/Spinlock.h b/Kernel/Locking/Spinlock.h
index 7e8f83708a..136ae54bf6 100644
--- a/Kernel/Locking/Spinlock.h
+++ b/Kernel/Locking/Spinlock.h
@@ -8,128 +8,11 @@
 
 #include <AK/Atomic.h>
 #include <AK/Types.h>
-#include <Kernel/Arch/x86/Processor.h>
+#include <Kernel/Arch/Spinlock.h>
 #include <Kernel/Locking/LockRank.h>
 
 namespace Kernel {
 
-class Spinlock {
-    AK_MAKE_NONCOPYABLE(Spinlock);
-    AK_MAKE_NONMOVABLE(Spinlock);
-
-public:
-    Spinlock(LockRank rank = LockRank::None)
-        : m_rank(rank)
-    {
-    }
-
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        Processor::enter_critical();
-        cli();
-        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
-            Processor::wait_check();
-        }
-        track_lock_acquire(m_rank);
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(is_locked());
-        track_lock_release(m_rank);
-        m_lock.store(0, AK::memory_order_release);
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
-
-        Processor::leave_critical();
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) != 0;
-    }
-
-    ALWAYS_INLINE void initialize()
-    {
-        m_lock.store(0, AK::memory_order_relaxed);
-    }
-
-private:
-    Atomic<u8> m_lock { 0 };
-    const LockRank m_rank;
-};
-
-class RecursiveSpinlock {
-    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
-    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
-
-public:
-    RecursiveSpinlock(LockRank rank = LockRank::None)
-        : m_rank(rank)
-    {
-    }
-
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        cli();
-        Processor::enter_critical();
-        auto& proc = Processor::current();
-        FlatPtr cpu = FlatPtr(&proc);
-        FlatPtr expected = 0;
-        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
-            if (expected == cpu)
-                break;
-            Processor::wait_check();
-            expected = 0;
-        }
-        if (m_recursions == 0)
-            track_lock_acquire(m_rank);
-        m_recursions++;
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(m_recursions > 0);
-        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
-        if (--m_recursions == 0) {
-            track_lock_release(m_rank);
-            m_lock.store(0, AK::memory_order_release);
-        }
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
-
-        Processor::leave_critical();
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) != 0;
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
-    }
-
-    ALWAYS_INLINE void initialize()
-    {
-        m_lock.store(0, AK::memory_order_relaxed);
-    }
-
-private:
-    Atomic<FlatPtr> m_lock { 0 };
-    u32 m_recursions { 0 };
-    const LockRank m_rank;
-};
-
 template<typename LockType>
 class [[nodiscard]] SpinlockLocker {
     AK_MAKE_NONCOPYABLE(SpinlockLocker);