diff --git a/Kernel/Arch/Spinlock.h b/Kernel/Arch/Spinlock.h index ac084e5e68..995dc36c37 100644 --- a/Kernel/Arch/Spinlock.h +++ b/Kernel/Arch/Spinlock.h @@ -1,17 +1,76 @@ /* - * Copyright (c) 2020, Andreas Kling + * Copyright (c) 2020-2022, Andreas Kling * * SPDX-License-Identifier: BSD-2-Clause */ #pragma once -#include <AK/Platform.h> +#include <Kernel/Arch/Processor.h> +#include <Kernel/Locking/LockRank.h> -#if ARCH(X86_64) || ARCH(I386) -# include <Kernel/Arch/x86/Spinlock.h> -#elif ARCH(AARCH64) -# include <Kernel/Arch/aarch64/Spinlock.h> -#else -# error "Unknown architecture" -#endif +namespace Kernel { + +class Spinlock { + AK_MAKE_NONCOPYABLE(Spinlock); + AK_MAKE_NONMOVABLE(Spinlock); + +public: + Spinlock(LockRank rank = LockRank::None) + : m_rank(rank) + { + } + + u32 lock(); + void unlock(u32 prev_flags); + + [[nodiscard]] ALWAYS_INLINE bool is_locked() const + { + return m_lock.load(AK::memory_order_relaxed) != 0; + } + + ALWAYS_INLINE void initialize() + { + m_lock.store(0, AK::memory_order_relaxed); + } + +private: + Atomic<u8> m_lock { 0 }; + const LockRank m_rank; +}; + +class RecursiveSpinlock { + AK_MAKE_NONCOPYABLE(RecursiveSpinlock); + AK_MAKE_NONMOVABLE(RecursiveSpinlock); + +public: + RecursiveSpinlock(LockRank rank = LockRank::None) + : m_rank(rank) + { + } + + u32 lock(); + void unlock(u32 prev_flags); + + [[nodiscard]] ALWAYS_INLINE bool is_locked() const + { + return m_lock.load(AK::memory_order_relaxed) != 0; + } + + [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const + { + return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()); + } + + ALWAYS_INLINE void initialize() + { + m_lock.store(0, AK::memory_order_relaxed); + } + +private: + Atomic<FlatPtr> m_lock { 0 }; + u32 m_recursions { 0 }; + const LockRank m_rank; +}; + +} diff --git a/Kernel/Arch/aarch64/Spinlock.cpp b/Kernel/Arch/aarch64/Spinlock.cpp new file mode 100644 index 0000000000..fe549aa450 --- /dev/null +++ b/Kernel/Arch/aarch64/Spinlock.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022, Timon Kruiper + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include <Kernel/Arch/Spinlock.h> + +namespace 
Kernel { + +u32 Spinlock::lock() +{ + VERIFY_NOT_REACHED(); + return 0; +} + +void Spinlock::unlock(u32) +{ + VERIFY_NOT_REACHED(); +} + +u32 RecursiveSpinlock::lock() +{ + VERIFY_NOT_REACHED(); + return 0; +} + +void RecursiveSpinlock::unlock(u32) +{ + VERIFY_NOT_REACHED(); +} + +} diff --git a/Kernel/Arch/aarch64/Spinlock.h b/Kernel/Arch/aarch64/Spinlock.h deleted file mode 100644 index ac763a55fc..0000000000 --- a/Kernel/Arch/aarch64/Spinlock.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2020, Andreas Kling - * - * SPDX-License-Identifier: BSD-2-Clause - */ - -#pragma once - -#include <AK/Atomic.h> -#include <AK/Types.h> -#include <Kernel/Locking/LockRank.h> - -namespace Kernel { - -class Spinlock { - AK_MAKE_NONCOPYABLE(Spinlock); - AK_MAKE_NONMOVABLE(Spinlock); - -public: - Spinlock(LockRank rank = LockRank::None) - { - (void)rank; - } - - ALWAYS_INLINE u32 lock() - { - VERIFY_NOT_REACHED(); - return 0; - } - - ALWAYS_INLINE void unlock(u32 /*prev_flags*/) - { - VERIFY_NOT_REACHED(); - } - - [[nodiscard]] ALWAYS_INLINE bool is_locked() const - { - VERIFY_NOT_REACHED(); - return false; - } - - ALWAYS_INLINE void initialize() - { - VERIFY_NOT_REACHED(); - } -}; - -class RecursiveSpinlock { - AK_MAKE_NONCOPYABLE(RecursiveSpinlock); - AK_MAKE_NONMOVABLE(RecursiveSpinlock); - -public: - RecursiveSpinlock(LockRank rank = LockRank::None) - { - (void)rank; - VERIFY_NOT_REACHED(); - } - - ALWAYS_INLINE u32 lock() - { - VERIFY_NOT_REACHED(); - return 0; - } - - ALWAYS_INLINE void unlock(u32 /*prev_flags*/) - { - VERIFY_NOT_REACHED(); - } - - [[nodiscard]] ALWAYS_INLINE bool is_locked() const - { - VERIFY_NOT_REACHED(); - return false; - } - - [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const - { - VERIFY_NOT_REACHED(); - return false; - } - - ALWAYS_INLINE void initialize() - { - VERIFY_NOT_REACHED(); - } -}; - -} diff --git a/Kernel/Arch/x86/Spinlock.h b/Kernel/Arch/x86/Spinlock.h deleted file mode 100644 index 54af902754..0000000000 --- a/Kernel/Arch/x86/Spinlock.h +++ /dev/null @@ -1,79 
+0,0 @@ -/* - * Copyright (c) 2020-2022, Andreas Kling - * - * SPDX-License-Identifier: BSD-2-Clause - */ - -#pragma once - -#include <Kernel/Arch/Processor.h> -#include <Kernel/Locking/LockRank.h> - -#include <AK/Platform.h> -VALIDATE_IS_X86() - -namespace Kernel { - -class Spinlock { - AK_MAKE_NONCOPYABLE(Spinlock); - AK_MAKE_NONMOVABLE(Spinlock); - -public: - Spinlock(LockRank rank = LockRank::None) - : m_rank(rank) - { - } - - u32 lock(); - void unlock(u32 prev_flags); - - [[nodiscard]] ALWAYS_INLINE bool is_locked() const - { - return m_lock.load(AK::memory_order_relaxed) != 0; - } - - ALWAYS_INLINE void initialize() - { - m_lock.store(0, AK::memory_order_relaxed); - } - -private: - Atomic<u8> m_lock { 0 }; - const LockRank m_rank; -}; - -class RecursiveSpinlock { - AK_MAKE_NONCOPYABLE(RecursiveSpinlock); - AK_MAKE_NONMOVABLE(RecursiveSpinlock); - -public: - RecursiveSpinlock(LockRank rank = LockRank::None) - : m_rank(rank) - { - } - - u32 lock(); - void unlock(u32 prev_flags); - - [[nodiscard]] ALWAYS_INLINE bool is_locked() const - { - return m_lock.load(AK::memory_order_relaxed) != 0; - } - - [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const - { - return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()); - } - - ALWAYS_INLINE void initialize() - { - m_lock.store(0, AK::memory_order_relaxed); - } - -private: - Atomic<FlatPtr> m_lock { 0 }; - u32 m_recursions { 0 }; - const LockRank m_rank; -}; - -} diff --git a/Kernel/Arch/x86/common/Spinlock.cpp b/Kernel/Arch/x86/common/Spinlock.cpp index 2da4509bea..e442377598 100644 --- a/Kernel/Arch/x86/common/Spinlock.cpp +++ b/Kernel/Arch/x86/common/Spinlock.cpp @@ -4,7 +4,7 @@ * SPDX-License-Identifier: BSD-2-Clause */ -#include <Kernel/Arch/x86/Spinlock.h> +#include <Kernel/Arch/Spinlock.h> namespace Kernel { diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt index acc4cbf13c..6619d410d0 100644 --- a/Kernel/CMakeLists.txt +++ b/Kernel/CMakeLists.txt @@ -421,6 +421,7 @@ else() Arch/aarch64/SafeMem.cpp Arch/aarch64/ScopedCritical.cpp Arch/aarch64/SmapDisabler.cpp + Arch/aarch64/Spinlock.cpp 
Arch/aarch64/init.cpp Arch/aarch64/vector_table.S