From 026f37b031abc4948505894b6d6a98402e334716 Mon Sep 17 00:00:00 2001
From: Timon Kruiper
Date: Tue, 23 Aug 2022 22:14:07 +0200
Subject: [PATCH] Kernel: Move Spinlock functions back to arch independent
 Locking folder

Now that the Spinlock code does not depend on architecture-specific code
anymore, we can move it back to the Locking folder. This also means that
the Spinlock implementation is now used for the aarch64 kernel.
---
 Kernel/Arch/Spinlock.h                        | 80 ------------------
 Kernel/Arch/aarch64/Dummy.cpp                 |  8 ++
 Kernel/Arch/aarch64/Spinlock.cpp              | 32 --------
 Kernel/CMakeLists.txt                         |  5 +-
 .../{Arch/x86/common => Locking}/Spinlock.cpp |  2 +-
 Kernel/Locking/Spinlock.h                     | 64 ++++++++++++++-
 6 files changed, 75 insertions(+), 116 deletions(-)
 delete mode 100644 Kernel/Arch/Spinlock.h
 delete mode 100644 Kernel/Arch/aarch64/Spinlock.cpp
 rename Kernel/{Arch/x86/common => Locking}/Spinlock.cpp (98%)

diff --git a/Kernel/Arch/Spinlock.h b/Kernel/Arch/Spinlock.h
deleted file mode 100644
index 05709d5f87..0000000000
--- a/Kernel/Arch/Spinlock.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2020-2022, Andreas Kling
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include
-#include
-
-namespace Kernel {
-
-class Spinlock {
-    AK_MAKE_NONCOPYABLE(Spinlock);
-    AK_MAKE_NONMOVABLE(Spinlock);
-
-public:
-    Spinlock(LockRank rank)
-        : m_rank(rank)
-    {
-    }
-
-    InterruptsState lock();
-    void unlock(InterruptsState);
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
-    {
-        // FIXME: Implement Spinlock on aarch64
-#if ARCH(AARCH64)
-        return true;
-#endif
-        return m_lock.load(AK::memory_order_relaxed) != 0;
-    }
-
-    ALWAYS_INLINE void initialize()
-    {
-        m_lock.store(0, AK::memory_order_relaxed);
-    }
-
-private:
-    Atomic<u8> m_lock { 0 };
-    const LockRank m_rank;
-};
-
-class RecursiveSpinlock {
-    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
-    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
-
-public:
-    RecursiveSpinlock(LockRank rank)
-        : m_rank(rank)
-    {
-    }
-
-    InterruptsState lock();
-    void unlock(InterruptsState);
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) != 0;
-    }
-
-    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
-    {
-        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
-    }
-
-    ALWAYS_INLINE void initialize()
-    {
-        m_lock.store(0, AK::memory_order_relaxed);
-    }
-
-private:
-    Atomic<FlatPtr> m_lock { 0 };
-    u32 m_recursions { 0 };
-    const LockRank m_rank;
-};
-
-}
diff --git a/Kernel/Arch/aarch64/Dummy.cpp b/Kernel/Arch/aarch64/Dummy.cpp
index 4fc958f271..4bab613404 100644
--- a/Kernel/Arch/aarch64/Dummy.cpp
+++ b/Kernel/Arch/aarch64/Dummy.cpp
@@ -50,6 +50,14 @@ void Mutex::unlock()
 
 }
 
+// LockRank
+namespace Kernel {
+
+void track_lock_acquire(LockRank) { }
+void track_lock_release(LockRank) { }
+
+}
+
 // Inode
 namespace Kernel {
 
diff --git a/Kernel/Arch/aarch64/Spinlock.cpp b/Kernel/Arch/aarch64/Spinlock.cpp
deleted file mode 100644
index bcb81dd2c9..0000000000
--- a/Kernel/Arch/aarch64/Spinlock.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2022, Timon Kruiper
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/Arch/Spinlock.h>
-
-// FIXME: Actually implement the correct logic once the aarch64 build can
-// do interrupts and/or has support for multiple processors.
-
-namespace Kernel {
-
-InterruptsState Spinlock::lock()
-{
-    return InterruptsState::Disabled;
-}
-
-void Spinlock::unlock(InterruptsState)
-{
-}
-
-InterruptsState RecursiveSpinlock::lock()
-{
-    return InterruptsState::Disabled;
-}
-
-void RecursiveSpinlock::unlock(InterruptsState)
-{
-}
-
-}
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index acdee8de2a..790bcc9b77 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -202,6 +202,7 @@ set(KERNEL_SOURCES
     MiniStdLib.cpp
     Locking/LockRank.cpp
     Locking/Mutex.cpp
+    Locking/Spinlock.cpp
     Net/Intel/E1000ENetworkAdapter.cpp
     Net/Intel/E1000NetworkAdapter.cpp
     Net/NE2000/NetworkAdapter.cpp
@@ -333,7 +334,6 @@ if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
         Arch/x86/common/ScopedCritical.cpp
         Arch/x86/common/SmapDisabler.cpp
-        Arch/x86/common/Spinlock.cpp
     )
 
     set(KERNEL_SOURCES
@@ -464,7 +464,6 @@ else()
        Arch/aarch64/SafeMem.cpp
        Arch/aarch64/ScopedCritical.cpp
        Arch/aarch64/SmapDisabler.cpp
-       Arch/aarch64/Spinlock.cpp
        Arch/aarch64/vector_table.S
 
        # Files from base Kernel
@@ -475,6 +474,8 @@ else()
        Graphics/Console/BootFramebufferConsole.cpp
        Graphics/Console/GenericFramebufferConsole.cpp
 
+       Locking/Spinlock.cpp
+
        Memory/AddressSpace.cpp
        Memory/AnonymousVMObject.cpp
        Memory/InodeVMObject.cpp
diff --git a/Kernel/Arch/x86/common/Spinlock.cpp b/Kernel/Locking/Spinlock.cpp
similarity index 98%
rename from Kernel/Arch/x86/common/Spinlock.cpp
rename to Kernel/Locking/Spinlock.cpp
index 6cf3cfbae5..7c1f57a2a1 100644
--- a/Kernel/Arch/x86/common/Spinlock.cpp
+++ b/Kernel/Locking/Spinlock.cpp
@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-#include <Kernel/Arch/Spinlock.h>
+#include <Kernel/Locking/Spinlock.h>
 
 namespace Kernel {
 
diff --git a/Kernel/Locking/Spinlock.h b/Kernel/Locking/Spinlock.h
index b424aa689d..a7177d4209 100644
--- a/Kernel/Locking/Spinlock.h
+++ b/Kernel/Locking/Spinlock.h
@@ -8,11 +8,73 @@
 
 #include
 #include
-#include <Kernel/Arch/Spinlock.h>
+#include
 #include
 
 namespace Kernel {
 
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank)
+        : m_rank(rank)
+    {
+    }
+
+    InterruptsState lock();
+    void unlock(InterruptsState);
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<u8> m_lock { 0 };
+    const LockRank m_rank;
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank)
+        : m_rank(rank)
+    {
+    }
+
+    InterruptsState lock();
+    void unlock(InterruptsState);
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<FlatPtr> m_lock { 0 };
+    u32 m_recursions { 0 };
+    const LockRank m_rank;
+};
+
 template<typename LockType>
 class [[nodiscard]] SpinlockLocker {
     AK_MAKE_NONCOPYABLE(SpinlockLocker);
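
Editor's note (illustrative, not part of the patch): the Spinlock above can live in the
arch-independent Locking folder because its lock word is just an atomic integer; only the
interrupt masking behind InterruptsState and the LockRank tracking touch architecture code.
As a rough sketch of the underlying test-and-test-and-set technique, here is a minimal,
self-contained C++ program using standard <atomic> instead of AK::Atomic. The name
SimpleSpinlock and the whole program are hypothetical and deliberately omit interrupt state
and lock-rank handling, so this is not the SerenityOS implementation.

// Standalone sketch of an arch-independent spinlock built on standard C++ atomics.
// It omits interrupt masking (InterruptsState), LockRank tracking, and Processor hooks.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

class SimpleSpinlock {
public:
    void lock()
    {
        // Spin until we observe the lock free and atomically take it (acquire ordering).
        while (m_lock.exchange(1, std::memory_order_acquire) != 0) {
            // Relaxed loads while waiting avoid hammering the cache line with writes.
            while (m_lock.load(std::memory_order_relaxed) != 0) { }
        }
    }

    void unlock()
    {
        // Release ordering publishes all writes made inside the critical section.
        m_lock.store(0, std::memory_order_release);
    }

    bool is_locked() const { return m_lock.load(std::memory_order_relaxed) != 0; }

private:
    std::atomic<uint8_t> m_lock { 0 };
};

int main()
{
    SimpleSpinlock lock;
    uint64_t counter = 0;

    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i) {
        threads.emplace_back([&] {
            for (int j = 0; j < 100000; ++j) {
                lock.lock();
                ++counter; // protected by the spinlock
                lock.unlock();
            }
        });
    }
    for (auto& t : threads)
        t.join();

    std::printf("counter = %llu\n", (unsigned long long)counter); // expect 400000
    return 0;
}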
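
A similar sketch for the recursive variant: the patch's RecursiveSpinlock stores
FlatPtr(&Processor::current()) in its lock word so the owning processor can re-enter the lock,
counting nesting in m_recursions. The hypothetical SimpleRecursiveSpinlock below mimics that
idea with a hashed std::thread::id standing in for the processor pointer; again, interrupts
and LockRank are left out, so treat it only as an illustration of the technique.

// Standalone sketch of a recursive spinlock: the lock word holds an "owner" identity so the
// owner may re-acquire it, and a recursion counter tracks nesting depth.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <thread>

class SimpleRecursiveSpinlock {
public:
    void lock()
    {
        uintptr_t self = owner_id();
        uintptr_t expected = 0;
        // Try to take the lock; if we already own it, just fall through and bump the count.
        while (!m_owner.compare_exchange_weak(expected, self,
            std::memory_order_acquire, std::memory_order_relaxed)) {
            if (expected == self)
                break;    // re-entrant acquisition by the current owner
            expected = 0; // lock held by someone else (or spurious failure): retry from "unlocked"
        }
        ++m_recursions;
    }

    void unlock()
    {
        assert(m_owner.load(std::memory_order_relaxed) == owner_id());
        if (--m_recursions == 0)
            m_owner.store(0, std::memory_order_release);
    }

private:
    static uintptr_t owner_id()
    {
        // Hash the thread id into a nonzero integer standing in for &Processor::current().
        return std::hash<std::thread::id> {}(std::this_thread::get_id()) | 1;
    }

    std::atomic<uintptr_t> m_owner { 0 };
    uint32_t m_recursions { 0 };
};

int main()
{
    SimpleRecursiveSpinlock lock;
    lock.lock();
    lock.lock();   // the same thread may re-enter
    lock.unlock();
    lock.unlock(); // fully released here
    return 0;
}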