diff --git a/Kernel/Arch/x86/Spinlock.h b/Kernel/Arch/x86/Spinlock.h
index 9331e689ab..54af902754 100644
--- a/Kernel/Arch/x86/Spinlock.h
+++ b/Kernel/Arch/x86/Spinlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Andreas Kling
+ * Copyright (c) 2020-2022, Andreas Kling
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -24,30 +24,8 @@ public:
     {
     }
 
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        Processor::enter_critical();
-        cli();
-        while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
-            Processor::wait_check();
-        }
-        track_lock_acquire(m_rank);
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(is_locked());
-        track_lock_release(m_rank);
-        m_lock.store(0, AK::memory_order_release);
-        if ((prev_flags & 0x200) != 0)
-            sti();
-        else
-            cli();
-
-        Processor::leave_critical();
-    }
+    u32 lock();
+    void unlock(u32 prev_flags);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
@@ -74,41 +52,8 @@ public:
     {
     }
 
-    ALWAYS_INLINE u32 lock()
-    {
-        u32 prev_flags = cpu_flags();
-        cli();
-        Processor::enter_critical();
-        auto& proc = Processor::current();
-        FlatPtr cpu = FlatPtr(&proc);
-        FlatPtr expected = 0;
-        while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
-            if (expected == cpu)
-                break;
-            Processor::wait_check();
-            expected = 0;
-        }
-        if (m_recursions == 0)
-            track_lock_acquire(m_rank);
-        m_recursions++;
-        return prev_flags;
-    }
-
-    ALWAYS_INLINE void unlock(u32 prev_flags)
-    {
-        VERIFY(m_recursions > 0);
-        VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
-        if (--m_recursions == 0) {
-            track_lock_release(m_rank);
-            m_lock.store(0, AK::memory_order_release);
-        }
-        if ((prev_flags & 0x200) != 0)
-            sti();
-        else
-            cli();
-
-        Processor::leave_critical();
-    }
+    u32 lock();
+    void unlock(u32 prev_flags);
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
     {
diff --git a/Kernel/Arch/x86/common/Spinlock.cpp b/Kernel/Arch/x86/common/Spinlock.cpp
new file mode 100644
index 0000000000..2da4509bea
--- /dev/null
+++ b/Kernel/Arch/x86/common/Spinlock.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020-2022, Andreas Kling
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/x86/Spinlock.h>
+
+namespace Kernel {
+
+u32 Spinlock::lock()
+{
+    u32 prev_flags = cpu_flags();
+    Processor::enter_critical();
+    cli();
+    while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
+        Processor::wait_check();
+    track_lock_acquire(m_rank);
+    return prev_flags;
+}
+
+void Spinlock::unlock(u32 prev_flags)
+{
+    VERIFY(is_locked());
+    track_lock_release(m_rank);
+    m_lock.store(0, AK::memory_order_release);
+    if ((prev_flags & 0x200) != 0)
+        sti();
+    else
+        cli();
+
+    Processor::leave_critical();
+}
+
+u32 RecursiveSpinlock::lock()
+{
+    u32 prev_flags = cpu_flags();
+    cli();
+    Processor::enter_critical();
+    auto& proc = Processor::current();
+    FlatPtr cpu = FlatPtr(&proc);
+    FlatPtr expected = 0;
+    while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
+        if (expected == cpu)
+            break;
+        Processor::wait_check();
+        expected = 0;
+    }
+    if (m_recursions == 0)
+        track_lock_acquire(m_rank);
+    m_recursions++;
+    return prev_flags;
+}
+
+void RecursiveSpinlock::unlock(u32 prev_flags)
+{
+    VERIFY(m_recursions > 0);
+    VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
+    if (--m_recursions == 0) {
+        track_lock_release(m_rank);
+        m_lock.store(0, AK::memory_order_release);
+    }
+    if ((prev_flags & 0x200) != 0)
+        sti();
+    else
+        cli();
+
+    Processor::leave_critical();
+}
+
+}
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index 4a9cfe39fe..fe7e851b0d 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -300,6 +300,7 @@ if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
         ${KERNEL_SOURCES}
         Arch/x86/common/ScopedCritical.cpp
         Arch/x86/common/SmapDisabler.cpp
+        Arch/x86/common/Spinlock.cpp
     )
     set(KERNEL_SOURCES