From 7f508059036e9b46b2b2482f372ff8cb439a3a47 Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Wed, 11 Aug 2021 00:47:07 +0200
Subject: [PATCH] Kernel/SMP: Fix RecursiveSpinLock remembering the wrong CPU
 when locking

We have to disable interrupts before capturing the current Processor*,
or we risk storing the wrong one if we get preempted and resume on a
different CPU.

Caught by the VERIFY in RecursiveSpinLock::unlock()
---
 Kernel/Locking/SpinLock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Kernel/Locking/SpinLock.h b/Kernel/Locking/SpinLock.h
index 0f83375f4c..fb7714e8a8 100644
--- a/Kernel/Locking/SpinLock.h
+++ b/Kernel/Locking/SpinLock.h
@@ -66,11 +66,11 @@ public:
 
     ALWAYS_INLINE u32 lock()
     {
+        u32 prev_flags = cpu_flags();
+        cli();
+        Processor::enter_critical();
         auto& proc = Processor::current();
         FlatPtr cpu = FlatPtr(&proc);
-        u32 prev_flags = cpu_flags();
-        Processor::enter_critical();
-        cli();
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
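
Not part of the patch itself, but for readability: a sketch of how the top of
RecursiveSpinLock::lock() reads once the hunk above is applied, with comments
restating why the ordering matters. The rest of the function is untouched by
this change and elided here.

    ALWAYS_INLINE u32 lock()
    {
        // Save the caller's interrupt flags, then disable interrupts so we
        // cannot be preempted and resumed on a different CPU from here on.
        u32 prev_flags = cpu_flags();
        cli();
        Processor::enter_critical();
        // Only now is it safe to capture the current Processor*: reading it
        // before cli() risks recording the wrong CPU as the lock owner, which
        // the VERIFY in RecursiveSpinLock::unlock() then trips on.
        auto& proc = Processor::current();
        FlatPtr cpu = FlatPtr(&proc);
        // ... compare-exchange loop on m_lock follows, unchanged by this patch.
    }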