From ec330c2ce6df29101049b4d7c9e13fefebada430 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Wed, 17 Aug 2022 20:14:49 +0200
Subject: [PATCH] Kernel: Use consistent lock acquisition order in
 Thread::block*()

We want to grab g_scheduler_lock *before* Thread::m_block_lock.

This appears to have fixed a deadlock that I encountered while building
DOOM with make -j2.
---
 Kernel/Thread.cpp | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index c0460d6b14..0198c65fce 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -155,6 +155,8 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
     ScopedCritical critical;
     VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
 
+    SpinlockLocker scheduler_lock(g_scheduler_lock);
+
     SpinlockLocker block_lock(m_block_lock);
     // We need to hold m_block_lock so that nobody can unblock a blocker as soon
     // as it is constructed and registered elsewhere
@@ -168,7 +170,6 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
         return BlockResult::NotBlocked;
     }
 
-    SpinlockLocker scheduler_lock(g_scheduler_lock);
     // Relaxed semantics are fine for timeout_unblocked because we
     // synchronize on the spin locks already.
     Atomic<bool> timeout_unblocked(false);
@@ -212,8 +213,8 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
 
     set_state(Thread::State::Blocked);
 
-    scheduler_lock.unlock();
     block_lock.unlock();
+    scheduler_lock.unlock();
 
     dbgln_if(THREAD_DEBUG, "Thread {} blocking on {} ({}) -->", *this, &blocker, blocker.state_string());
     bool did_timeout = false;
@@ -296,8 +297,8 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
 
     set_state(Thread::State::Blocked);
 
-    scheduler_lock.unlock();
     block_lock.unlock();
+    scheduler_lock.unlock();
 
     lock_lock.unlock();
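
Editor's note (commentary, not part of the commit): a deadlock needs a cycle in
who-waits-for-whom. If block_impl() took m_block_lock first and then
g_scheduler_lock while some other code path (for example, one unblocking the
thread) took g_scheduler_lock first and then m_block_lock, each side could hold
one lock while spinning on the other forever. Making every path acquire
g_scheduler_lock before m_block_lock, as this patch does, breaks the cycle.
The sketch below is a minimal userspace analogy, not SerenityOS code: it uses
std::mutex as a stand-in for the kernel's spinlocks, and the function names
(blocker_path_buggy and friends) are invented for illustration.

#include <mutex>
#include <thread>

// Stand-ins for the two kernel locks. Assumption: plain mutexes model the
// ordering problem well enough, even though real spinlocks behave differently.
std::mutex scheduler_lock; // plays the role of g_scheduler_lock
std::mutex block_lock;     // plays the role of Thread::m_block_lock

// Deadlock-prone (hypothetical, pre-patch shape): this path disagrees with
// unblocker_path() on acquisition order. Run concurrently with it, each thread
// can end up holding one lock while waiting forever for the other.
void blocker_path_buggy()
{
    std::lock_guard<std::mutex> b(block_lock);     // holds "m_block_lock"...
    std::lock_guard<std::mutex> s(scheduler_lock); // ...waits for "g_scheduler_lock"
}

void unblocker_path()
{
    std::lock_guard<std::mutex> s(scheduler_lock); // holds "g_scheduler_lock"...
    std::lock_guard<std::mutex> b(block_lock);     // ...waits for "m_block_lock"
}

// After the patch: every path agrees on scheduler_lock -> block_lock, so no
// wait cycle can form. The release order (block_lock first) mirrors the
// reordered unlock() calls in the hunks above.
void blocker_path_fixed()
{
    std::unique_lock<std::mutex> s(scheduler_lock);
    std::unique_lock<std::mutex> b(block_lock);
    // ... register the blocker, mark the thread blocked ...
    b.unlock(); // block_lock.unlock();
    s.unlock(); // scheduler_lock.unlock();
}

int main()
{
    // Safe: both threads take the locks in the same global order.
    std::thread t1(blocker_path_fixed);
    std::thread t2(unblocker_path);
    t1.join();
    t2.join();
}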