Kernel: Don't hold scheduler lock while setting up blocker in Thread::block
This fixes a deadlock when one processor is trying to block while another is trying to unblock the same thread.
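For context, the failure mode is the classic ABBA lock-order inversion: the blocking thread held g_scheduler_lock while it was still setting up and registering its blocker, while the unblocking processor, presumably already holding a lock the registration path also needs, spins waiting for the scheduler lock. A minimal, self-contained illustration of the pattern (hypothetical code using std::mutex stand-ins, not the kernel's spinlocks):

```cpp
#include <mutex>
#include <thread>

// Hypothetical stand-ins for the two locks involved; this sketches the
// general ABBA deadlock shape, not SerenityOS's actual code paths.
std::mutex lock_a; // think: g_scheduler_lock
std::mutex lock_b; // think: a lock the blocker setup / unblock path needs

void blocking_cpu()
{
    std::scoped_lock a(lock_a); // holds A...
    std::scoped_lock b(lock_b); // ...then waits for B
}

void unblocking_cpu()
{
    std::scoped_lock b(lock_b); // holds B...
    std::scoped_lock a(lock_a); // ...then waits for A -> deadlock
}

int main()
{
    std::thread t1(blocking_cpu);
    std::thread t2(unblocking_cpu);
    t1.join();
    t2.join(); // never returns if the two threads interleave as above
}
```

By constructing the blocker before touching g_scheduler_lock, the change removes the window in which both acquisition orders can be in flight at once.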
Parent: 250a310454
Commit: ccaeb47401

1 changed file with 8 additions and 9 deletions
@@ -821,13 +821,14 @@ public:
         ASSERT(this == Thread::current());
         ScopedCritical critical;
         ASSERT(!s_mm_lock.own_lock());
-        ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock block_lock(m_block_lock);
         // We need to hold m_block_lock so that nobody can unblock a blocker as soon
         // as it is constructed and registered elsewhere
         m_in_block = true;
         T t(forward<Args>(args)...);
 
+        ScopedSpinLock scheduler_lock(g_scheduler_lock);
+
         // Relaxed semantics are fine for timeout_unblocked because we
         // synchronize on the spin locks already.
         Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
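The first hunk's entire effect follows from ScopedSpinLock being an RAII guard: moving the declaration moves the acquisition point, so the blocker T t(...) is now constructed while only m_block_lock is held. A rough sketch of that guard pattern (illustrative only; names and details are assumptions, not the SerenityOS implementation):

```cpp
#include <atomic>

// Minimal spinlock plus RAII guard in the spirit of ScopedSpinLock.
struct SpinLock {
    std::atomic<bool> taken { false };

    void lock()
    {
        // Spin until we observe the lock free and claim it.
        while (taken.exchange(true, std::memory_order_acquire))
            ;
    }
    void unlock() { taken.store(false, std::memory_order_release); }
};

class ScopedSpinLockSketch {
public:
    explicit ScopedSpinLockSketch(SpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock(); // acquired here, at the point of declaration
    }
    ~ScopedSpinLockSketch()
    {
        if (m_held)
            m_lock.unlock(); // released at end of scope...
    }
    void unlock()
    {
        m_lock.unlock(); // ...or earlier, as block() does before yielding
        m_held = false;
    }

private:
    SpinLock& m_lock;
    bool m_held { true };
};
```

This is also why the second hunk can release both guards explicitly with unlock() before the yield loop: the early release is what keeps the scheduler lock out of the picture while the thread is parked.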
@@ -881,23 +882,20 @@ public:
             set_state(Thread::Blocked);
         }
 
+        scheduler_lock.unlock();
         block_lock.unlock();
 
+        dbgln<THREAD_DEBUG>("Thread {} blocking on {} ({}) -->", *this, &t, t.state_string());
         bool did_timeout = false;
-        auto previous_locked = LockMode::Unlocked;
         u32 lock_count_to_restore = 0;
+        auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
         for (;;) {
-            scheduler_lock.unlock();
-
             // Yield to the scheduler, and wait for us to resume unblocked.
-            if (previous_locked == LockMode::Unlocked)
-                previous_locked = unlock_process_if_locked(lock_count_to_restore);
-
             ASSERT(!g_scheduler_lock.own_lock());
             ASSERT(Processor::current().in_critical());
             yield_while_not_holding_big_lock();
+            ASSERT(Processor::current().in_critical());
 
-            scheduler_lock.lock();
             ScopedSpinLock block_lock2(m_block_lock);
             if (should_be_stopped() || state() == Stopped) {
                 dbgln("Thread should be stopped, current state: {}", state_string());
@@ -918,11 +916,13 @@ public:
                 ASSERT(m_blocker == &t);
                 m_blocker = nullptr;
             }
+            dbgln<THREAD_DEBUG>("<-- Thread {} unblocked from {} ({})", *this, &t, t.state_string());
             m_in_block = false;
             break;
         }
 
         if (t.was_interrupted_by_signal()) {
+            ScopedSpinLock scheduler_lock(g_scheduler_lock);
             ScopedSpinLock lock(m_lock);
             dispatch_one_pending_signal();
         }
@@ -931,7 +931,6 @@ public:
         // to clean up now while we're still holding m_lock
         auto result = t.end_blocking({}, did_timeout); // calls was_unblocked internally
 
-        scheduler_lock.unlock();
         if (timer && !did_timeout) {
             // Cancel the timer while not holding any locks. This allows
             // the timer function to complete before we remove it
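Piecing the new-side lines of all four hunks together, the locking sequence of Thread::block() after this change looks roughly as follows (a heavily elided sketch assembled from the diff; not a verbatim copy of the surrounding header):

```cpp
ScopedCritical critical;
ScopedSpinLock block_lock(m_block_lock);         // taken first, on its own
m_in_block = true;
T t(forward<Args>(args)...);                     // blocker built without g_scheduler_lock

ScopedSpinLock scheduler_lock(g_scheduler_lock); // only now
// ... set_state(Thread::Blocked) if we should actually block ...

scheduler_lock.unlock();                         // both dropped before yielding
block_lock.unlock();

auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
for (;;) {
    yield_while_not_holding_big_lock();          // the loop no longer re-takes scheduler_lock
    ScopedSpinLock block_lock2(m_block_lock);
    // ... on wakeup: clear m_blocker, m_in_block = false, break ...
}

if (t.was_interrupted_by_signal()) {
    ScopedSpinLock scheduler_lock(g_scheduler_lock); // re-taken only for signal dispatch
    ScopedSpinLock lock(m_lock);
    dispatch_one_pending_signal();
}
```

The net effect is that g_scheduler_lock is held only across the short state transitions, never across blocker construction or the yield loop.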