1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 03:57:43 +00:00

Kernel: Fix a few deadlocks with Thread::m_lock and g_scheduler_lock

g_scheduler_lock cannot safely be acquired after Thread::m_lock
because another processor may already hold g_scheduler_lock and wait
for the same Thread::m_lock.
This commit is contained in:
Tom 2020-10-25 20:22:59 -06:00 committed by Andreas Kling
parent 8c764319ad
commit 1e2e3eed62
6 changed files with 55 additions and 37 deletions

View file

@@ -422,28 +422,31 @@ public:
{
T t(forward<Args>(args)...);
ScopedSpinLock lock(m_lock);
// We should never be blocking a blocked (or otherwise non-active) thread.
ASSERT(state() == Thread::Running);
ASSERT(m_blocker == nullptr);
{
ScopedSpinLock lock(m_lock);
// We should never be blocking a blocked (or otherwise non-active) thread.
ASSERT(state() == Thread::Running);
ASSERT(m_blocker == nullptr);
if (t.should_unblock(*this)) {
// Don't block if the wake condition is already met
return BlockResult::NotBlocked;
if (t.should_unblock(*this)) {
// Don't block if the wake condition is already met
return BlockResult::NotBlocked;
}
m_blocker = &t;
m_blocker_timeout = t.override_timeout(timeout);
}
m_blocker = &t;
m_blocker_timeout = t.override_timeout(timeout);
set_state(Thread::Blocked);
// Release our lock
lock.unlock();
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
set_state(Thread::Blocked);
}
// Yield to the scheduler, and wait for us to resume unblocked.
yield_without_holding_big_lock();
// Acquire our lock again
lock.lock();
ScopedSpinLock lock(m_lock);
// We should no longer be blocked once we woke up
ASSERT(state() != Thread::Blocked);