Kernel: Fix Lock racing to the WaitQueue
There was a time window between releasing Lock::m_lock and calling into the lock's WaitQueue where someone else could take m_lock and bring two threads into a deadlock situation. Fix this issue by holding Lock::m_lock until interrupts are disabled by either Thread::wait_on() or WaitQueue::wake_one().
Commit 41376d4662 (parent 61e6b1fb7c)

5 changed files with 13 additions and 10 deletions
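To make the race concrete: the old Lock::lock() released the internal m_lock flag and only then parked the calling thread on the lock's WaitQueue. In that window another thread could grab m_lock, run Lock::unlock(), and call wake_one() on a still-empty queue, so the wakeup was lost and the first thread went to sleep with nobody left to wake it. Below is a simplified before/after of the waiter path, reconstructed from the Lock.cpp hunks that follow (the surrounding retry loop and holder/level bookkeeping are elided):

    // Before: the guard flag is dropped before the thread is queued.
    m_lock.store(false, AK::memory_order_release); // <-- race window opens here
    current->wait_on(m_queue, m_holder, m_name);   // enqueue + sleep happen too late

    // After: m_lock stays held across the call; Thread::wait_on() clears it only
    // once interrupts are disabled, so releasing m_lock and enqueuing the thread
    // can no longer be separated by a context switch.
    current->wait_on(m_queue, &m_lock, m_holder, m_name);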
Kernel/Lock.cpp

@@ -18,8 +18,7 @@ void Lock::lock()
                 m_lock.store(false, AK::memory_order_release);
                 return;
             }
-            m_lock.store(false, AK::memory_order_release);
-            current->wait_on(m_queue, m_holder, m_name);
+            current->wait_on(m_queue, &m_lock, m_holder, m_name);
         }
     }
 }
@@ -37,8 +36,7 @@ void Lock::unlock()
                 return;
             }
             m_holder = nullptr;
-            m_queue.wake_one();
-            m_lock.store(false, AK::memory_order_release);
+            m_queue.wake_one(&m_lock);
             return;
         }
         // I don't know *who* is using "m_lock", so just yield.
@@ -66,8 +64,7 @@ bool Lock::unlock_if_locked()
                 return false;
             }
             m_holder = nullptr;
-            m_lock.store(false, AK::memory_order_release);
-            m_queue.wake_one();
+            m_queue.wake_one(&m_lock);
             return true;
         }
         // I don't know *who* is using "m_lock", so just yield.
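The release side mirrors this change: rather than waking the queue and then clearing m_lock as two separate steps, Lock::unlock() and Lock::unlock_if_locked() now hand m_lock to wake_one(), which clears it under its own InterruptDisabler (see the WaitQueue.cpp hunk further down). A condensed sketch of the new final-release path, with the holder/level bookkeeping around it assumed:

    // Lock::unlock(), final release (sketch):
    m_holder = nullptr;
    m_queue.wake_one(&m_lock); // interrupts go off first, then m_lock is cleared
                               // and a waiter (if any) is dequeued, so a concurrent
                               // lock() cannot slip in between the two steps
    return;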
Kernel/Thread.cpp

@@ -783,10 +783,12 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
     return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")";
 }
 
-void Thread::wait_on(WaitQueue& queue, Thread* beneficiary, const char* reason)
+void Thread::wait_on(WaitQueue& queue, Atomic<bool>* lock, Thread* beneficiary, const char* reason)
 {
     bool did_unlock = unlock_process_if_locked();
     cli();
+    if (lock)
+        *lock = false;
     set_state(State::Queued);
     queue.enqueue(*current);
     // Yield and wait for the queue to wake us up again.
Kernel/Thread.h

@@ -1,5 +1,6 @@
 #pragma once
 
+#include <AK/Atomic.h>
 #include <AK/Function.h>
 #include <AK/IntrusiveList.h>
 #include <AK/OwnPtr.h>
@@ -300,7 +301,7 @@ public:
         return block<ConditionBlocker>(state_string, move(condition));
     }
 
-    void wait_on(WaitQueue& queue, Thread* beneficiary = nullptr, const char* reason = nullptr);
+    void wait_on(WaitQueue& queue, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr, const char* reason = nullptr);
     void wake_from_queue();
 
     void unblock();
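Because the new Atomic<bool>* parameter defaults to nullptr, existing callers of wait_on() keep working, and any caller that guards state with a simple spin flag can opt in. A hypothetical caller, for illustration only (wait_for_event and guard_flag are made-up names, not code from this commit):

    // Sleep on a queue while atomically releasing a spin flag that guards some
    // shared state; wait_on() clears the flag only after interrupts are disabled.
    void wait_for_event(WaitQueue& queue, Atomic<bool>& guard_flag)
    {
        // guard_flag is currently held (true) by this thread.
        current->wait_on(queue, &guard_flag);
        // We only get here after WaitQueue::wake_one()/wake_all() woke us.
    }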
Kernel/WaitQueue.cpp

@@ -15,9 +15,11 @@ void WaitQueue::enqueue(Thread& thread)
     m_threads.append(thread);
 }
 
-void WaitQueue::wake_one()
+void WaitQueue::wake_one(Atomic<bool>* lock)
 {
     InterruptDisabler disabler;
+    if (lock)
+        *lock = false;
     if (m_threads.is_empty())
         return;
     if (auto* thread = m_threads.take_first())
Kernel/WaitQueue.h

@@ -1,5 +1,6 @@
 #pragma once
 
+#include <AK/Atomic.h>
 #include <AK/SinglyLinkedList.h>
 #include <Kernel/Thread.h>
 
@@ -9,7 +10,7 @@ public:
     ~WaitQueue();
 
     void enqueue(Thread&);
-    void wake_one();
+    void wake_one(Atomic<bool>* lock = nullptr);
     void wake_all();
 
 private:
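The overall pattern — never drop the outer guard flag except inside the wait queue's own critical section — is not kernel-specific. Below is a minimal user-space analogue in standard C++ (an illustration of the idea only, not SerenityOS code), where a std::mutex stands in for cli()/InterruptDisabler:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Toy wait queue demonstrating the lock handoff: the caller's guard flag is
    // cleared only while the queue's internal mutex is held, so a waker can never
    // run in the gap between "release the guard" and "go to sleep".
    class ToyWaitQueue {
    public:
        void wait_on(std::atomic<bool>& guard)
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            guard.store(false, std::memory_order_release); // safe: a waker must take m_mutex first
            m_condition.wait(lock, [&] { return m_pending_wakeups > 0; });
            m_pending_wakeups--; // consume exactly one wakeup
        }

        void wake_one(std::atomic<bool>* guard = nullptr)
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            if (guard)
                guard->store(false, std::memory_order_release);
            m_pending_wakeups++;
            // notify_all keeps this sketch simple and correct; only one waiter can
            // consume the pending wakeup, the rest go back to sleep.
            m_condition.notify_all();
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_condition;
        int m_pending_wakeups { 0 };
    };

A lock built on top of this toy queue would spin on its own atomic flag and pass that flag into wait_on()/wake_one(), exactly as the kernel code above now does with Lock::m_lock.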