1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-28 22:35:06 +00:00

Kernel: Enhance WaitQueue to remember pending wakes

If WaitQueue::wake_all, WaitQueue::wake_one, or WaitQueue::wake_n
is called but nobody is currently waiting, we should remember that
fact and prevent someone from waiting after such a request. This
solves a race condition where the Finalizer thread is notified
to finalize a thread, but it is not (yet) waiting on this queue.

Fixes #2693
This commit is contained in:
Tom 2020-07-05 15:46:51 -06:00 committed by Andreas Kling
parent 2a82a25fec
commit 9725bda63e
5 changed files with 93 additions and 27 deletions

View file

@@ -54,6 +55,7 @@ void Lock::lock(Mode mode)
for (;;) {
bool expected = false;
if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
do {
// FIXME: Do not add new readers if writers are queued.
bool modes_dont_conflict = !modes_conflict(m_mode, mode);
bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
@@ -66,8 +67,7 @@ void Lock::lock(Mode mode)
m_lock.store(false, AK::memory_order_release);
return;
}
timeval* timeout = nullptr;
current_thread->wait_on(m_queue, m_name, timeout, &m_lock, m_holder);
} while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
} else if (Processor::current().in_critical()) {
// If we're in a critical section and trying to lock, no context
// switch will happen, so yield.

View file

@@ -872,12 +872,18 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
// we need to wait until the scheduler lock is released again
{
ScopedSpinLock sched_lock(g_scheduler_lock);
if (!queue.enqueue(*Thread::current())) {
// The WaitQueue was already requested to wake someone when
// nobody was waiting. So return right away as we shouldn't
// be waiting
return BlockResult::NotBlocked;
}
did_unlock = unlock_process_if_locked();
if (lock)
*lock = false;
set_state(State::Queued);
m_wait_reason = reason;
queue.enqueue(*Thread::current());
if (timeout) {

View file

@@ -301,6 +301,7 @@ public:
enum class BlockResult {
WokeNormally,
NotBlocked,
InterruptedBySignal,
InterruptedByDeath,
InterruptedByTimeout,

View file

@@ -27,8 +27,11 @@
#include <Kernel/Thread.h>
#include <Kernel/WaitQueue.h>
//#define WAITQUEUE_DEBUG
namespace Kernel {
// A WaitQueue starts with no waiting threads and no pending wake request
// (m_wake_requested has an in-class initializer of false), so the
// hand-written empty body adds nothing — let the compiler generate it.
WaitQueue::WaitQueue() = default;
@@ -37,10 +40,20 @@ WaitQueue::~WaitQueue()
{
}
void WaitQueue::enqueue(Thread& thread)
// Try to put `thread` on the queue so a later wake_* call can unblock it.
// Returns false — without enqueuing — when a wake was already requested
// while the queue was empty; the caller must not block in that case
// (this closes the race where the waker runs before the waiter).
bool WaitQueue::enqueue(Thread& thread)
{
    ScopedSpinLock queue_lock(m_lock);
    if (!m_wake_requested) {
        // Normal path: nobody has tried to wake us yet, so wait.
        m_threads.append(thread);
        return true;
    }
    // A pending wake was recorded; consume it instead of blocking.
    m_wake_requested = false;
#ifdef WAITQUEUE_DEBUG
    dbg() << "WaitQueue " << VirtualAddress(this) << ": enqueue: wake_all pending";
#endif
    return false;
}
void WaitQueue::wake_one(Atomic<bool>* lock)
@@ -48,42 +61,87 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
ScopedSpinLock queue_lock(m_lock);
if (lock)
*lock = false;
if (m_threads.is_empty())
if (m_threads.is_empty()) {
// Save the fact that a wake was requested
m_wake_requested = true;
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: nobody to wake, mark as pending";
#endif
return;
if (auto* thread = m_threads.take_first())
}
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one:";
#endif
auto* thread = m_threads.take_first();
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: wake thread " << *thread;
#endif
thread->wake_from_queue();
m_wake_requested = false;
Scheduler::yield();
}
void WaitQueue::wake_n(i32 wake_count)
void WaitQueue::wake_n(u32 wake_count)
{
ScopedSpinLock queue_lock(m_lock);
if (m_threads.is_empty())
if (m_threads.is_empty()) {
// Save the fact that a wake was requested
m_wake_requested = true;
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: nobody to wake, mark as pending";
#endif
return;
}
for (i32 i = 0; i < wake_count; ++i) {
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: " << wake_count;
#endif
for (u32 i = 0; i < wake_count; ++i) {
Thread* thread = m_threads.take_first();
if (!thread)
break;
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: wake thread " << *thread;
#endif
thread->wake_from_queue();
}
m_wake_requested = false;
Scheduler::yield();
}
void WaitQueue::wake_all()
{
ScopedSpinLock queue_lock(m_lock);
if (m_threads.is_empty())
if (m_threads.is_empty()) {
// Save the fact that a wake was requested
m_wake_requested = true;
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: nobody to wake, mark as pending";
#endif
return;
while (!m_threads.is_empty())
m_threads.take_first()->wake_from_queue();
}
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: ";
#endif
while (!m_threads.is_empty()) {
Thread* thread = m_threads.take_first();
#ifdef WAITQUEUE_DEBUG
dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: wake thread " << *thread;
#endif
thread->wake_from_queue();
}
m_wake_requested = false;
Scheduler::yield();
}
// Reset the queue: discard every queued thread and forget any wake
// request that was recorded while the queue was empty.
void WaitQueue::clear()
{
    ScopedSpinLock queue_lock(m_lock);
#ifdef WAITQUEUE_DEBUG
    dbg() << "WaitQueue " << VirtualAddress(this) << ": clear";
#endif
    m_wake_requested = false;
    m_threads.clear();
}
}

View file

@@ -38,9 +38,9 @@ public:
WaitQueue();
~WaitQueue();
void enqueue(Thread&);
bool enqueue(Thread&);
void wake_one(Atomic<bool>* lock = nullptr);
void wake_n(i32 wake_count);
void wake_n(u32 wake_count);
void wake_all();
void clear();
@@ -48,6 +48,7 @@ private:
typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
ThreadList m_threads;
SpinLock<u32> m_lock;
bool m_wake_requested { false };
};
}