Kernel: Enhance WaitQueue to remember pending wakes
If WaitQueue::wake_all, WaitQueue::wake_one, or WaitQueue::wake_n is called but nobody is currently waiting, we should remember that fact and prevent someone from waiting after such a request. This solves a race condition where the Finalizer thread is notified to finalize a thread, but it is not (yet) waiting on this queue.

Fixes #2693
parent 2a82a25fec
commit 9725bda63e
5 changed files with 93 additions and 27 deletions
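Context for the change: this is the classic lost-wakeup race. If a waker calls wake_*() an instant before the would-be waiter enqueues itself, the wake finds an empty queue and evaporates, and the waiter then sleeps forever. In the case of #2693, the Finalizer thread misses its notification and dying threads are never finalized. The commit closes the window by making the queue remember an unconsumed wake. Below is a minimal user-space sketch of the same pattern, with std::mutex and std::deque standing in for the kernel's SpinLock and IntrusiveList; SketchWaitQueue is an illustrative name, not kernel code.

#include <deque>
#include <mutex>

struct Thread {
};

class SketchWaitQueue {
public:
    // Returns false if a wake was already requested while the queue was
    // empty; the caller must not block in that case.
    bool enqueue(Thread& thread)
    {
        std::lock_guard lock(m_lock);
        if (m_wake_requested) {
            // A wake_*() ran before we got here; consume the pending wake
            // instead of going to sleep and missing it forever.
            m_wake_requested = false;
            return false;
        }
        m_threads.push_back(&thread);
        return true;
    }

    void wake_one()
    {
        std::lock_guard lock(m_lock);
        if (m_threads.empty()) {
            // Nobody is waiting yet: remember the request so the next
            // enqueue() returns immediately instead of blocking.
            m_wake_requested = true;
            return;
        }
        Thread* thread = m_threads.front();
        m_threads.pop_front();
        (void)thread; // a real implementation would wake `thread` here
        m_wake_requested = false;
    }

private:
    std::mutex m_lock; // the kernel uses SpinLock<u32>
    std::deque<Thread*> m_threads;
    bool m_wake_requested { false };
};

Both the flag check in enqueue() and the flag set in wake_one() happen under the same queue lock, which is what makes the handoff race-free.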
Kernel/Lock.cpp
@@ -54,20 +54,20 @@ void Lock::lock(Mode mode)
     for (;;) {
         bool expected = false;
         if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
-            // FIXME: Do not add new readers if writers are queued.
-            bool modes_dont_conflict = !modes_conflict(m_mode, mode);
-            bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
-            if (modes_dont_conflict || already_hold_exclusive_lock) {
-                // We got the lock!
-                if (!already_hold_exclusive_lock)
-                    m_mode = mode;
-                m_holder = current_thread;
-                m_times_locked++;
-                m_lock.store(false, AK::memory_order_release);
-                return;
-            }
-            timeval* timeout = nullptr;
-            current_thread->wait_on(m_queue, m_name, timeout, &m_lock, m_holder);
+            do {
+                // FIXME: Do not add new readers if writers are queued.
+                bool modes_dont_conflict = !modes_conflict(m_mode, mode);
+                bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
+                if (modes_dont_conflict || already_hold_exclusive_lock) {
+                    // We got the lock!
+                    if (!already_hold_exclusive_lock)
+                        m_mode = mode;
+                    m_holder = current_thread;
+                    m_times_locked++;
+                    m_lock.store(false, AK::memory_order_release);
+                    return;
+                }
+            } while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
         } else if (Processor::current().in_critical()) {
             // If we're in a critical section and trying to lock, no context
             // switch will happen, so yield.
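The structural point of the hunk above: wait_on() may now return without having blocked at all, so the acquisition logic is wrapped in a do/while that re-tests the lock state whenever it sees Thread::BlockResult::NotBlocked. A generic sketch of that caller-side contract, using a hypothetical helper rather than the kernel's real types:

// Any caller of a wait primitive that can return NotBlocked ("a wake was
// already pending, you never slept") has to loop and re-check its
// condition instead of assuming progress was made while sleeping.
enum class BlockResult {
    WokeNormally,
    NotBlocked,
};

template<typename TryAcquire, typename WaitOn>
void lock_like(TryAcquire try_acquire, WaitOn wait_on)
{
    for (;;) {
        do {
            if (try_acquire())
                return; // condition held, nothing to wait for
            // NotBlocked: a stale wake was consumed, so the state may
            // already have changed; test again before really sleeping.
        } while (wait_on() == BlockResult::NotBlocked);
        // WokeNormally: we slept and were woken; retry from the top.
    }
}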
Kernel/Thread.cpp
@@ -872,12 +872,18 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
     // we need to wait until the scheduler lock is released again
     {
         ScopedSpinLock sched_lock(g_scheduler_lock);
+        if (!queue.enqueue(*Thread::current())) {
+            // The WaitQueue was already requested to wake someone when
+            // nobody was waiting. So return right away as we shouldn't
+            // be waiting
+            return BlockResult::NotBlocked;
+        }
+
         did_unlock = unlock_process_if_locked();
         if (lock)
             *lock = false;
         set_state(State::Queued);
         m_wait_reason = reason;
-        queue.enqueue(*Thread::current());
 
 
         if (timeout) {
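Note the ordering this hunk establishes: the check-and-enqueue is one atomic step under the queue's internal lock, performed before the thread publishes any sleeping state (set_state(State::Queued)) or releases the caller-provided lock. A schematic restatement, reusing SketchWaitQueue and BlockResult from the sketches above (hypothetical function; the kernel steps are reduced to comments):

BlockResult wait_on_sketch(SketchWaitQueue& queue, Thread& current)
{
    // 1) Atomically consume a pending wake or join the queue.
    if (!queue.enqueue(current))
        return BlockResult::NotBlocked; // we never went to sleep
    // 2) Only now mark ourselves Queued and drop the caller's lock;
    //    any wake from this point on finds us in the thread list.
    return BlockResult::WokeNormally; // schematic: the real code blocks here
}

If step 1 and step 2 were swapped, a waker could still slip into the gap and the pending-wake flag would not help.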
Kernel/Thread.h
@@ -301,6 +301,7 @@ public:
 
     enum class BlockResult {
         WokeNormally,
+        NotBlocked,
         InterruptedBySignal,
         InterruptedByDeath,
         InterruptedByTimeout,
Kernel/WaitQueue.cpp
@@ -27,8 +27,11 @@
 #include <Kernel/Thread.h>
 #include <Kernel/WaitQueue.h>
 
+//#define WAITQUEUE_DEBUG
+
 namespace Kernel {
 
+
 WaitQueue::WaitQueue()
 {
 }
@@ -37,10 +40,20 @@ WaitQueue::~WaitQueue()
 {
 }
 
-void WaitQueue::enqueue(Thread& thread)
+bool WaitQueue::enqueue(Thread& thread)
 {
     ScopedSpinLock queue_lock(m_lock);
+    if (m_wake_requested) {
+        // wake_* was called when no threads were in the queue
+        // we shouldn't wait at all
+        m_wake_requested = false;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": enqueue: wake_all pending";
+#endif
+        return false;
+    }
     m_threads.append(thread);
+    return true;
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
@@ -48,42 +61,87 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
     ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: nobody to wake, mark as pending";
+#endif
         return;
-    if (auto* thread = m_threads.take_first())
-        thread->wake_from_queue();
+    }
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one:";
+#endif
+    auto* thread = m_threads.take_first();
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_one: wake thread " << *thread;
+#endif
+    thread->wake_from_queue();
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
-void WaitQueue::wake_n(i32 wake_count)
+void WaitQueue::wake_n(u32 wake_count)
 {
     ScopedSpinLock queue_lock(m_lock);
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: nobody to wake, mark as pending";
+#endif
         return;
+    }
 
-    for (i32 i = 0; i < wake_count; ++i) {
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: " << wake_count;
+#endif
+    for (u32 i = 0; i < wake_count; ++i) {
         Thread* thread = m_threads.take_first();
         if (!thread)
             break;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_n: wake thread " << *thread;
+#endif
         thread->wake_from_queue();
     }
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
 void WaitQueue::wake_all()
 {
     ScopedSpinLock queue_lock(m_lock);
-    if (m_threads.is_empty())
+    if (m_threads.is_empty()) {
+        // Save the fact that a wake was requested
+        m_wake_requested = true;
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: nobody to wake, mark as pending";
+#endif
         return;
-    while (!m_threads.is_empty())
-        m_threads.take_first()->wake_from_queue();
+    }
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: ";
+#endif
+    while (!m_threads.is_empty()) {
+        Thread* thread = m_threads.take_first();
+#ifdef WAITQUEUE_DEBUG
+        dbg() << "WaitQueue " << VirtualAddress(this) << ": wake_all: wake thread " << *thread;
+#endif
+        thread->wake_from_queue();
+    }
+    m_wake_requested = false;
     Scheduler::yield();
 }
 
 void WaitQueue::clear()
 {
     ScopedSpinLock queue_lock(m_lock);
+#ifdef WAITQUEUE_DEBUG
+    dbg() << "WaitQueue " << VirtualAddress(this) << ": clear";
+#endif
     m_threads.clear();
+    m_wake_requested = false;
 }
 
 }
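To make the new guarantee concrete, here is a tiny demonstration against SketchWaitQueue from the first sketch above: a wake issued while nobody waits is remembered, and the next enqueue declines to block, mirroring what wake_one() and enqueue() now do in the kernel.

#include <cassert>

int main()
{
    SketchWaitQueue queue;
    Thread finalizer;

    queue.wake_one();                       // queue empty: the wake is remembered
    bool queued = queue.enqueue(finalizer); // pending wake is consumed here
    assert(!queued);                        // caller must not block (NotBlocked)

    queued = queue.enqueue(finalizer);      // no pending wake this time
    assert(queued);                         // caller would now really wait
    return 0;
}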
Kernel/WaitQueue.h
@@ -38,9 +38,9 @@ public:
     WaitQueue();
     ~WaitQueue();
 
-    void enqueue(Thread&);
+    bool enqueue(Thread&);
     void wake_one(Atomic<bool>* lock = nullptr);
-    void wake_n(i32 wake_count);
+    void wake_n(u32 wake_count);
     void wake_all();
     void clear();
 
@@ -48,6 +48,7 @@ private:
     typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
     ThreadList m_threads;
     SpinLock<u32> m_lock;
+    bool m_wake_requested { false };
 };
 
 }