From faf15e37213f3b023b2d029c06dcaab6871f8f13 Mon Sep 17 00:00:00 2001 From: Brian Gianforcaro Date: Sun, 26 Apr 2020 02:32:37 -0700 Subject: [PATCH] Kernel: Add timeout support to Thread::wait_on This change plumbs a new optional timeout option to wait_on. The timeout is enabled by enqueuing a timer on the timer queue while we are waiting. We can then see if we were woken up or timed out by checking if we are still on the wait queue or not. --- Kernel/Lock.cpp | 3 ++- Kernel/Thread.cpp | 22 +++++++++++++++++++--- Kernel/Thread.h | 3 ++- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp index eaeb383d64..b63651b70a 100644 --- a/Kernel/Lock.cpp +++ b/Kernel/Lock.cpp @@ -65,7 +65,8 @@ void Lock::lock(Mode mode) m_lock.store(false, AK::memory_order_release); return; } - Thread::current->wait_on(m_queue, &m_lock, m_holder, m_name); + timeval* timeout = nullptr; + Thread::current->wait_on(m_queue, timeout, &m_lock, m_holder, m_name); } } } diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index cac6cccd31..a15c5c388b 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -34,6 +34,7 @@ #include #include #include +#include <Kernel/TimerQueue.h> #include #include #include @@ -575,8 +576,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal) m_signal_mask |= new_signal_mask; - auto setup_stack = [&](ThreadState state, u32 * stack) - { + auto setup_stack = [&](ThreadState state, u32* stack) { u32 old_esp = *stack; u32 ret_eip = state.eip; u32 ret_eflags = state.eflags; @@ -884,7 +884,7 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value) return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")"; } -void Thread::wait_on(WaitQueue& queue, Atomic<bool>* lock, Thread* beneficiary, const char* reason) +Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary, const char* reason) { cli(); bool did_unlock = unlock_process_if_locked(); @@ -892,6 
+892,14 @@ void Thread::wait_on(WaitQueue& queue, Atomic<bool>* lock, Thread* beneficiary, *lock = false; set_state(State::Queued); queue.enqueue(*current); + + u64 timer_id = 0; + if (timeout) { + timer_id = TimerQueue::the().add_timer(*timeout, [&]() { + wake_from_queue(); + }); + } + // Yield and wait for the queue to wake us up again. if (beneficiary) Scheduler::donate_to(beneficiary, reason); @@ -900,6 +908,14 @@ void Thread::wait_on(WaitQueue& queue, Atomic<bool>* lock, Thread* beneficiary, // We've unblocked, relock the process if needed and carry on. if (did_unlock) relock_process(); + + BlockResult result = m_wait_queue_node.is_in_list() ? BlockResult::InterruptedByTimeout : BlockResult::WokeNormally; + + // Make sure we cancel the timer if woke normally. + if (timeout && result == BlockResult::WokeNormally) + TimerQueue::the().cancel_timer(timer_id); + + return result; } void Thread::wake_from_queue() diff --git a/Kernel/Thread.h b/Kernel/Thread.h index f1ded94c08..74ebee2cee 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -295,6 +295,7 @@ public: WokeNormally, InterruptedBySignal, InterruptedByDeath, + InterruptedByTimeout, }; template @@ -331,7 +332,7 @@ public: return block(state_string, move(condition)); } - void wait_on(WaitQueue& queue, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr, const char* reason = nullptr); + BlockResult wait_on(WaitQueue& queue, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr, const char* reason = nullptr); void wake_from_queue(); void unblock();