Kernel: Allow killing queued threads
We need to dequeue and wake threads that are waiting if the process terminates. Fixes #3603 without the HackStudio fixes in #3606.
commit 69a9c78783
parent 8b293119ab

2 changed files with 26 additions and 6 deletions
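The change touches both sides of the handshake: set_should_die() now dequeues a Queued thread and wakes it, and wait_on() treats a cleared m_queue as a sign that it was killed rather than that it timed out. As a rough illustration of that handshake, here is a small user-space sketch using std::condition_variable in place of the kernel's WaitQueue and g_scheduler_lock; the names (FakeWaitQueue, kill_waiter) and the 50 ms sleep are made up for this example and are not part of the commit.

// Minimal user-space sketch (not kernel code): a waiter blocks on a fake
// wait queue, and a "killer" dequeues it and then wakes it, so the waiter
// can tell that it was interrupted by death rather than woken normally.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

enum class BlockResult { WokeNormally, InterruptedByDeath };

struct FakeWaitQueue {
    std::mutex lock;               // stand-in for g_scheduler_lock
    std::condition_variable cv;
    bool enqueued { false };       // stand-in for Thread::m_queue
    bool woken { false };

    BlockResult wait_on()
    {
        std::unique_lock<std::mutex> guard(lock);
        enqueued = true;           // like wait_on(): remember the queue we sleep on
        cv.wait(guard, [&] { return woken; });
        if (enqueued) {
            // Still enqueued when we woke up: a normal wake.
            enqueued = false;
            return BlockResult::WokeNormally;
        }
        // Someone dequeued us before waking us: treat it as death.
        return BlockResult::InterruptedByDeath;
    }

    void kill_waiter()
    {
        std::lock_guard<std::mutex> guard(lock);
        enqueued = false;          // like set_should_die(): dequeue first ...
        woken = true;              // ... then wake the thread
        cv.notify_one();
    }
};

int main()
{
    FakeWaitQueue queue;
    std::thread waiter([&] {
        auto result = queue.wait_on();
        std::cout << (result == BlockResult::InterruptedByDeath
                          ? "interrupted by death\n"
                          : "woke normally\n");
    });
    // Crude way to let the waiter get onto the queue before we kill it.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    queue.kill_waiter();
    waiter.join();
    return 0;
}

In the actual commit both sides run under g_scheduler_lock, which is what makes checking and clearing m_queue safe.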
Kernel/Thread.cpp

@@ -151,6 +151,14 @@ void Thread::set_should_die()
             // the kernel stacks can clean up. We won't ever return back
             // to user mode, though
             resume_from_stopped();
+        } else if (state() == Queued) {
+            // m_queue can only be accessed safely if g_scheduler_lock is held!
+            if (m_queue) {
+                m_queue->dequeue(*this);
+                m_queue = nullptr;
+                // Wake the thread
+                wake_from_queue();
+            }
         }
     }

@@ -973,6 +981,8 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
     // we need to wait until the scheduler lock is released again
     {
         ScopedSpinLock sched_lock(g_scheduler_lock);
+        // m_queue can only be accessed safely if g_scheduler_lock is held!
+        m_queue = &queue;
         if (!queue.enqueue(*current_thread)) {
             // The WaitQueue was already requested to wake someone when
             // nobody was waiting. So return right away as we shouldn't

@@ -1026,9 +1036,18 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
     // scheduler lock, which is held when we insert into the queue
     ScopedSpinLock sched_lock(g_scheduler_lock);

-    // If our thread was still in the queue, we timed out
-    if (queue.dequeue(*current_thread))
-        result = BlockResult::InterruptedByTimeout;
+    if (m_queue) {
+        ASSERT(m_queue == &queue);
+        // If our thread was still in the queue, we timed out
+        m_queue = nullptr;
+        if (queue.dequeue(*current_thread))
+            result = BlockResult::InterruptedByTimeout;
+    } else {
+        // Our thread was already removed from the queue. The only
+        // way this can happen if someone else is trying to kill us.
+        // In this case, the queue should not contain us anymore.
+        return BlockResult::InterruptedByDeath;
+    }

     // Make sure we cancel the timer if woke normally.
     if (timeout && !result.was_interrupted())

Kernel/Thread.h

@@ -438,12 +438,12 @@ public:
         // to clean up now while we're still holding m_lock
         t.was_unblocked();

-        if (t.was_interrupted_by_signal())
-            return BlockResult::InterruptedBySignal;
-
         if (t.was_interrupted_by_death())
             return BlockResult::InterruptedByDeath;

+        if (t.was_interrupted_by_signal())
+            return BlockResult::InterruptedBySignal;
+
         return BlockResult::WokeNormally;
     }

@@ -634,6 +634,7 @@ private:
     Blocker* m_blocker { nullptr };
     timespec* m_blocker_timeout { nullptr };
     const char* m_wait_reason { nullptr };
+    WaitQueue* m_queue { nullptr };

     Atomic<bool> m_is_active { false };
     bool m_is_joinable { true };