Mirror of https://github.com/RGBCube/serenity (synced 2025-07-24 14:47:34 +00:00)
Revert "Kernel: Make sure threads which don't do any syscalls are t..."
This reverts commit 3c3a1726df.
We cannot blindly kill threads just because they're not executing in a
system call. Being blocked (including in a page fault) needs proper
unblocking and potentially kernel stack cleanup before we can mark a
thread as Dying.
Fixes #8691
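
As a reading aid (not part of the commit), here is a minimal sketch of the hazard described above, using the Thread API visible in the diff below; reap_if_dying() is a hypothetical helper introduced only for illustration:

// Hypothetical sketch, not kernel code: roughly what the reverted change
// allowed the scheduler to do to a thread that never enters a syscall.
static void reap_if_dying(Thread& thread)
{
    // Unsafe: a Blocked thread (e.g. one sitting in a page fault) still owns a
    // blocker and kernel stack state that must be unwound before it may become
    // Dying; flipping the state here skips that unblocking and cleanup.
    if (thread.should_die() && thread.may_die_immediately())
        thread.set_state(Thread::Dying);
}

Even a version that also checked the thread's current state would still leave the kernel-stack cleanup problem unsolved, which is why the change is reverted outright rather than patched.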
parent 552185066e
commit fa8fe40266
3 changed files with 0 additions and 19 deletions
@@ -217,13 +217,6 @@ bool Scheduler::pick_next()
     ScopedSpinLock lock(g_scheduler_lock);
 
-    auto current_thread = Thread::current();
-    if (current_thread->should_die() && current_thread->may_die_immediately()) {
-        // Ordinarily the thread would die on syscall exit, however if the thread
-        // doesn't perform any syscalls we still need to mark it for termination here.
-        current_thread->set_state(Thread::Dying);
-    }
-
     if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
         dump_thread_list();
     }
 
@@ -4,7 +4,6 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-#include <AK/ScopeGuard.h>
 #include <Kernel/API/Syscall.h>
 #include <Kernel/Arch/x86/Interrupts.h>
 #include <Kernel/Arch/x86/TrapFrame.h>
@@ -154,14 +153,6 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
 {
     auto& regs = *trap->regs;
     auto current_thread = Thread::current();
-    {
-        ScopedSpinLock lock(g_scheduler_lock);
-        current_thread->set_may_die_immediately(false);
-    }
-    ScopeGuard reset_may_die_immediately = [&current_thread] {
-        ScopedSpinLock lock(g_scheduler_lock);
-        current_thread->set_may_die_immediately(true);
-    };
     VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
     auto& process = current_thread->process();
 
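The removed syscall-entry code above relied on AK::ScopeGuard to restore the flag on every exit path. Below is a minimal sketch of that idiom, assuming the set_may_die_immediately() accessor that this commit removes and a placeholder do_syscall_work() that stands in for the actual syscall dispatch:

#include <AK/ScopeGuard.h>

// Sketch only: clear the flag for the duration of the syscall; the guard's
// lambda runs when the scope is left on any return path, restoring the flag.
void handle_one_syscall(Thread& thread)
{
    thread.set_may_die_immediately(false);
    ScopeGuard restore_flag = [&thread] {
        thread.set_may_die_immediately(true);
    };
    do_syscall_work(); // placeholder for the real syscall handling
}
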
@@ -1187,8 +1187,6 @@ public:
     bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
     void set_profiling_suppressed() { m_is_profiling_suppressed = true; }
 
-    bool may_die_immediately() const { return m_may_die_immediately; }
-    void set_may_die_immediately(bool flag) { m_may_die_immediately = flag; }
     InodeIndex global_procfs_inode_index() const { return m_global_procfs_inode_index; }
 
 private:
@@ -1287,7 +1285,6 @@ private:
     Kernel::Lock* m_blocking_lock { nullptr };
     u32 m_lock_requested_count { 0 };
     IntrusiveListNode<Thread> m_blocked_threads_list_node;
-    bool m_may_die_immediately { true };
 
 #if LOCK_DEBUG
     struct HoldingLockInfo {