
Kernel: Make Thread refcounted

Similar to Process, we need to make Thread refcounted. This will solve
problems that will appear once we schedule threads on more than one
processor. This allows us to hold onto threads without necessarily
holding the scheduler lock for the entire duration.
Authored by Tom on 2020-09-27 08:53:35 -06:00; committed by Andreas Kling
Parent: 079486ed7e
Commit: 838d9fa251
14 changed files with 136 additions and 90 deletions
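The heart of the change is that thread lookups such as Thread::from_tid() now return RefPtr<Thread> instead of a raw Thread*, so a caller can take a strong reference while briefly holding g_scheduler_lock and keep using the thread after dropping the lock, even if another processor finalizes it in the meantime. The standalone sketch below is not SerenityOS code: std::shared_ptr, std::mutex and a plain vector stand in for RefPtr, the scheduler SpinLock and the kernel's thread list, only to illustrate the ownership pattern.

// Minimal userspace sketch (not kernel code): std::shared_ptr/std::mutex
// stand in for RefPtr<Thread> and the scheduler SpinLock.
#include <cstdio>
#include <memory>
#include <mutex>
#include <vector>

struct Thread {
    int tid;
    const char* name;
};

static std::mutex g_scheduler_lock;                    // stand-in for the kernel's scheduler lock
static std::vector<std::shared_ptr<Thread>> g_threads; // stand-in for the global thread list

// Analogous to Thread::from_tid(): look the thread up under the lock and
// return a strong reference instead of a raw pointer.
static std::shared_ptr<Thread> thread_from_tid(int tid)
{
    std::lock_guard lock(g_scheduler_lock);
    for (auto& thread : g_threads) {
        if (thread->tid == tid)
            return thread;
    }
    return nullptr;
}

int main()
{
    g_threads.push_back(std::make_shared<Thread>(Thread { 1, "FinalizerTask" }));

    auto thread = thread_from_tid(1);

    // Simulate another CPU finalizing the thread: it disappears from the
    // global list, but our strong reference keeps the object alive.
    {
        std::lock_guard lock(g_scheduler_lock);
        g_threads.clear();
    }

    if (thread)
        std::printf("still safe to use: tid=%d name=%s\n", thread->tid, thread->name);
    return 0;
}

With the old raw-pointer scheme, the pointer returned by the lookup could dangle the moment the thread was deleted elsewhere; the strong reference removes that window.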

View file

@@ -61,7 +61,7 @@ static void handle_tcp(const IPv4Packet&, const timeval& packet_timestamp);
 void NetworkTask::spawn()
 {
-    Thread* thread = nullptr;
+    RefPtr<Thread> thread;
     Process::create_kernel_process(thread, "NetworkTask", NetworkTask_main);
 }

View file

@@ -265,7 +265,7 @@ void Process::kill_all_threads()
     });
 }

-RefPtr<Process> Process::create_user_process(Thread*& first_thread, const String& path, uid_t uid, gid_t gid, ProcessID parent_pid, int& error, Vector<String>&& arguments, Vector<String>&& environment, TTY* tty)
+RefPtr<Process> Process::create_user_process(RefPtr<Thread>& first_thread, const String& path, uid_t uid, gid_t gid, ProcessID parent_pid, int& error, Vector<String>&& arguments, Vector<String>&& environment, TTY* tty)
 {
     auto parts = path.split('/');
     if (arguments.is_empty()) {
@@ -298,7 +298,7 @@ RefPtr<Process> Process::create_user_process(Thread*& first_thread, const String
     error = process->exec(path, move(arguments), move(environment));
     if (error != 0) {
         dbg() << "Failed to exec " << path << ": " << error;
-        delete first_thread;
+        first_thread = nullptr;
         return {};
     }
@@ -311,7 +311,7 @@ RefPtr<Process> Process::create_user_process(Thread*& first_thread, const String
     return process;
 }

-NonnullRefPtr<Process> Process::create_kernel_process(Thread*& first_thread, String&& name, void (*e)(), u32 affinity)
+NonnullRefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*e)(), u32 affinity)
 {
     auto process = adopt(*new Process(first_thread, move(name), (uid_t)0, (gid_t)0, ProcessID(0), true));
     first_thread->tss().eip = (FlatPtr)e;
@@ -327,7 +327,7 @@ NonnullRefPtr<Process> Process::create_kernel_process(Thread*& first_thread, Str
     return process;
 }

-Process::Process(Thread*& first_thread, const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd, RefPtr<Custody> executable, TTY* tty, Process* fork_parent)
+Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd, RefPtr<Custody> executable, TTY* tty, Process* fork_parent)
     : m_name(move(name))
     , m_pid(allocate_pid())
     , m_euid(uid)
@@ -356,14 +356,15 @@ Process::Process(Thread*& first_thread, const String& name, uid_t uid, gid_t gid
         first_thread = Thread::current()->clone(*this);
     } else {
         // NOTE: This non-forked code path is only taken when the kernel creates a process "manually" (at boot.)
-        first_thread = new Thread(*this);
+        first_thread = adopt(*new Thread(*this));
         first_thread->detach();
     }
 }

 Process::~Process()
 {
-    ASSERT(thread_count() == 0);
+    ASSERT(!m_next && !m_prev); // should have been reaped
+    ASSERT(thread_count() == 0); // all threads should have been finalized
 }

 void Process::dump_regions()
@@ -613,7 +614,7 @@ void Process::finalize()
     {
         InterruptDisabler disabler;
         // FIXME: PID/TID BUG
-        if (auto* parent_thread = Thread::from_tid(m_ppid.value())) {
+        if (auto parent_thread = Thread::from_tid(m_ppid.value())) {
            if (parent_thread->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT) {
                 // NOTE: If the parent doesn't care about this process, let it go.
                 m_ppid = 0;
@@ -761,13 +762,13 @@ KResult Process::send_signal(u8 signal, Process* sender)
     return KResult(-ESRCH);
 }

-Thread* Process::create_kernel_thread(void (*entry)(), u32 priority, const String& name, u32 affinity, bool joinable)
+RefPtr<Thread> Process::create_kernel_thread(void (*entry)(), u32 priority, const String& name, u32 affinity, bool joinable)
 {
     ASSERT((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));
     // FIXME: Do something with guard pages?
-    auto* thread = new Thread(*this);
+    auto thread = adopt(*new Thread(*this));
     thread->set_name(name);
     thread->set_affinity(affinity);

View file

@@ -125,14 +125,14 @@ public:
         return current_thread ? &current_thread->process() : nullptr;
     }

-    static NonnullRefPtr<Process> create_kernel_process(Thread*& first_thread, String&& name, void (*entry)(), u32 affinity = THREAD_AFFINITY_DEFAULT);
-    static RefPtr<Process> create_user_process(Thread*& first_thread, const String& path, uid_t, gid_t, ProcessID ppid, int& error, Vector<String>&& arguments = Vector<String>(), Vector<String>&& environment = Vector<String>(), TTY* = nullptr);
+    static NonnullRefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(), u32 affinity = THREAD_AFFINITY_DEFAULT);
+    static RefPtr<Process> create_user_process(RefPtr<Thread>& first_thread, const String& path, uid_t, gid_t, ProcessID ppid, int& error, Vector<String>&& arguments = Vector<String>(), Vector<String>&& environment = Vector<String>(), TTY* = nullptr);
     ~Process();

     static Vector<ProcessID> all_pids();
     static AK::NonnullRefPtrVector<Process> all_processes();

-    Thread* create_kernel_thread(void (*entry)(), u32 priority, const String& name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
+    RefPtr<Thread> create_kernel_thread(void (*entry)(), u32 priority, const String& name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);

     bool is_profiling() const { return m_profiling; }
     void set_profiling(bool profiling) { m_profiling = profiling; }
@@ -466,7 +466,7 @@ private:
     friend class Scheduler;
     friend class Region;

-    Process(Thread*& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
+    Process(RefPtr<Thread>& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
     static ProcessID allocate_pid();
     Range allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);

View file

@@ -50,7 +50,7 @@ class SchedulerPerProcessorData {
 public:
     SchedulerPerProcessorData() = default;

-    Thread* m_pending_beneficiary { nullptr };
+    WeakPtr<Thread> m_pending_beneficiary;
     const char* m_pending_donate_reason { nullptr };
     bool m_in_scheduler { true };
 };
@@ -599,7 +599,7 @@ bool Scheduler::donate_to_and_switch(Thread* beneficiary, const char* reason)
     return Scheduler::context_switch(beneficiary);
 }

-bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
+bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
 {
     ASSERT(beneficiary);
@@ -625,7 +625,7 @@ bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
     ASSERT(!proc.in_irq());

     if (proc.in_critical() > 1) {
-        scheduler_data.m_pending_beneficiary = beneficiary; // Save the beneficiary
+        scheduler_data.m_pending_beneficiary = beneficiary->make_weak_ptr(); // Save the beneficiary
         scheduler_data.m_pending_donate_reason = reason;
         proc.invoke_scheduler_async();
         return false;
@@ -740,7 +740,7 @@ void Scheduler::initialize()
 {
     ASSERT(&Processor::current() != nullptr); // sanity check

-    Thread* idle_thread = nullptr;
+    RefPtr<Thread> idle_thread;
     g_scheduler_data = new SchedulerData;
     g_finalizer_wait_queue = new WaitQueue;
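One place that deliberately does not take a strong reference is SchedulerPerProcessorData::m_pending_beneficiary, which becomes a WeakPtr<Thread> filled in via beneficiary->make_weak_ptr(): the scheduler only wants to remember the donation target, not extend its lifetime. A rough userspace sketch (std::weak_ptr standing in for WeakPtr; the helper names are made up, not kernel APIs) of why a weak reference fits here:

// Userspace sketch (not kernel code): std::weak_ptr stands in for WeakPtr.
#include <cstdio>
#include <memory>

struct Thread {
    int tid;
};

// Like SchedulerPerProcessorData::m_pending_beneficiary.
static std::weak_ptr<Thread> g_pending_beneficiary;

static void remember_beneficiary(const std::shared_ptr<Thread>& beneficiary)
{
    g_pending_beneficiary = beneficiary; // like beneficiary->make_weak_ptr()
}

static void run_deferred_donation()
{
    // Upgrade the weak reference; if the thread has been finalized in the
    // meantime, lock() returns null and the donation is simply dropped.
    if (auto beneficiary = g_pending_beneficiary.lock())
        std::printf("donating to tid=%d\n", beneficiary->tid);
    else
        std::printf("beneficiary is gone, nothing to do\n");
    g_pending_beneficiary.reset();
}

int main()
{
    auto thread = std::make_shared<Thread>(Thread { 42 });
    remember_beneficiary(thread);
    thread.reset(); // the thread dies before the scheduler gets to run
    run_deferred_donation();
    return 0;
}

If the beneficiary has already been finalized, upgrading the weak pointer fails and the deferred donation is skipped instead of touching freed memory.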

View file

@@ -60,7 +60,7 @@ public:
     static timeval time_since_boot();
     static bool yield();
     static bool donate_to_and_switch(Thread*, const char* reason);
-    static bool donate_to(Thread*, const char* reason);
+    static bool donate_to(RefPtr<Thread>&, const char* reason);
     static bool context_switch(Thread*);
     static void enter_current(Thread& prev_thread);
     static void leave_on_first_switch(u32 flags);

View file

@@ -36,8 +36,10 @@ namespace Kernel {
 pid_t Process::sys$fork(RegisterState& regs)
 {
     REQUIRE_PROMISE(proc);
-    Thread* child_first_thread = nullptr;
-    auto* child = new Process(child_first_thread, m_name, m_uid, m_gid, m_pid, m_is_kernel_process, m_cwd, m_executable, m_tty, this);
+    RefPtr<Thread> child_first_thread;
+    auto child = adopt(*new Process(child_first_thread, m_name, m_uid, m_gid, m_pid, m_is_kernel_process, m_cwd, m_executable, m_tty, this));
+    if (!child_first_thread)
+        return -ENOMEM;
     child->m_root_directory = m_root_directory;
     child->m_root_directory_relative_to_global_root = m_root_directory_relative_to_global_root;
     child->m_promises = m_promises;
@@ -92,6 +94,7 @@ pid_t Process::sys$fork(RegisterState& regs)
     {
         ScopedSpinLock lock(g_processes_lock);
         g_processes->prepend(child);
+        child->ref(); // This reference will be dropped by Process::reap
     }

     child_first_thread->set_affinity(Thread::current()->affinity());

View file

@@ -40,8 +40,12 @@ int Process::sys$donate(pid_t tid)
     REQUIRE_PROMISE(stdio);
     if (tid < 0)
         return -EINVAL;
-    InterruptDisabler disabler;
-    auto* thread = Thread::from_tid(tid);
+
+    // We don't strictly need to grab the scheduler lock here, but it
+    // will close a race where we can find the thread but it disappears
+    // before we call Scheduler::donate_to.
+    ScopedSpinLock lock(g_scheduler_lock);
+    auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;
     Scheduler::donate_to(thread, "sys$donate");
@@ -55,8 +59,11 @@ int Process::sys$sched_setparam(int pid, Userspace<const struct sched_param*> us
     if (!copy_from_user(&desired_param, user_param))
         return -EFAULT;

-    InterruptDisabler disabler;
+    if (desired_param.sched_priority < THREAD_PRIORITY_MIN || desired_param.sched_priority > THREAD_PRIORITY_MAX)
+        return -EINVAL;
+
     auto* peer = Thread::current();
+    ScopedSpinLock lock(g_scheduler_lock);
     if (pid != 0)
         peer = Thread::from_tid(pid);
@@ -66,9 +73,6 @@ int Process::sys$sched_setparam(int pid, Userspace<const struct sched_param*> us
     if (!is_superuser() && m_euid != peer->process().m_uid && m_uid != peer->process().m_uid)
         return -EPERM;

-    if (desired_param.sched_priority < THREAD_PRIORITY_MIN || desired_param.sched_priority > THREAD_PRIORITY_MAX)
-        return -EINVAL;
-
     peer->set_priority((u32)desired_param.sched_priority);
     return 0;
 }
@@ -76,22 +80,27 @@ int Process::sys$sched_setparam(int pid, Userspace<const struct sched_param*> us
 int Process::sys$sched_getparam(pid_t pid, Userspace<struct sched_param*> user_param)
 {
     REQUIRE_PROMISE(proc);
-    InterruptDisabler disabler;
-    auto* peer = Thread::current();
-    if (pid != 0) {
-        // FIXME: PID/TID BUG
-        // The entire process is supposed to be affected.
-        peer = Thread::from_tid(pid);
-    }
-    if (!peer)
-        return -ESRCH;
-    if (!is_superuser() && m_euid != peer->process().m_uid && m_uid != peer->process().m_uid)
-        return -EPERM;
+    int priority;
+    {
+        auto* peer = Thread::current();
+        ScopedSpinLock lock(g_scheduler_lock);
+        if (pid != 0) {
+            // FIXME: PID/TID BUG
+            // The entire process is supposed to be affected.
+            peer = Thread::from_tid(pid);
+        }
+        if (!peer)
+            return -ESRCH;
+        if (!is_superuser() && m_euid != peer->process().m_uid && m_uid != peer->process().m_uid)
+            return -EPERM;
+        priority = (int)peer->priority();
+    }

     struct sched_param param {
-        (int)peer->priority()
+        priority
     };

     if (!copy_to_user(user_param, &param))
         return -EFAULT;
@@ -103,8 +112,8 @@ int Process::sys$set_thread_boost(pid_t tid, int amount)
     REQUIRE_PROMISE(proc);
     if (amount < 0 || amount > 20)
         return -EINVAL;
-    InterruptDisabler disabler;
-    auto* thread = Thread::from_tid(tid);
+    ScopedSpinLock lock(g_scheduler_lock);
+    auto thread = Thread::from_tid(tid);
     if (!thread)
         return -ESRCH;
     if (thread->state() == Thread::State::Dead || thread->state() == Thread::State::Dying)

View file

@@ -102,8 +102,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value)
 int Process::sys$detach_thread(pid_t tid)
 {
     REQUIRE_PROMISE(thread);
-    InterruptDisabler disabler;
-    auto* thread = Thread::from_tid(tid);
+    auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;
@@ -118,8 +117,7 @@ int Process::sys$join_thread(pid_t tid, Userspace<void**> exit_value)
 {
     REQUIRE_PROMISE(thread);

-    InterruptDisabler disabler;
-    auto* thread = Thread::from_tid(tid);
+    auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;
@@ -134,20 +132,14 @@ int Process::sys$join_thread(pid_t tid, Userspace<void**> exit_value)
         KResult try_join_result(KSuccess);
         auto result = current_thread->block<Thread::JoinBlocker>(nullptr, *thread, try_join_result, joinee_exit_value);
         if (result == Thread::BlockResult::NotBlocked) {
-            ASSERT_INTERRUPTS_DISABLED();
             if (try_join_result.is_error())
                 return try_join_result.error();
             break;
         }
-        if (result == Thread::BlockResult::InterruptedByDeath) {
-            ASSERT_INTERRUPTS_DISABLED();
-            break;
-        }
+        if (result == Thread::BlockResult::InterruptedByDeath)
+            return 0; // we're not going to return back to user mode
     }

-    // NOTE: 'thread' is very possibly deleted at this point. Clear it just to be safe.
-    thread = nullptr;
-
     if (exit_value && !copy_to_user(exit_value, &joinee_exit_value))
         return -EFAULT;
     return 0;
@@ -164,8 +156,7 @@ int Process::sys$set_thread_name(pid_t tid, Userspace<const char*> user_name, si
     if (name.length() > max_thread_name_size)
         return -EINVAL;

-    InterruptDisabler disabler;
-    auto* thread = Thread::from_tid(tid);
+    auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;
@@ -179,15 +170,16 @@ int Process::sys$get_thread_name(pid_t tid, Userspace<char*> buffer, size_t buff
     if (buffer_size == 0)
         return -EINVAL;

-    InterruptDisabler disabler;
-    auto* thread = Thread::from_tid(tid);
+    auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;

-    if (thread->name().length() + 1 > (size_t)buffer_size)
+    // We must make a temporary copy here to avoid a race with sys$set_thread_name
+    auto thread_name = thread->name();
+    if (thread_name.length() + 1 > (size_t)buffer_size)
         return -ENAMETOOLONG;

-    if (!copy_to_user(buffer, thread->name().characters(), thread->name().length() + 1))
+    if (!copy_to_user(buffer, thread_name.characters(), thread_name.length() + 1))
         return -EFAULT;
     return 0;
 }

View file

@@ -68,7 +68,9 @@ KResultOr<siginfo_t> Process::do_waitid(idtype_t idtype, int id, int options)
             return reap(*waitee_process);
         } else {
             // FIXME: PID/TID BUG
-            auto* waitee_thread = Thread::from_tid(waitee_pid.value());
+            // Make sure to hold the scheduler lock so that we operate on a consistent state
+            ScopedSpinLock scheduler_lock(g_scheduler_lock);
+            auto waitee_thread = Thread::from_tid(waitee_pid.value());
             if (!waitee_thread)
                 return KResult(-ECHILD);
             ASSERT((options & WNOHANG) || waitee_thread->state() == Thread::State::Stopped);

View file

@@ -31,7 +31,8 @@ namespace Kernel {
 void FinalizerTask::spawn()
 {
-    Process::create_kernel_process(g_finalizer, "FinalizerTask", [] {
+    RefPtr<Thread> finalizer_thread;
+    Process::create_kernel_process(finalizer_thread, "FinalizerTask", [] {
         Thread::current()->set_priority(THREAD_PRIORITY_LOW);
         for (;;) {
             Thread::current()->wait_on(*g_finalizer_wait_queue, "FinalizerTask");
@@ -41,6 +42,7 @@ void FinalizerTask::spawn()
             Thread::finalize_dying_threads();
         }
     });
+    g_finalizer = finalizer_thread;
 }

 }

View file

@@ -33,7 +33,7 @@ namespace Kernel {
 void SyncTask::spawn()
 {
-    Thread* syncd_thread = nullptr;
+    RefPtr<Thread> syncd_thread;
     Process::create_kernel_process(syncd_thread, "SyncTask", [] {
         dbg() << "SyncTask is running";
         for (;;) {

View file

@@ -100,17 +100,20 @@ Thread::Thread(NonnullRefPtr<Process> process)
         m_tss.esp0 = m_kernel_stack_top;
     }

+    // We need to add another reference if we could successfully create
+    // all the resources needed for this thread. The reason for this is that
+    // we don't want to delete this thread after dropping the reference,
+    // it may still be running or scheduled to be run.
+    // The finalizer is responsible for dropping this reference once this
+    // thread is ready to be cleaned up.
+    ref();
     if (m_process->pid() != 0)
         Scheduler::init_thread(*this);
 }

 Thread::~Thread()
 {
-    kfree_aligned(m_fpu_state);
-
-    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
-    ASSERT(thread_cnt_before != 0);
     ASSERT(!m_joiner);
 }
@@ -269,10 +272,12 @@ const char* Thread::state_string() const
         return "Stopped";
     case Thread::Queued:
         return "Queued";
-    case Thread::Blocked:
+    case Thread::Blocked: {
+        ScopedSpinLock lock(m_lock);
         ASSERT(m_blocker != nullptr);
         return m_blocker->state_string();
     }
+    }
     klog() << "Thread::state_string(): Invalid state: " << state();
     ASSERT_NOT_REACHED();
     return nullptr;
@@ -295,6 +300,13 @@ void Thread::finalize()
     if (m_dump_backtrace_on_finalization)
         dbg() << backtrace_impl();
+
+    kfree_aligned(m_fpu_state);
+
+    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+    ASSERT(thread_cnt_before != 0);
+    if (thread_cnt_before == 1)
+        process().finalize();
 }

 void Thread::finalize_dying_threads()
@@ -310,11 +322,12 @@ void Thread::finalize_dying_threads()
         });
     }
     for (auto* thread : dying_threads) {
-        auto& process = thread->process();
         thread->finalize();
-        delete thread;
-        if (process.m_thread_count.load(AK::MemoryOrder::memory_order_consume) == 0)
-            process.finalize();
+
+        // This thread will never execute again, drop the running reference
+        // NOTE: This may not necessarily drop the last reference if anything
+        // else is still holding onto this thread!
+        thread->unref();
     }
 }
@@ -762,9 +775,9 @@ KResultOr<u32> Thread::make_userspace_stack_for_main_thread(Vector<String> argum
     return new_esp;
 }

-Thread* Thread::clone(Process& process)
+RefPtr<Thread> Thread::clone(Process& process)
 {
-    auto* clone = new Thread(process);
+    auto clone = adopt(*new Thread(process));
     memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
     clone->m_signal_mask = m_signal_mask;
     memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
@@ -967,7 +980,7 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
     return stream << value.process().name() << "(" << value.pid().value() << ":" << value.tid().value() << ")";
 }

-Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary)
+Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, RefPtr<Thread> beneficiary)
 {
     auto* current_thread = Thread::current();
     TimerId timer_id {};
@@ -1046,7 +1059,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeva
             // Our thread was already removed from the queue. The only
             // way this can happen if someone else is trying to kill us.
             // In this case, the queue should not contain us anymore.
-            return BlockResult::InterruptedByDeath;
+            result = BlockResult::InterruptedByDeath;
         }

         // Make sure we cancel the timer if woke normally.
@@ -1071,10 +1084,10 @@ void Thread::wake_from_queue()
     set_state(State::Running);
 }

-Thread* Thread::from_tid(ThreadID tid)
+RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
-    InterruptDisabler disabler;
-    Thread* found_thread = nullptr;
+    RefPtr<Thread> found_thread;
+    ScopedSpinLock lock(g_scheduler_lock);
     Thread::for_each([&](auto& thread) {
         if (thread.tid() == tid) {
             found_thread = &thread;
@@ -1109,6 +1122,7 @@ void Thread::tracer_trap(const RegisterState& regs)
 const Thread::Blocker& Thread::blocker() const
 {
+    ASSERT(m_lock.own_lock());
     ASSERT(m_blocker);
     return *m_blocker;
 }
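The ref() added at the end of the Thread constructor and the thread->unref() in finalize_dying_threads() form a small protocol: every thread carries a self-reference for as long as it may still run, so dropping the creator's RefPtr can never delete a runnable thread, and the finalizer drops that reference once the thread will never execute again. A toy sketch of that protocol, using a hypothetical SketchThread with a hand-rolled counter rather than the kernel's RefCounted/adopt machinery:

// Toy sketch (not kernel code) of the "running reference" ownership protocol.
#include <cassert>
#include <cstdio>

class SketchThread {
public:
    SketchThread()
    {
        // The "running reference": keeps the object alive while it may still
        // be scheduled, independent of whoever created it.
        ref();
    }

    void ref() { ++m_ref_count; }
    void unref()
    {
        if (--m_ref_count == 0) {
            std::printf("thread deleted\n");
            delete this;
        }
    }

    // Called by the finalizer once the thread will never run again.
    void finalize() { std::printf("thread finalized\n"); }

    int ref_count() const { return m_ref_count; }

private:
    ~SketchThread() = default;
    int m_ref_count { 0 };
};

int main()
{
    auto* thread = new SketchThread(); // ref_count == 1 (running reference)
    thread->ref();                     // creator's reference, like a RefPtr<Thread>
    assert(thread->ref_count() == 2);

    thread->unref(); // creator lets go; the thread stays alive because it may still run

    // Later, on the finalizer path (as in Thread::finalize_dying_threads):
    thread->finalize();
    thread->unref(); // drop the running reference; the last one deletes the thread
    return 0;
}

The child->ref() in sys$fork follows the same idea on the Process side: the global process list keeps its own reference, which Process::reap later drops.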

View file

@@ -32,6 +32,8 @@
 #include <AK/OwnPtr.h>
 #include <AK/String.h>
 #include <AK/Vector.h>
+#include <AK/WeakPtr.h>
+#include <AK/Weakable.h>
 #include <Kernel/Arch/i386/CPU.h>
 #include <Kernel/Forward.h>
 #include <Kernel/KResult.h>
@@ -66,7 +68,9 @@ struct ThreadSpecificData {
 #define THREAD_AFFINITY_DEFAULT 0xffffffff

-class Thread {
+class Thread
+    : public RefCounted<Thread>
+    , public Weakable<Thread> {
     AK_MAKE_NONCOPYABLE(Thread);
     AK_MAKE_NONMOVABLE(Thread);
@@ -82,7 +86,7 @@ public:
     explicit Thread(NonnullRefPtr<Process>);
     ~Thread();

-    static Thread* from_tid(ThreadID);
+    static RefPtr<Thread> from_tid(ThreadID);
     static void finalize_dying_threads();

     ThreadID tid() const { return m_tid; }
@@ -143,9 +147,23 @@ public:
     String backtrace();
     Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;

-    const String& name() const { return m_name; }
-    void set_name(const StringView& s) { m_name = s; }
-    void set_name(String&& name) { m_name = move(name); }
+    String name() const
+    {
+        // Because the name can be changed, we can't return a const
+        // reference here. We must make a copy
+        ScopedSpinLock lock(m_lock);
+        return m_name;
+    }
+    void set_name(const StringView& s)
+    {
+        ScopedSpinLock lock(m_lock);
+        m_name = s;
+    }
+    void set_name(String&& name)
+    {
+        ScopedSpinLock lock(m_lock);
+        m_name = move(name);
+    }

     void finalize();
@@ -452,7 +470,7 @@ public:
         return block<ConditionBlocker>(nullptr, state_string, move(condition));
     }

-    BlockResult wait_on(WaitQueue& queue, const char* reason, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr);
+    BlockResult wait_on(WaitQueue& queue, const char* reason, timeval* timeout = nullptr, Atomic<bool>* lock = nullptr, RefPtr<Thread> beneficiary = {});
     void wake_from_queue();
     void unblock();
@@ -578,7 +596,7 @@ public:
         return !m_is_joinable;
     }

-    Thread* clone(Process&);
+    RefPtr<Thread> clone(Process&);

     template<typename Callback>
     static IterationDecision for_each_in_state(State, Callback);
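Because a Thread is now shared between processors, Thread::name() returns a String by value, copied while m_lock is held, and both setters take the same lock; handing out a const String& would race with a concurrent set_name(), which is exactly what the temporary copy in sys$get_thread_name guards against. A minimal userspace analogue of this copy-under-lock accessor, with std::mutex and std::string standing in for the kernel types (the class name is made up):

// Userspace sketch (not kernel code) of the copy-under-lock accessor.
#include <cstdio>
#include <mutex>
#include <string>

class NamedThing {
public:
    std::string name() const
    {
        // Copy while holding the lock; a reference could be invalidated by a
        // concurrent set_name() reallocating the underlying storage.
        std::lock_guard lock(m_lock);
        return m_name;
    }

    void set_name(std::string name)
    {
        std::lock_guard lock(m_lock);
        m_name = std::move(name);
    }

private:
    mutable std::mutex m_lock;
    std::string m_name;
};

int main()
{
    NamedThing thing;
    thing.set_name("FinalizerTask");
    std::printf("%s\n", thing.name().c_str());
    return 0;
}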

View file

@@ -168,8 +168,13 @@ extern "C" [[noreturn]] void init()
     Process::initialize();
     Scheduler::initialize();

-    Thread* init_stage2_thread = nullptr;
-    Process::create_kernel_process(init_stage2_thread, "init_stage2", init_stage2);
+    {
+        RefPtr<Thread> init_stage2_thread;
+        Process::create_kernel_process(init_stage2_thread, "init_stage2", init_stage2);
+        // We need to make sure we drop the reference for init_stage2_thread
+        // before calling into Scheduler::start, otherwise we will have a
+        // dangling Thread that never gets cleaned up
+    }

     Scheduler::start();
     ASSERT_NOT_REACHED();
@@ -351,7 +356,7 @@ void init_stage2()
     // FIXME: It would be nicer to set the mode from userspace.
     tty0->set_graphical(!text_mode);

-    Thread* thread = nullptr;
+    RefPtr<Thread> thread;
     auto userspace_init = kernel_command_line().lookup("init").value_or("/bin/SystemServer");
     Process::create_user_process(thread, userspace_init, (uid_t)0, (gid_t)0, ProcessID(0), error, {}, {}, tty0);
     if (error != 0) {