diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index e4b01c735e..09c494f25b 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -907,4 +907,22 @@ PerformanceEventBuffer& Process::ensure_perf_events()
     m_perf_event_buffer = make<PerformanceEventBuffer>();
     return *m_perf_event_buffer;
 }
+
+bool Process::remove_thread(Thread& thread)
+{
+    auto thread_cnt_before = m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+    ASSERT(thread_cnt_before != 0);
+    ScopedSpinLock thread_list_lock(m_thread_list_lock);
+    m_thread_list.remove(thread);
+    return thread_cnt_before == 1;
+}
+
+bool Process::add_thread(Thread& thread)
+{
+    bool is_first = m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
+    ScopedSpinLock thread_list_lock(m_thread_list_lock);
+    m_thread_list.append(thread);
+    return is_first;
+}
+
 }
diff --git a/Kernel/Process.h b/Kernel/Process.h
index 1327da2e9e..5b851ace20 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -204,7 +204,7 @@ public:
     template<typename Callback>
     void for_each_child(Callback);
     template<typename Callback>
-    void for_each_thread(Callback) const;
+    IterationDecision for_each_thread(Callback) const;
 
     void die();
     void finalize();
@@ -507,6 +507,9 @@ private:
     friend class Scheduler;
     friend class Region;
 
+    bool add_thread(Thread&);
+    bool remove_thread(Thread&);
+
     PerformanceEventBuffer& ensure_perf_events();
 
     Process(RefPtr<Thread>& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
@@ -592,6 +595,8 @@ private:
     u8 m_termination_status { 0 };
     u8 m_termination_signal { 0 };
     Atomic<u32> m_thread_count { 0 };
+    mutable IntrusiveList<Thread, &Thread::m_process_thread_list_node> m_thread_list;
+    mutable RecursiveSpinLock m_thread_list_lock;
     const bool m_is_kernel_process;
     bool m_dead { false };
@@ -693,12 +698,9 @@ inline void Process::for_each_child(Callback callback)
 }
 
 template<typename Callback>
-inline void Process::for_each_thread(Callback callback) const
+inline IterationDecision Process::for_each_thread(Callback callback) const
 {
-    InterruptDisabler disabler;
-    ProcessID my_pid = pid();
-
-    if (my_pid == 0) {
+    if (pid() == 0) {
         // NOTE: Special case the colonel process, since its main thread is not in the global thread table.
         Processor::for_each(
             [&](Processor& proc) -> IterationDecision {
@@ -707,15 +709,15 @@ inline void Process::for_each_thread(Callback callback) const
                     return callback(*idle_thread);
                 return IterationDecision::Continue;
             });
-        return;
+    } else {
+        ScopedSpinLock thread_list_lock(m_thread_list_lock);
+        for (auto& thread : m_thread_list) {
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
     }
-
-    Thread::for_each([callback, my_pid](Thread& thread) -> IterationDecision {
-        if (thread.pid() == my_pid)
-            return callback(thread);
-
-        return IterationDecision::Continue;
-    });
+    return IterationDecision::Continue;
 }
 
 template<typename Callback>
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 8ede0661b5..79c4392d77 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -49,7 +49,7 @@ Thread::Thread(NonnullRefPtr<Process> process)
     : m_process(move(process))
     , m_name(m_process->name())
 {
-    bool is_first_thread = m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
+    bool is_first_thread = m_process->add_thread(*this);
     ArmedScopeGuard guard([&]() {
         drop_thread_count(is_first_thread);
     });
@@ -130,6 +130,7 @@ Thread::~Thread()
         // block conditions would access m_process, which would be in
         // the middle of being destroyed.
         ScopedSpinLock lock(g_scheduler_lock);
+        ASSERT(!m_process_thread_list_node.is_in_list());
         g_scheduler_data->thread_list_for_state(m_state).remove(*this);
 
         // We shouldn't be queued
@@ -388,10 +389,9 @@ void Thread::finalize()
 
 void Thread::drop_thread_count(bool initializing_first_thread)
 {
-    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+    bool is_last = process().remove_thread(*this);
 
-    ASSERT(thread_cnt_before != 0);
-    if (!initializing_first_thread && thread_cnt_before == 1)
+    if (!initializing_first_thread && is_last)
         process().finalize();
 }
 
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index bbbf213670..0b91035a1e 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -50,6 +50,8 @@
 
 namespace Kernel {
 
+extern RecursiveSpinLock s_mm_lock;
+
 enum class DispatchSignalResult {
     Deferred = 0,
     Yield,
@@ -818,6 +820,7 @@ public:
         ASSERT(!Processor::current().in_irq());
         ASSERT(this == Thread::current());
         ScopedCritical critical;
+        ASSERT(!s_mm_lock.own_lock());
         ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock block_lock(m_block_lock);
         // We need to hold m_block_lock so that nobody can unblock a blocker as soon
@@ -1061,18 +1064,12 @@ public:
         m_ipv4_socket_write_bytes += bytes;
     }
 
-    void set_active(bool active)
-    {
-        m_is_active.store(active, AK::memory_order_release);
-    }
+    void set_active(bool active) { m_is_active = active; }
 
     u32 saved_critical() const { return m_saved_critical; }
     void save_critical(u32 critical) { m_saved_critical = critical; }
 
-    [[nodiscard]] bool is_active() const
-    {
-        return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
-    }
+    [[nodiscard]] bool is_active() const { return m_is_active; }
 
     [[nodiscard]] bool is_finalizable() const
     {
@@ -1170,10 +1167,10 @@ public:
     void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
 
 private:
+    IntrusiveListNode m_process_thread_list_node;
     IntrusiveListNode m_runnable_list_node;
     int m_runnable_priority { -1 };
 
-private:
     friend struct SchedulerData;
     friend class WaitQueue;
@@ -1274,7 +1271,7 @@ private:
 #endif
 
     JoinBlockCondition m_join_condition;
-    Atomic<bool> m_is_active { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
     bool m_is_joinable { true };
     bool m_handling_page_fault { false };
     PreviousMode m_previous_mode { PreviousMode::UserMode };
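
For readers following along, here is a minimal user-space sketch of the lifecycle pattern this patch introduces. It is an illustration only: `std::list`, `std::mutex`, and the local `IterationDecision` enum are stand-ins for the kernel's `IntrusiveList`, `RecursiveSpinLock`, and `AK::IterationDecision`, and the toy `Process`/`Thread` types merely mirror the shape of `add_thread()`, `remove_thread()`, and the new early-exiting `for_each_thread()`.

```cpp
#include <atomic>
#include <cassert>
#include <cstdio>
#include <list>
#include <mutex>

enum class IterationDecision {
    Continue,
    Break,
};

struct Thread;

struct Process {
    std::atomic<unsigned> m_thread_count { 0 };
    std::mutex m_thread_list_lock;    // stand-in for RecursiveSpinLock
    std::list<Thread*> m_thread_list; // stand-in for the intrusive list

    // Returns true if this was the first thread added to the process.
    bool add_thread(Thread& thread)
    {
        bool is_first = m_thread_count.fetch_add(1, std::memory_order_relaxed) == 0;
        std::lock_guard lock(m_thread_list_lock);
        m_thread_list.push_back(&thread);
        return is_first;
    }

    // Returns true if this was the last thread, i.e. the caller should
    // finalize the process (unless it is still being constructed).
    bool remove_thread(Thread& thread)
    {
        // fetch_sub returns the previous value, so "== 1" means this call
        // just dropped the count to zero.
        auto count_before = m_thread_count.fetch_sub(1, std::memory_order_acq_rel);
        assert(count_before != 0);
        std::lock_guard lock(m_thread_list_lock);
        m_thread_list.remove(&thread);
        return count_before == 1;
    }

    // Like the patched for_each_thread(): iterate under the list lock and
    // propagate the callback's decision so callers can stop early.
    template<typename Callback>
    IterationDecision for_each_thread(Callback callback)
    {
        std::lock_guard lock(m_thread_list_lock);
        for (auto* thread : m_thread_list) {
            IterationDecision decision = callback(*thread);
            if (decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    }
};

struct Thread {
    explicit Thread(Process& process)
        : m_process(process)
    {
        m_process.add_thread(*this);
    }
    Process& m_process;
};

int main()
{
    Process process;
    Thread a(process);
    Thread b(process);

    // Visit threads, stopping after the first one: this exercises the
    // early exit that motivated the IterationDecision return type.
    int visited = 0;
    process.for_each_thread([&](Thread&) {
        ++visited;
        return IterationDecision::Break;
    });
    assert(visited == 1);

    bool was_last = process.remove_thread(a);
    assert(!was_last); // b is still alive
    was_last = process.remove_thread(b);
    assert(was_last);  // last thread gone: the kernel would finalize() here
    printf("last thread removed; process would be finalized\n");
    return 0;
}
```

The design point the sketch tries to preserve: the atomic counter, not the list, decides first/last transitions, so `fetch_add`/`fetch_sub` can report them without holding the list lock, and (presumably the reason for `memory_order_acq_rel` on the decrement) the thread that observes the count hitting zero sees the exiting threads' prior writes before calling `finalize()`.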