diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index f53acb27a4..426ba3546f 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -605,6 +605,7 @@ void dump_thread_list(bool with_stack_traces)
         }
         if (with_stack_traces)
             dbgln("{}", thread.backtrace());
+        return IterationDecision::Continue;
     });
 }
 
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index b1730c1398..59389476b1 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -5,6 +5,7 @@
  */
 
 #include
+#include <AK/Singleton.h>
 #include
 #include
 #include
@@ -29,12 +30,22 @@
 
 namespace Kernel {
 
-SpinLock<u8> Thread::g_tid_map_lock;
-READONLY_AFTER_INIT HashMap<ThreadID, Thread*>* Thread::g_tid_map;
+static Singleton<SpinLockProtectedValue<Thread::GlobalList>> s_list;
 
-UNMAP_AFTER_INIT void Thread::initialize()
+SpinLockProtectedValue<Thread::GlobalList>& Thread::all_threads()
 {
-    g_tid_map = new HashMap<ThreadID, Thread*>();
+    return *s_list;
+}
+
+bool Thread::unref() const
+{
+    return all_threads().with([&](auto&) {
+        if (deref_base())
+            return false;
+        m_global_thread_list_node.remove();
+        delete this;
+        return true;
+    });
 }
 
 KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
@@ -77,11 +88,10 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr ker
         m_kernel_stack_region->set_name(KString::try_create(string));
     }
 
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        auto result = g_tid_map->set(m_tid, this);
-        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
-    }
+    all_threads().with([&](auto& list) {
+        list.append(*this);
+    });
+
     if constexpr (THREAD_DEBUG)
         dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
 
@@ -162,11 +172,6 @@ Thread::~Thread()
         // We shouldn't be queued
         VERIFY(m_runnable_priority < 0);
     }
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        auto result = g_tid_map->remove(m_tid);
-        VERIFY(result);
-    }
 }
 
 void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock, u32 lock_count)
@@ -1250,21 +1255,13 @@ KResult Thread::make_thread_specific_region(Badge<Process>)
 
 RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
-    RefPtr<Thread> found_thread;
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        if (auto it = g_tid_map->find(tid); it != g_tid_map->end()) {
-            // We need to call try_ref() here as there is a window between
-            // dropping the last reference and calling the Thread's destructor!
-            // We shouldn't remove the threads from that list until it is truly
-            // destructed as it may stick around past finalization in order to
-            // be able to wait() on it!
-            if (it->value->try_ref()) {
-                found_thread = adopt_ref(*it->value);
-            }
+    return all_threads().with([&](auto& list) -> RefPtr<Thread> {
+        for (Thread& thread : list) {
+            if (thread.tid() == tid)
+                return thread;
         }
-    }
-    return found_thread;
+        return nullptr;
+    });
 }
 
 void Thread::reset_fpu_state()
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 4d8989deb3..0f35f917d1 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <Kernel/Locking/SpinLockProtectedValue.h>
 #include
 #include
 #include
@@ -130,7 +131,7 @@ struct ThreadRegisters {
 };
 
 class Thread
-    : public RefCounted<Thread>
+    : public RefCountedBase
     , public Weakable<Thread> {
     AK_MAKE_NONCOPYABLE(Thread);
     AK_MAKE_NONMOVABLE(Thread);
@@ -141,10 +142,9 @@ class Thread
     friend class Scheduler;
     friend struct ThreadReadyQueue;
 
-    static SpinLock<u8> g_tid_map_lock;
-    static HashMap<ThreadID, Thread*>* g_tid_map;
-
 public:
+    bool unref() const;
+
     inline static Thread* current()
     {
         return Processor::current_thread();
@@ -1372,8 +1372,14 @@ private:
     void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
     void drop_thread_count(bool);
 
+    mutable IntrusiveListNode<Thread> m_global_thread_list_node;
+
 public:
     using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
+    using GlobalList = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_global_thread_list_node>;
+
+private:
+    static SpinLockProtectedValue<GlobalList>& all_threads();
 };
 
 AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
@@ -1381,37 +1387,41 @@ AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
 template<IteratorFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map) {
-        IterationDecision decision = callback(*it.value);
-        if (decision != IterationDecision::Continue)
-            return decision;
-    }
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) -> IterationDecision {
+        for (auto& thread : list) {
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+        return IterationDecision::Continue;
+    });
 }
 
 template<IteratorFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map) {
-        auto& thread = *it.value;
-        if (thread.state() != state)
-            continue;
-        IterationDecision decision = callback(thread);
-        if (decision != IterationDecision::Continue)
-            return decision;
-    }
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) -> IterationDecision {
+        for (auto& thread : list) {
+            if (thread.state() != state)
+                continue;
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+        return IterationDecision::Continue;
+    });
 }
 
 template<VoidFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map)
-        callback(*it.value);
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) {
+        for (auto& thread : list) {
+            if (callback(thread) == IterationDecision::Break)
+                return IterationDecision::Break;
+        }
+        return IterationDecision::Continue;
+    });
 }
 
 template<VoidFunction<Thread&> Callback>
diff --git a/Kernel/init.cpp b/Kernel/init.cpp
index bd275461d6..61c5fec4ab 100644
--- a/Kernel/init.cpp
+++ b/Kernel/init.cpp
@@ -207,7 +207,6 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     __stack_chk_guard = get_fast_random();
 
     ProcFSComponentRegistry::initialize();
-    Thread::initialize();
     Process::initialize();
     Scheduler::initialize();
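
Note for reviewers (not part of the patch): the heart of this change is the custom Thread::unref(). With plain RefCounted<Thread>, the object is deleted the instant the count hits zero, which is the window the old try_ref() comment in from_tid() worked around: a lookup could find a Thread in the map whose refcount had already reached zero. Deriving from RefCountedBase instead lets unref() take the global list's lock first, so the final deref, the unlink from the list, and the delete happen atomically with respect to lookups. Below is a minimal standalone sketch of that shape, not SerenityOS code: std::mutex and std::list stand in for SpinLockProtectedValue and IntrusiveList, and LockProtected, Object, create() and lookup() are illustrative names. For brevity the refcount itself is guarded by the same lock, whereas the real Thread keeps an atomic refcount (deref_base()) and only serializes the zero transition.

#include <cstdio>
#include <list>
#include <mutex>

// Stand-in for SpinLockProtectedValue<T>: the wrapped value is only
// reachable through with(), which runs the callback under the lock.
template<typename T>
class LockProtected {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value;
};

class Object {
public:
    int id() const { return m_id; }

    // Mirrors Thread::unref(): dropping the last reference and leaving the
    // global list happen under the same lock, so lookup() can never hand out
    // an Object whose refcount has already reached zero.
    void unref()
    {
        all().with([&](auto& list) {
            if (--m_ref_count > 0)
                return;
            list.remove(this);
            delete this;
        });
    }

    static Object* create(int id)
    {
        auto* object = new Object(id); // refcount starts at 1
        all().with([&](auto& list) { list.push_back(object); });
        return object;
    }

    // Mirrors Thread::from_tid(): scan the list and take the new reference
    // before the lock is released.
    static Object* lookup(int id)
    {
        return all().with([&](auto& list) -> Object* {
            for (auto* object : list) {
                if (object->id() == id) {
                    ++object->m_ref_count;
                    return object;
                }
            }
            return nullptr;
        });
    }

private:
    explicit Object(int id)
        : m_id(id)
    {
    }

    // Like the s_list Singleton plus the Thread::all_threads() accessor.
    static LockProtected<std::list<Object*>>& all()
    {
        static LockProtected<std::list<Object*>> s_list;
        return s_list;
    }

    int m_id;
    unsigned m_ref_count { 1 };
};

int main()
{
    auto* object = Object::create(7);
    if (auto* found = Object::lookup(7)) {
        std::printf("found object %d\n", found->id());
        found->unref(); // drop the reference taken by lookup()
    }
    object->unref(); // last reference: unlinks from the list and deletes
}

The with() indirection is what makes the pattern hard to misuse: there is no way to reach the list without holding its lock, which is the same property SpinLockProtectedValue enforces for the kernel's global thread list.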