Kernel: Lock thread list while in Thread::unref()
This patch does three things:

- Convert the global thread list from a HashMap to an IntrusiveList
- Combine the thread list and its lock into a SpinLockProtectedValue
- Customize Thread::unref() so it locks the list while unreffing

This closes the same race window for Thread as @sin-ack's recent changes did for Process.

Note that the HashMap->IntrusiveList conversion means that we lose O(1) lookups, but the majority of clients of this list are doing traversal, not lookup. Once we have an intrusive hashing solution, we should port this to use that, but for now, this gets rid of heap allocations during a sensitive time.
Parent: 90c7307c6c
Commit: 37304203dd
4 changed files with 61 additions and 54 deletions
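For readers unfamiliar with the SpinLockProtectedValue pattern the message refers to: the idea is to keep a value and the lock that guards it in a single object, and to hand the value out only inside a callback that runs with the lock held. Below is a minimal userspace sketch of that shape; the names (MutexProtected, with) and the use of std::mutex are illustrative stand-ins, not the kernel's implementation.

```cpp
// Illustrative stand-in for the "value + its lock in one object" pattern.
// Not the kernel's SpinLockProtectedValue; std::mutex replaces SpinLock.
#include <list>
#include <mutex>

template<typename T>
class MutexProtected {
public:
    // The protected value is only reachable through with(), so every access
    // automatically happens under the lock.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_mutex);
        return callback(m_value);
    }

private:
    std::mutex m_mutex;
    T m_value {};
};

int main()
{
    MutexProtected<std::list<int>> threads;
    threads.with([](auto& list) { list.push_back(42); });
    return threads.with([](auto& list) { return list.empty() ? 1 : 0; });
}
```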
@@ -605,6 +605,7 @@ void dump_thread_list(bool with_stack_traces)
         }
         if (with_stack_traces)
             dbgln("{}", thread.backtrace());
+        return IterationDecision::Continue;
     });
 }

@@ -5,6 +5,7 @@
  */

 #include <AK/ScopeGuard.h>
+#include <AK/Singleton.h>
 #include <AK/StringBuilder.h>
 #include <AK/Time.h>
 #include <Kernel/Arch/x86/SmapDisabler.h>
@@ -29,12 +30,22 @@

 namespace Kernel {

-SpinLock<u8> Thread::g_tid_map_lock;
-READONLY_AFTER_INIT HashMap<ThreadID, Thread*>* Thread::g_tid_map;
-
-UNMAP_AFTER_INIT void Thread::initialize()
-{
-    g_tid_map = new HashMap<ThreadID, Thread*>();
-}
+static Singleton<SpinLockProtectedValue<Thread::GlobalList>> s_list;
+
+SpinLockProtectedValue<Thread::GlobalList>& Thread::all_threads()
+{
+    return *s_list;
+}
+
+bool Thread::unref() const
+{
+    return all_threads().with([&](auto&) {
+        if (deref_base())
+            return false;
+        m_global_thread_list_node.remove();
+        delete this;
+        return true;
+    });
+}

 KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
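The hunk above is the heart of the fix. The race it closes: a traverser of the global list could find a Thread whose reference count had just hit zero and which was about to be destroyed. By performing the final deref, the list unlink, and the delete while holding the list's lock, nothing that traverses the list under that same lock can ever observe a dying Thread. A self-contained sketch of the pattern follows; every name in it (Object, s_all_objects, g_list_lock) is illustrative, not the kernel's.

```cpp
// Sketch of the unref()-under-the-list-lock pattern: the last deref, the list
// removal, and the delete all happen under the lock that also guards lookup.
#include <atomic>
#include <list>
#include <mutex>

static std::mutex g_list_lock;

class Object {
public:
    static inline std::list<Object*> s_all_objects;

    Object()
    {
        std::lock_guard guard(g_list_lock);
        s_all_objects.push_back(this);
    }

    void ref() { m_ref_count.fetch_add(1, std::memory_order_relaxed); }

    bool unref()
    {
        std::lock_guard guard(g_list_lock);
        if (m_ref_count.fetch_sub(1, std::memory_order_acq_rel) > 1)
            return false; // Other references remain; nothing to clean up.
        // The count reached zero while we hold the list lock, so no lookup can
        // currently be holding a pointer to us: unlink and destroy.
        s_all_objects.remove(this);
        delete this;
        return true;
    }

private:
    std::atomic<unsigned> m_ref_count { 1 };
};

// A lookup takes its new reference under the same lock, so it can never grab
// an object whose count has already reached zero.
Object* find_any_and_ref()
{
    std::lock_guard guard(g_list_lock);
    if (Object::s_all_objects.empty())
        return nullptr;
    Object* object = Object::s_all_objects.front();
    object->ref();
    return object;
}

int main()
{
    auto* object = new Object;        // one reference, registered in the list
    auto* other = find_any_and_ref(); // second reference taken under the lock
    object->unref();                  // not the last reference
    if (other)
        other->unref();               // last reference: unlink + delete under the lock
}
```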
@@ -77,11 +88,10 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> ker
         m_kernel_stack_region->set_name(KString::try_create(string));
     }

-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        auto result = g_tid_map->set(m_tid, this);
-        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
-    }
+    all_threads().with([&](auto& list) {
+        list.append(*this);
+    });

     if constexpr (THREAD_DEBUG)
         dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
@@ -162,11 +172,6 @@ Thread::~Thread()
         // We shouldn't be queued
         VERIFY(m_runnable_priority < 0);
     }
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        auto result = g_tid_map->remove(m_tid);
-        VERIFY(result);
-    }
 }

 void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock, u32 lock_count)
@@ -1250,21 +1255,13 @@ KResult Thread::make_thread_specific_region(Badge<Process>)

 RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
-    RefPtr<Thread> found_thread;
-    {
-        ScopedSpinLock lock(g_tid_map_lock);
-        if (auto it = g_tid_map->find(tid); it != g_tid_map->end()) {
-            // We need to call try_ref() here as there is a window between
-            // dropping the last reference and calling the Thread's destructor!
-            // We shouldn't remove the threads from that list until it is truly
-            // destructed as it may stick around past finalization in order to
-            // be able to wait() on it!
-            if (it->value->try_ref()) {
-                found_thread = adopt_ref(*it->value);
-            }
-        }
-    }
-    return found_thread;
+    return all_threads().with([&](auto& list) -> RefPtr<Thread> {
+        for (Thread& thread : list) {
+            if (thread.tid() == tid)
+                return thread;
+        }
+        return nullptr;
+    });
 }

 void Thread::reset_fpu_state()
@@ -26,6 +26,7 @@
 #include <Kernel/KString.h>
 #include <Kernel/Locking/LockLocation.h>
 #include <Kernel/Locking/LockMode.h>
+#include <Kernel/Locking/SpinLockProtectedValue.h>
 #include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Scheduler.h>
 #include <Kernel/TimerQueue.h>
@@ -130,7 +131,7 @@ struct ThreadRegisters {
 };

 class Thread
-    : public RefCounted<Thread>
+    : public RefCountedBase
     , public Weakable<Thread> {
     AK_MAKE_NONCOPYABLE(Thread);
     AK_MAKE_NONMOVABLE(Thread);
@@ -141,10 +142,9 @@ class Thread
     friend class Scheduler;
     friend struct ThreadReadyQueue;

-    static SpinLock<u8> g_tid_map_lock;
-    static HashMap<ThreadID, Thread*>* g_tid_map;
-
 public:
+    bool unref() const;
+
     inline static Thread* current()
     {
         return Processor::current_thread();
@@ -1372,8 +1372,14 @@ private:
     void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
     void drop_thread_count(bool);

+    mutable IntrusiveListNode<Thread> m_global_thread_list_node;
+
 public:
     using ListInProcess = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node>;
+    using GlobalList = IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_global_thread_list_node>;
+
+private:
+    static SpinLockProtectedValue<GlobalList>& all_threads();
 };

 AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
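This hunk adds the embedded list node and the GlobalList type. The benefit of an intrusive list here is that the node's storage lives inside the Thread object itself, so linking and unlinking never touch the heap; the cost, as the commit message notes, is that a TID lookup becomes a linear walk. A rough sketch of that trade-off, with illustrative types that are not AK::IntrusiveList and not the kernel's Thread:

```cpp
// Illustrative sketch only: shows why an intrusive list needs no allocation
// for insert/remove, and why lookup by id degrades to an O(n) scan.
struct Thread;

struct ListNode {
    Thread* owner { nullptr };
    ListNode* prev { nullptr };
    ListNode* next { nullptr };

    void remove()
    {
        if (prev)
            prev->next = next;
        if (next)
            next->prev = prev;
        prev = next = nullptr;
    }
};

struct Thread {
    int tid { 0 };
    ListNode global_node; // the list's storage lives inside the Thread itself
};

struct GlobalThreadList {
    ListNode sentinel;

    void append(Thread& thread)
    {
        ListNode* last = &sentinel;
        while (last->next)
            last = last->next;
        thread.global_node.owner = &thread;
        thread.global_node.prev = last;
        last->next = &thread.global_node;
    }

    Thread* find_by_tid(int tid)
    {
        // O(n) scan; acceptable because most callers traverse the whole list anyway.
        for (ListNode* node = sentinel.next; node; node = node->next) {
            if (node->owner->tid == tid)
                return node->owner;
        }
        return nullptr;
    }
};

int main()
{
    Thread a;
    a.tid = 1;
    Thread b;
    b.tid = 2;

    GlobalThreadList list;
    list.append(a);
    list.append(b);

    a.global_node.remove(); // unlink without touching the heap
    return list.find_by_tid(2) == &b ? 0 : 1;
}
```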
@@ -1381,37 +1387,41 @@ AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);

 template<IteratorFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map) {
-        IterationDecision decision = callback(*it.value);
-        if (decision != IterationDecision::Continue)
-            return decision;
-    }
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) -> IterationDecision {
+        for (auto& thread : list) {
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+        return IterationDecision::Continue;
+    });
 }

 template<IteratorFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map) {
-        auto& thread = *it.value;
-        if (thread.state() != state)
-            continue;
-        IterationDecision decision = callback(thread);
-        if (decision != IterationDecision::Continue)
-            return decision;
-    }
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) -> IterationDecision {
+        for (auto& thread : list) {
+            if (thread.state() != state)
+                continue;
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+        return IterationDecision::Continue;
+    });
 }

 template<VoidFunction<Thread&> Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ScopedSpinLock lock(g_tid_map_lock);
-    for (auto& it : *g_tid_map)
-        callback(*it.value);
-    return IterationDecision::Continue;
+    return all_threads().with([&](auto& list) {
+        for (auto& thread : list) {
+            if (callback(thread) == IterationDecision::Break)
+                return IterationDecision::Break;
+        }
+        return IterationDecision::Continue;
+    });
 }

 template<VoidFunction<Thread&> Callback>
@@ -207,7 +207,6 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     __stack_chk_guard = get_fast_random<size_t>();

     ProcFSComponentRegistry::initialize();
-    Thread::initialize();
     Process::initialize();

     Scheduler::initialize();