
Kernel: Retire SchedulerData and add Thread lookup table

This allows us to get rid of the thread lists in SchedulerData.
Also, instead of iterating over all threads to find a thread by id,
just use a lookup table. In the rare case of having to iterate over
all threads, just iterate the lookup table.
Author: Tom (2021-01-27 22:58:24 -07:00), committed by Andreas Kling
parent e55d227f93
commit d5472426ec

5 changed files with 57 additions and 153 deletions
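At its core, the change gives every Thread an entry in a spinlock-guarded HashMap<ThreadID, Thread*>: a thread registers itself on construction and unregisters on destruction, so Thread::from_tid() becomes a hash lookup instead of a scan over every thread in the system. Below is a minimal userspace sketch of the same pattern, with std::unordered_map and std::mutex standing in for the kernel's AK::HashMap and SpinLock; all names are illustrative, not taken from the commit.

// Sketch only: a tid -> Thread* registry guarded by a lock, mirroring the
// commit's g_tid_map. Not SerenityOS code.
#include <cassert>
#include <cstdint>
#include <mutex>
#include <unordered_map>

struct Thread { }; // stand-in for Kernel::Thread

using ThreadID = std::int32_t;

static std::mutex g_registry_lock;
static std::unordered_map<ThreadID, Thread*> g_registry;

// Constructor path: a tid must never be registered twice.
void register_thread(ThreadID tid, Thread* thread)
{
    std::lock_guard<std::mutex> lock(g_registry_lock);
    bool inserted = g_registry.emplace(tid, thread).second;
    assert(inserted);
}

// Destructor path: the entry must still be present when the thread dies.
void unregister_thread(ThreadID tid)
{
    std::lock_guard<std::mutex> lock(g_registry_lock);
    assert(g_registry.erase(tid) == 1);
}

// Lookup is now a single hash probe, not an O(n) walk over all threads.
Thread* thread_from_tid(ThreadID tid)
{
    std::lock_guard<std::mutex> lock(g_registry_lock);
    auto it = g_registry.find(tid);
    return it != g_registry.end() ? it->second : nullptr;
}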

Kernel/Scheduler.cpp

@@ -53,15 +53,8 @@ public:
     bool m_in_scheduler { true };
 };
 
-SchedulerData* g_scheduler_data;
 RecursiveSpinLock g_scheduler_lock;
 
-void Scheduler::init_thread(Thread& thread)
-{
-    ASSERT(g_scheduler_data);
-    g_scheduler_data->m_nonrunnable_threads.append(thread);
-}
-
 static u32 time_slice_for(const Thread& thread)
 {
     // One time slice unit == 4ms (assuming 250 ticks/second)
@@ -238,36 +231,29 @@ bool Scheduler::pick_next()
     }
 
     if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
-        dbgln("Scheduler[{}j]: Non-runnables:", Processor::id());
-        Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
-            if (thread.state() == Thread::Dying) {
+        dbgln("Scheduler thread list:", Processor::id());
+        Thread::for_each([&](Thread& thread) -> IterationDecision {
+            switch (thread.state()) {
+            case Thread::Dying:
                 dbgln("    {:12} {} @ {:04x}:{:08x} Finalizable: {}",
                     thread.state_string(),
                     thread,
                     thread.tss().cs,
                     thread.tss().eip,
                     thread.is_finalizable());
-            } else {
-                dbgln("    {:12} {} @ {:04x}:{:08x}",
+                break;
+            default:
+                dbgln("    {:12} Pr:{:2} {} @ {:04x}:{:08x}",
                     thread.state_string(),
+                    thread.priority(),
                     thread,
                     thread.tss().cs,
                     thread.tss().eip);
+                break;
             }
             return IterationDecision::Continue;
         });
-
-        dbgln("Scheduler[{}j]: Runnables:", Processor::id());
-        Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
-            dbgln("    {:2} {:12} @ {:04x}:{:08x}",
-                thread.priority(),
-                thread.state_string(),
-                thread.tss().cs,
-                thread.tss().eip);
-            return IterationDecision::Continue;
-        });
     }
 
     auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
@@ -507,7 +493,6 @@ void Scheduler::initialize()
     ASSERT(&Processor::current() != nullptr); // sanity check
 
     RefPtr<Thread> idle_thread;
-    g_scheduler_data = new SchedulerData;
     g_finalizer_wait_queue = new WaitQueue;
     g_ready_queues = new ThreadReadyQueue[g_ready_queue_buckets];

Kernel/Scheduler.h

@@ -40,12 +40,10 @@ class Process;
 class Thread;
 class WaitQueue;
 struct RegisterState;
-struct SchedulerData;
 
 extern Thread* g_finalizer;
 extern WaitQueue* g_finalizer_wait_queue;
 extern Atomic<bool> g_finalizer_has_work;
-extern SchedulerData* g_scheduler_data;
 extern RecursiveSpinLock g_scheduler_lock;
 
 class Scheduler {
@@ -73,14 +71,6 @@ public:
     static Thread& pull_next_runnable_thread();
     static bool dequeue_runnable_thread(Thread&, bool = false);
     static void queue_runnable_thread(Thread&);
-
-    template<typename Callback>
-    static inline IterationDecision for_each_runnable(Callback);
-    template<typename Callback>
-    static inline IterationDecision for_each_nonrunnable(Callback);
-
-    static void init_thread(Thread& thread);
 };
 
 }

Kernel/Thread.cpp

@@ -45,6 +45,14 @@
 
 namespace Kernel {
 
+SpinLock<u8> Thread::g_tid_map_lock;
+HashMap<ThreadID, Thread*>* Thread::g_tid_map;
+
+void Thread::initialize()
+{
+    g_tid_map = new HashMap<ThreadID, Thread*>();
+}
+
 Thread::Thread(NonnullRefPtr<Process> process)
     : m_process(move(process))
     , m_name(m_process->name())
@@ -59,6 +67,11 @@ Thread::Thread(NonnullRefPtr<Process> process)
     } else {
         m_tid = Process::allocate_pid().value();
     }
+    {
+        ScopedSpinLock lock(g_tid_map_lock);
+        auto result = g_tid_map->set(m_tid, this);
+        ASSERT(result == AK::HashSetResult::InsertedNewEntry);
+    }
     if constexpr (THREAD_DEBUG)
         dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
     set_default_signal_dispositions();
@@ -115,9 +128,6 @@ Thread::Thread(NonnullRefPtr<Process> process)
     // thread is ready to be cleaned up.
     ref();
     guard.disarm();
-
-    if (m_process->pid() != 0)
-        Scheduler::init_thread(*this);
 }
 
 Thread::~Thread()
@@ -131,11 +141,14 @@ Thread::~Thread()
         // the middle of being destroyed.
         ScopedSpinLock lock(g_scheduler_lock);
         ASSERT(!m_process_thread_list_node.is_in_list());
-        g_scheduler_data->thread_list_for_state(m_state).remove(*this);
 
         // We shouldn't be queued
         ASSERT(m_runnable_priority < 0);
-        ASSERT(!m_runnable_list_node.is_in_list());
+    }
+    {
+        ScopedSpinLock lock(g_tid_map_lock);
+        auto result = g_tid_map->remove(m_tid);
+        ASSERT(result);
     }
 }
@@ -903,11 +916,6 @@ void Thread::set_state(State new_state, u8 stop_signal)
         dbgln<THREAD_DEBUG>("Set thread {} state to {}", *this, state_string());
     }
 
-    if (m_process->pid() != 0) {
-        update_state_for_thread(previous_state);
-        ASSERT(g_scheduler_data->has_thread(*this));
-    }
-
     if (previous_state == Runnable) {
         Scheduler::dequeue_runnable_thread(*this);
     } else if (previous_state == Stopped) {
@@ -952,24 +960,6 @@ void Thread::set_state(State new_state, u8 stop_signal)
     }
 }
 
-void Thread::update_state_for_thread(Thread::State previous_state)
-{
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(g_scheduler_data);
-    ASSERT(g_scheduler_lock.own_lock());
-    auto& previous_list = g_scheduler_data->thread_list_for_state(previous_state);
-    auto& list = g_scheduler_data->thread_list_for_state(state());
-
-    if (&previous_list != &list) {
-        previous_list.remove(*this);
-    }
-
-    if (list.contains(*this))
-        return;
-
-    list.append(*this);
-}
-
 String Thread::backtrace()
 {
     return backtrace_impl();
@@ -1093,14 +1083,12 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
 RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
     RefPtr<Thread> found_thread;
-    ScopedSpinLock lock(g_scheduler_lock);
-    Thread::for_each([&](auto& thread) {
-        if (thread.tid() == tid) {
-            found_thread = &thread;
-            return IterationDecision::Break;
-        }
-        return IterationDecision::Continue;
-    });
+    {
+        ScopedSpinLock lock(g_tid_map_lock);
+        auto it = g_tid_map->find(tid);
+        if (it != g_tid_map->end())
+            found_thread = it->value;
+    }
     return found_thread;
 }

Kernel/Thread.h

@@ -27,6 +27,7 @@
 #pragma once
 
 #include <AK/Function.h>
+#include <AK/HashMap.h>
 #include <AK/IntrusiveList.h>
 #include <AK/Optional.h>
 #include <AK/OwnPtr.h>
@@ -87,12 +88,17 @@ class Thread
     friend class Scheduler;
     friend class ThreadReadyQueue;
 
+    static SpinLock<u8> g_tid_map_lock;
+    static HashMap<ThreadID, Thread*>* g_tid_map;
+
 public:
     inline static Thread* current()
     {
         return Processor::current_thread();
     }
 
+    static void initialize();
+
     explicit Thread(NonnullRefPtr<Process>);
     ~Thread();
@@ -1090,8 +1096,6 @@ public:
     template<typename Callback>
     static IterationDecision for_each_in_state(State, Callback);
     template<typename Callback>
-    static IterationDecision for_each_living(Callback);
-    template<typename Callback>
     static IterationDecision for_each(Callback);
 
     [[nodiscard]] static bool is_runnable_state(Thread::State state)
@@ -1166,10 +1170,8 @@ public:
 private:
     IntrusiveListNode m_process_thread_list_node;
-    IntrusiveListNode m_runnable_list_node;
     int m_runnable_priority { -1 };
 
-    friend struct SchedulerData;
     friend class WaitQueue;
 
     class JoinBlockCondition : public BlockCondition {
@@ -1304,100 +1306,38 @@ private:
     void yield_without_holding_big_lock();
     void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();
-    void update_state_for_thread(Thread::State previous_state);
     void drop_thread_count(bool);
 };
 
-template<typename Callback>
-inline IterationDecision Thread::for_each_living(Callback callback)
-{
-    ASSERT_INTERRUPTS_DISABLED();
-    return Thread::for_each([callback](Thread& thread) -> IterationDecision {
-        if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
-            return callback(thread);
-        return IterationDecision::Continue;
-    });
-}
-
 template<typename Callback>
 inline IterationDecision Thread::for_each(Callback callback)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ScopedSpinLock lock(g_scheduler_lock);
-    auto ret = Scheduler::for_each_runnable(callback);
-    if (ret == IterationDecision::Break)
-        return ret;
-    return Scheduler::for_each_nonrunnable(callback);
+    ScopedSpinLock lock(g_tid_map_lock);
+    for (auto& it : *g_tid_map) {
+        IterationDecision decision = callback(*it.value);
+        if (decision != IterationDecision::Continue)
+            return decision;
+    }
+    return IterationDecision::Continue;
 }
 
 template<typename Callback>
 inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ScopedSpinLock lock(g_scheduler_lock);
-    auto new_callback = [=](Thread& thread) -> IterationDecision {
-        if (thread.state() == state)
-            return callback(thread);
-        return IterationDecision::Continue;
-    };
-    if (is_runnable_state(state))
-        return Scheduler::for_each_runnable(new_callback);
-    return Scheduler::for_each_nonrunnable(new_callback);
+    ScopedSpinLock lock(g_tid_map_lock);
+    for (auto& it : *g_tid_map) {
+        auto& thread = *it.value;
+        if (thread.state() != state)
+            continue;
+        IterationDecision decision = callback(thread);
+        if (decision != IterationDecision::Continue)
+            return decision;
+    }
+    return IterationDecision::Continue;
 }
 
 const LogStream& operator<<(const LogStream&, const Thread&);
 
-struct SchedulerData {
-    typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;
-
-    ThreadList m_runnable_threads;
-    ThreadList m_nonrunnable_threads;
-
-    bool has_thread(Thread& thread) const
-    {
-        return m_runnable_threads.contains(thread) || m_nonrunnable_threads.contains(thread);
-    }
-
-    ThreadList& thread_list_for_state(Thread::State state)
-    {
-        if (Thread::is_runnable_state(state))
-            return m_runnable_threads;
-        return m_nonrunnable_threads;
-    }
-};
-
-template<typename Callback>
-inline IterationDecision Scheduler::for_each_runnable(Callback callback)
-{
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(g_scheduler_lock.own_lock());
-    auto& tl = g_scheduler_data->m_runnable_threads;
-    for (auto it = tl.begin(); it != tl.end();) {
-        auto& thread = *it;
-        it = ++it;
-        if (callback(thread) == IterationDecision::Break)
-            return IterationDecision::Break;
-    }
-    return IterationDecision::Continue;
-}
-
-template<typename Callback>
-inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
-{
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(g_scheduler_lock.own_lock());
-    auto& tl = g_scheduler_data->m_nonrunnable_threads;
-    for (auto it = tl.begin(); it != tl.end();) {
-        auto& thread = *it;
-        it = ++it;
-        if (callback(thread) == IterationDecision::Break)
-            return IterationDecision::Break;
-    }
-    return IterationDecision::Continue;
-}
-
 }
 
 template<>
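Note that the rewritten Thread::for_each and Thread::for_each_in_state above keep the old early-exit contract: the callback returns an IterationDecision, and anything other than Continue stops the walk and is propagated to the caller. A standalone sketch of that contract over a plain map (illustrative names, not kernel code):

#include <unordered_map>

enum class IterationDecision { Continue, Break };

struct Thread { int state { 0 }; };

// Visit every registered thread; stop as soon as the callback asks to.
template<typename Callback>
IterationDecision for_each_thread(std::unordered_map<int, Thread*>& registry, Callback callback)
{
    for (auto& entry : registry) {
        IterationDecision decision = callback(*entry.second);
        if (decision != IterationDecision::Continue)
            return decision; // early exit propagates to the caller
    }
    return IterationDecision::Continue;
}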

Kernel/init.cpp

@@ -162,6 +162,7 @@ extern "C" [[noreturn]] void init()
     }
     VirtualConsole::switch_to(0);
 
+    Thread::initialize();
     Process::initialize();
     Scheduler::initialize();