From ec27cbbb2ae5cd7c48886e8fccf487847fcbddca Mon Sep 17 00:00:00 2001
From: Tom
Date: Thu, 28 Jan 2021 20:07:41 -0700
Subject: [PATCH] Kernel: Store whether a thread is the idle thread in Thread directly

This solves a problem where checking whether a thread is an idle thread
may require iterating all processors if it is not the idle thread of the
current processor.
---
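A minimal sketch of the problem described above: without a per-thread flag,
answering "is this thread some processor's idle thread?" requires consulting
per-processor state, potentially for every processor, while the new
Thread::is_idle_thread() is a single bool load. The for_each_processor()
helper below is hypothetical and only used for illustration, not the actual
kernel API.

    // Hypothetical sketch only; for_each_processor() is an assumed helper.
    //
    // Before: a thread could only be compared against each processor's own
    // idle thread, so a complete answer meant walking every processor.
    static bool is_some_idle_thread_before(const Thread& thread)
    {
        bool found = false;
        for_each_processor([&](Processor& processor) {
            if (processor.idle_thread() == &thread)
                found = true;
        });
        return found; // O(processor count)
    }

    // After: the thread itself carries the answer; no Processor state needed.
    static bool is_some_idle_thread_after(const Thread& thread)
    {
        return thread.is_idle_thread(); // O(1)
    }
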
 Kernel/Arch/x86/CPU.h | 11 ++++++-----
 Kernel/Scheduler.cpp  | 12 ++++++------
 Kernel/Thread.h       |  3 +++
 3 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/Kernel/Arch/x86/CPU.h b/Kernel/Arch/x86/CPU.h
index 64f6114e58..296ea2e39a 100644
--- a/Kernel/Arch/x86/CPU.h
+++ b/Kernel/Arch/x86/CPU.h
@@ -779,11 +779,6 @@ public:
         return *m_mm_data;
     }
 
-    ALWAYS_INLINE Thread* idle_thread() const
-    {
-        return m_idle_thread;
-    }
-
     ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
     {
         m_idle_thread = &idle_thread;
@@ -806,6 +801,12 @@ public:
         write_fs_u32(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
     }
 
+    ALWAYS_INLINE static Thread* idle_thread()
+    {
+        // See comment in Processor::current_thread
+        return (Thread*)read_fs_u32(__builtin_offsetof(Processor, m_idle_thread));
+    }
+
     ALWAYS_INLINE u32 get_id() const
     {
         // NOTE: This variant should only be used when iterating over all
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 4c90294ecc..4f2474b675 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -39,7 +39,7 @@ RecursiveSpinLock g_scheduler_lock;
 static u32 time_slice_for(const Thread& thread)
 {
     // One time slice unit == 4ms (assuming 250 ticks/second)
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return 1;
     return 2;
 }
@@ -105,12 +105,12 @@ Thread& Scheduler::pull_next_runnable_thread()
         }
         priority_mask &= ~(1u << priority);
     }
-    return *Processor::current().idle_thread();
+    return *Processor::idle_thread();
 }
 
 bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 {
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return true;
     ScopedSpinLock lock(g_ready_queues_lock);
     auto priority = thread.m_runnable_priority;
@@ -134,7 +134,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 void Scheduler::queue_runnable_thread(Thread& thread)
 {
     VERIFY(g_scheduler_lock.own_lock());
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return;
 
     auto priority = thread_priority_to_priority_index(thread.priority());
@@ -160,9 +160,8 @@ UNMAP_AFTER_INIT void Scheduler::start()
     auto& processor = Processor::current();
     processor.set_scheduler_data(*new SchedulerPerProcessorData());
     VERIFY(processor.is_initialized());
-    auto& idle_thread = *processor.idle_thread();
+    auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
-    VERIFY(processor.idle_thread() == &idle_thread);
     idle_thread.set_ticks_left(time_slice_for(idle_thread));
     idle_thread.did_schedule();
     idle_thread.set_initialized(true);
@@ -467,6 +466,7 @@ UNMAP_AFTER_INIT void Scheduler::initialize()
 
 UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
 {
+    idle_thread->set_idle_thread();
     Processor::current().set_idle_thread(*idle_thread);
     Processor::current().set_current_thread(*idle_thread);
 }
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index b887cf46eb..bdb72efd2f 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1112,6 +1112,8 @@ public:
         return m_handling_page_fault;
     }
     void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
+    void set_idle_thread() { m_is_idle_thread = true; }
+    bool is_idle_thread() const { return m_is_idle_thread; }
 
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
@@ -1248,6 +1250,7 @@ private:
     bool m_should_die { false };
     bool m_initialized { false };
     bool m_in_block { false };
+    bool m_is_idle_thread { false };
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
 
     void yield_without_holding_big_lock();