Kernel: Store whether a thread is the idle thread in Thread directly
This solves a problem where checking whether a thread is an idle thread may require iterating over all processors if it is not the idle thread of the current processor.
Commit ec27cbbb2a (parent 9a69b9112b)
3 changed files with 15 additions and 11 deletions
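For context, here is a minimal, self-contained sketch of the difference (not SerenityOS code; the simplified Thread and Processor stand-ins and the fixed four-CPU array are assumptions made only for illustration): the old approach answers "is this thread an idle thread?" by comparing the thread against each processor's idle-thread pointer, while after this change the thread answers it from a flag it carries itself.

#include <array>
#include <cstdio>

struct Thread {
    bool m_is_idle_thread { false }; // the flag this commit adds to Thread
    void set_idle_thread() { m_is_idle_thread = true; }
    bool is_idle_thread() const { return m_is_idle_thread; }
};

struct Processor {
    Thread* m_idle_thread { nullptr };
    void set_idle_thread(Thread& thread) { m_idle_thread = &thread; }
    Thread* idle_thread() const { return m_idle_thread; }
};

static std::array<Processor, 4> s_processors; // hypothetical 4-CPU system

// Old style: deciding whether an arbitrary thread is *an* idle thread can
// mean checking it against every processor's idle-thread pointer.
static bool is_an_idle_thread_by_iteration(const Thread& thread)
{
    for (auto& processor : s_processors) {
        if (processor.idle_thread() == &thread)
            return true;
    }
    return false;
}

int main()
{
    std::array<Thread, 4> idle_threads;
    for (size_t i = 0; i < idle_threads.size(); ++i) {
        idle_threads[i].set_idle_thread(); // new style: a single flag on the thread
        s_processors[i].set_idle_thread(idle_threads[i]);
    }

    Thread normal_thread;
    std::printf("normal thread: iteration=%d flag=%d\n",
        is_an_idle_thread_by_iteration(normal_thread), normal_thread.is_idle_thread());
    std::printf("idle thread 2: iteration=%d flag=%d\n",
        is_an_idle_thread_by_iteration(idle_threads[2]), idle_threads[2].is_idle_thread());
    return 0;
}

The flag is set once, when the scheduler registers a processor's idle thread, so the check becomes a single member read no matter which processor the idle thread belongs to.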
@@ -779,11 +779,6 @@ public:
         return *m_mm_data;
     }
 
-    ALWAYS_INLINE Thread* idle_thread() const
-    {
-        return m_idle_thread;
-    }
-
     ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
     {
         m_idle_thread = &idle_thread;
@@ -806,6 +801,12 @@ public:
         write_fs_u32(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
     }
 
+    ALWAYS_INLINE static Thread* idle_thread()
+    {
+        // See comment in Processor::current_thread
+        return (Thread*)read_fs_u32(__builtin_offsetof(Processor, m_idle_thread));
+    }
+
     ALWAYS_INLINE u32 get_id() const
     {
         // NOTE: This variant should only be used when iterating over all
@@ -39,7 +39,7 @@ RecursiveSpinLock g_scheduler_lock;
 static u32 time_slice_for(const Thread& thread)
 {
     // One time slice unit == 4ms (assuming 250 ticks/second)
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return 1;
     return 2;
 }
@@ -105,12 +105,12 @@ Thread& Scheduler::pull_next_runnable_thread()
         }
         priority_mask &= ~(1u << priority);
     }
-    return *Processor::current().idle_thread();
+    return *Processor::idle_thread();
 }
 
 bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 {
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return true;
     ScopedSpinLock lock(g_ready_queues_lock);
     auto priority = thread.m_runnable_priority;
@@ -134,7 +134,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
 void Scheduler::queue_runnable_thread(Thread& thread)
 {
     VERIFY(g_scheduler_lock.own_lock());
-    if (&thread == Processor::current().idle_thread())
+    if (thread.is_idle_thread())
         return;
     auto priority = thread_priority_to_priority_index(thread.priority());
 
@@ -160,9 +160,8 @@ UNMAP_AFTER_INIT void Scheduler::start()
     auto& processor = Processor::current();
     processor.set_scheduler_data(*new SchedulerPerProcessorData());
     VERIFY(processor.is_initialized());
-    auto& idle_thread = *processor.idle_thread();
+    auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
-    VERIFY(processor.idle_thread() == &idle_thread);
     idle_thread.set_ticks_left(time_slice_for(idle_thread));
     idle_thread.did_schedule();
     idle_thread.set_initialized(true);
@@ -467,6 +466,7 @@ UNMAP_AFTER_INIT void Scheduler::initialize()
 
 UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
 {
+    idle_thread->set_idle_thread();
     Processor::current().set_idle_thread(*idle_thread);
     Processor::current().set_current_thread(*idle_thread);
 }
@@ -1112,6 +1112,8 @@ public:
         return m_handling_page_fault;
     }
     void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
+    void set_idle_thread() { m_is_idle_thread = true; }
+    bool is_idle_thread() const { return m_is_idle_thread; }
 
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
@@ -1248,6 +1250,7 @@ private:
     bool m_should_die { false };
     bool m_initialized { false };
     bool m_in_block { false };
+    bool m_is_idle_thread { false };
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
 
     void yield_without_holding_big_lock();