
Everywhere: Improve CPU usage calculation

As threads come and go, we can't simply count how many time slices the
threads alive at any given point have been using; we also need to
account for threads that have since exited. This means we additionally
have to track how many time slices have expired globally.
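
In other words (a minimal sketch with made-up names, not the kernel's
actual code): if usage is computed only from per-thread counters, ticks
spent by threads that have already exited vanish from the denominator,
while a global counter of expired time slices does not.

#include <cstdint>

// Made-up names for illustration; the real commit uses
// g_total_ticks_scheduled in Kernel/Scheduler.cpp (see the diff below).
static uint64_t g_total_ticks_expired; // survives thread exit

struct Thread {
    uint64_t ticks { 0 }; // gone once the thread is destroyed
};

// Called on every scheduler time slice for the running thread: bump the
// per-thread counter (a numerator) and the global counter (the
// denominator), so ticks of threads that later exit stay accounted for.
void account_tick(Thread& current_thread)
{
    current_thread.ticks++;
    g_total_ticks_expired++;
}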

However, because this doesn't account for context switches that happen
outside of the system timer tick, values may still be under-reported. To
solve this we will need to track more accurate time information on each
context switch.
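
A sketch of the more accurate approach this paragraph alludes to (not
implemented by this commit): charge the running thread the exact time
elapsed since it was scheduled in, at every context switch, instead of
counting whole timer ticks. read_monotonic_time_ns() is a hypothetical
clock source.

#include <cstdint>

extern uint64_t read_monotonic_time_ns(); // hypothetical clock source

struct Thread {
    uint64_t time_used_ns { 0 };
    uint64_t scheduled_in_at_ns { 0 };
};

// On every switch, charge the outgoing thread for exactly the time it
// ran, and timestamp the incoming thread. No tick granularity is lost.
void context_switch(Thread& from, Thread& to)
{
    uint64_t now = read_monotonic_time_ns();
    from.time_used_ns += now - from.scheduled_in_at_ns;
    to.scheduled_in_at_ns = now;
}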

This also fixes top's CPU usage calculation, which was still based on
the number of context switches.
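
Here is a sketch of the delta-based calculation this enables for a tool
like top, assuming snapshots of a thread's tick count and of the global
scheduled-tick count are available; the types and field names are
illustrative, not SerenityOS's actual interfaces.

#include <cstdint>

// Illustrative snapshot type; the real values would come from the
// kernel's process/thread statistics, not from this struct.
struct Snapshot {
    uint64_t thread_ticks;          // time slices used by the thread
    uint64_t total_ticks_scheduled; // global counter from the scheduler
};

// Usage over an interval is the thread's share of all time slices that
// expired in that interval, rather than a count of context switches.
double cpu_usage_percent(Snapshot const& before, Snapshot const& after)
{
    uint64_t total_delta = after.total_ticks_scheduled - before.total_ticks_scheduled;
    if (total_delta == 0)
        return 0.0;
    uint64_t thread_delta = after.thread_ticks - before.thread_ticks;
    return 100.0 * static_cast<double>(thread_delta) / static_cast<double>(total_delta);
}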

Fixes #6473
Tom 2021-07-14 12:05:59 -06:00 committed by Andreas Kling
parent ef85c4f747
commit 7e77a2ec40
17 changed files with 132 additions and 83 deletions

Kernel/Scheduler.cpp

@@ -51,6 +51,7 @@ struct ThreadReadyQueue {
 };
 static SpinLock<u8> g_ready_queues_lock;
 static u32 g_ready_queues_mask;
+static TotalTicksScheduled g_total_ticks_scheduled;
 static constexpr u32 g_ready_queue_buckets = sizeof(g_ready_queues_mask) * 8;
 READONLY_AFTER_INIT static ThreadReadyQueue* g_ready_queues; // g_ready_queue_buckets entries
 static void dump_thread_list(bool = false);
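
This hunk references a new TotalTicksScheduled type whose declaration
(in the scheduler header) is not part of this excerpt; judging from the
fields the hunks below increment, it is presumably a plain pair of
counters along these lines (u64 being AK's unsigned 64-bit typedef):

struct TotalTicksScheduled {
    u64 total { 0 };        // all time slices expired so far
    u64 total_kernel { 0 }; // the subset spent in kernel mode
};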
@@ -446,18 +447,22 @@ void Scheduler::timer_tick(const RegisterState& regs)
         return; // TODO: This prevents scheduling on other CPUs!
 #endif
 
-    if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
-        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
-        {
-            ScopedSpinLock scheduler_lock(g_scheduler_lock);
+    {
+        ScopedSpinLock scheduler_lock(g_scheduler_lock);
+        if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
+            dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
             current_thread->set_state(Thread::Dying);
+            Processor::current().invoke_scheduler_async();
+            return;
         }
-        VERIFY(!Processor::current().in_critical());
-        Processor::current().invoke_scheduler_async();
-        return;
+        g_total_ticks_scheduled.total++;
+        if (current_thread->previous_mode() == Thread::PreviousMode::KernelMode)
+            g_total_ticks_scheduled.total_kernel++;
+        if (current_thread->tick())
+            return;
     }
-    if (current_thread->tick())
-        return;
 
     if (!current_thread->is_idle_thread() && !peek_next_runnable_thread()) {
         // If no other thread is ready to be scheduled we don't need to
@@ -540,6 +545,12 @@ bool Scheduler::is_initialized()
     return Processor::idle_thread() != nullptr;
 }
 
+TotalTicksScheduled Scheduler::get_total_ticks_scheduled()
+{
+    ScopedSpinLock scheduler_lock(g_scheduler_lock);
+    return g_total_ticks_scheduled;
+}
+
 void dump_thread_list(bool with_stack_traces)
 {
     dbgln("Scheduler thread list for processor {}:", Processor::id());