
Everywhere: Make tracking CPU usage independent from system ticks

This switches CPU usage tracking to measure time in user and kernel land
more accurately, using either the TSC or another time source.
This will also come in handy when implementing a tickless kernel mode.
Tom 2021-07-14 21:46:32 -06:00 committed by Andreas Kling
parent 7e77a2ec40
commit a635ff4e60
13 changed files with 174 additions and 85 deletions
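
In short: instead of bumping per-thread tick counters from the timer interrupt, each thread now samples a scheduler time source when it transitions between user and kernel mode (and when it is descheduled) and accumulates the deltas; the same deltas also feed a global total that userland uses to compute CPU percentages. Below is a minimal standalone sketch of that accounting model, with simplified, hypothetical names rather than the kernel's actual types:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for Scheduler::current_time: the kernel prefers the TSC and
// falls back to a precise monotonic clock; here we simply use
// steady_clock nanoseconds.
static uint64_t current_time()
{
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
               std::chrono::steady_clock::now().time_since_epoch())
        .count();
}

struct ThreadTimes {
    std::optional<uint64_t> last_time_scheduled;
    uint64_t total_user { 0 };
    uint64_t total_kernel { 0 };

    // Mirrors the shape of Thread::update_time_scheduled: charge the time
    // since the previous sample to user or kernel time, then re-arm the
    // sample (or clear it when the thread stops running).
    void update(uint64_t now, bool is_kernel, bool no_longer_running)
    {
        if (last_time_scheduled.has_value()) {
            uint64_t delta = now >= *last_time_scheduled
                ? now - *last_time_scheduled
                : *last_time_scheduled - now; // clock wrapped (unlikely)
            if (is_kernel)
                total_kernel += delta;
            else
                total_user += delta;
        }
        if (no_longer_running)
            last_time_scheduled.reset();
        else
            last_time_scheduled = now;
    }
};

int main()
{
    ThreadTimes t;
    t.update(current_time(), false, false); // thread starts running in user mode
    t.update(current_time(), false, false); // traps into the kernel: charge the user-mode interval
    t.update(current_time(), true, true);   // descheduled: charge the kernel-mode interval
    std::printf("user=%llu ns, kernel=%llu ns\n",
        (unsigned long long)t.total_user, (unsigned long long)t.total_kernel);
}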

View file

@@ -612,7 +612,10 @@ void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
trap.next_trap = current_trap;
current_trap = &trap;
// The cs register of this trap tells us where we will return back to
current_thread->set_previous_mode(((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
auto new_previous_mode = ((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode;
if (current_thread->set_previous_mode(new_previous_mode) && trap.prev_irq_level == 0) {
current_thread->update_time_scheduled(Scheduler::current_time(), new_previous_mode == Thread::PreviousMode::KernelMode, false);
}
} else {
trap.next_trap = nullptr;
}
@@ -627,25 +630,29 @@ void Processor::exit_trap(TrapFrame& trap)
smp_process_pending_messages();
if (!m_in_irq && !m_in_critical)
check_invoke_scheduler();
auto* current_thread = Processor::current_thread();
if (current_thread) {
auto& current_trap = current_thread->current_trap();
current_trap = trap.next_trap;
Thread::PreviousMode new_previous_mode;
if (current_trap) {
VERIFY(current_trap->regs);
// If we have another higher level trap then we probably returned
// from an interrupt or irq handler. The cs register of the
// new/higher level trap tells us what the mode prior to it was
current_thread->set_previous_mode(((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
new_previous_mode = ((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode;
} else {
// If we don't have a higher level trap then we're back in user mode.
// Unless we're a kernel process, in which case we're always in kernel mode
current_thread->set_previous_mode(current_thread->process().is_kernel_process() ? Thread::PreviousMode::KernelMode : Thread::PreviousMode::UserMode);
// Which means that the previous mode prior to being back in user mode was kernel mode
new_previous_mode = Thread::PreviousMode::KernelMode;
}
if (current_thread->set_previous_mode(new_previous_mode))
current_thread->update_time_scheduled(Scheduler::current_time(), true, false);
}
if (!m_in_irq && !m_in_critical)
check_invoke_scheduler();
}
void Processor::check_invoke_scheduler()

View file

@@ -469,8 +469,8 @@ private:
thread_object.add("tid", thread.tid().value());
thread_object.add("name", thread.name());
thread_object.add("times_scheduled", thread.times_scheduled());
thread_object.add("ticks_user", thread.ticks_in_user());
thread_object.add("ticks_kernel", thread.ticks_in_kernel());
thread_object.add("time_user", thread.time_in_user());
thread_object.add("time_kernel", thread.time_in_kernel());
thread_object.add("state", thread.state_string());
thread_object.add("cpu", thread.cpu());
thread_object.add("priority", thread.priority());
@@ -497,9 +497,9 @@ private:
build_process(array, process);
}
auto total_ticks_scheduled = Scheduler::get_total_ticks_scheduled();
json.add("total_ticks", total_ticks_scheduled.total);
json.add("total_ticks_kernel", total_ticks_scheduled.total_kernel);
auto total_time_scheduled = Scheduler::get_total_time_scheduled();
json.add("total_time", total_time_scheduled.total);
json.add("total_time_kernel", total_time_scheduled.total_kernel);
}
return true;
}

View file

@@ -51,9 +51,16 @@ struct ThreadReadyQueue {
};
static SpinLock<u8> g_ready_queues_lock;
static u32 g_ready_queues_mask;
static TotalTicksScheduled g_total_ticks_scheduled;
static constexpr u32 g_ready_queue_buckets = sizeof(g_ready_queues_mask) * 8;
READONLY_AFTER_INIT static ThreadReadyQueue* g_ready_queues; // g_ready_queue_buckets entries
static TotalTimeScheduled g_total_time_scheduled;
static SpinLock<u8> g_total_time_scheduled_lock;
// The Scheduler::current_time function provides a current time for scheduling purposes,
// which may not necessarily relate to wall time
u64 (*Scheduler::current_time)();
static void dump_thread_list(bool = false);
static inline u32 thread_priority_to_priority_index(u32 thread_priority)
@@ -334,6 +341,13 @@ bool Scheduler::context_switch(Thread* thread)
void Scheduler::enter_current(Thread& prev_thread, bool is_first)
{
VERIFY(g_scheduler_lock.own_lock());
// We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
auto scheduler_time = Scheduler::current_time();
prev_thread.update_time_scheduled(scheduler_time, true, true);
auto* current_thread = Thread::current();
current_thread->update_time_scheduled(scheduler_time, true, false);
prev_thread.set_active(false);
if (prev_thread.state() == Thread::Dying) {
// If the thread we switched from is marked as dying, then notify
@@ -343,7 +357,6 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first)
} else if (!is_first) {
// Check if we have any signals we should deliver (even if we don't
// end up switching to another thread).
auto current_thread = Thread::current();
if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode) {
ScopedSpinLock lock(current_thread->get_lock());
if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
@@ -392,10 +405,30 @@ Process* Scheduler::colonel()
return s_colonel_process;
}
static u64 current_time_tsc()
{
return read_tsc();
}
static u64 current_time_monotonic()
{
// We always need a precise timestamp here, we cannot rely on a coarse timestamp
return (u64)TimeManagement::the().monotonic_time(TimePrecision::Precise).to_nanoseconds();
}
UNMAP_AFTER_INIT void Scheduler::initialize()
{
VERIFY(Processor::is_initialized()); // sanity check
// Figure out a good scheduling time source
if (Processor::current().has_feature(CPUFeature::TSC)) {
// TODO: only use if TSC is running at a constant frequency?
current_time = current_time_tsc;
} else {
// TODO: Using HPET is rather slow, can we use any other time source that may be faster?
current_time = current_time_monotonic;
}
RefPtr<Thread> idle_thread;
g_finalizer_wait_queue = new WaitQueue;
g_ready_queues = new ThreadReadyQueue[g_ready_queue_buckets];
@@ -429,6 +462,14 @@ UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
return idle_thread;
}
void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
{
ScopedSpinLock lock(g_total_time_scheduled_lock);
g_total_time_scheduled.total += time_to_add;
if (is_kernel)
g_total_time_scheduled.total_kernel += time_to_add;
}
void Scheduler::timer_tick(const RegisterState& regs)
{
VERIFY_INTERRUPTS_DISABLED();
@@ -447,23 +488,24 @@ void Scheduler::timer_tick(const RegisterState& regs)
return; // TODO: This prevents scheduling on other CPUs!
#endif
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
current_thread->set_state(Thread::Dying);
Processor::current().invoke_scheduler_async();
return;
}
g_total_ticks_scheduled.total++;
if (current_thread->previous_mode() == Thread::PreviousMode::KernelMode)
g_total_ticks_scheduled.total_kernel++;
if (current_thread->tick())
return;
if (current_thread->process().is_kernel_process()) {
// Because the previous mode when entering/exiting kernel threads never changes
// we never update the time scheduled. So we need to update it manually on the
// timer interrupt
current_thread->update_time_scheduled(current_time(), true, false);
}
if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
ScopedSpinLock scheduler_lock(g_scheduler_lock);
dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
current_thread->set_state(Thread::Dying);
Processor::current().invoke_scheduler_async();
return;
}
if (current_thread->tick())
return;
if (!current_thread->is_idle_thread() && !peek_next_runnable_thread()) {
// If no other thread is ready to be scheduled we don't need to
// switch to the idle thread. Just give the current thread another
@@ -545,10 +587,10 @@ bool Scheduler::is_initialized()
return Processor::idle_thread() != nullptr;
}
TotalTicksScheduled Scheduler::get_total_ticks_scheduled()
TotalTimeScheduled Scheduler::get_total_time_scheduled()
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
return g_total_ticks_scheduled;
ScopedSpinLock lock(g_total_time_scheduled_lock);
return g_total_time_scheduled;
}
void dump_thread_list(bool with_stack_traces)

View file

@@ -24,7 +24,7 @@ extern WaitQueue* g_finalizer_wait_queue;
extern Atomic<bool> g_finalizer_has_work;
extern RecursiveSpinLock g_scheduler_lock;
struct TotalTicksScheduled {
struct TotalTimeScheduled {
u64 total { 0 };
u64 total_kernel { 0 };
};
@@ -54,7 +54,9 @@ public:
static void queue_runnable_thread(Thread&);
static void dump_scheduler_state(bool = false);
static bool is_initialized();
static TotalTicksScheduled get_total_ticks_scheduled();
static TotalTimeScheduled get_total_time_scheduled();
static void add_time_scheduled(u64, bool);
static u64 (*current_time)();
};
}

View file

@@ -567,6 +567,29 @@ void Thread::finalize_dying_threads()
}
}
void Thread::update_time_scheduled(u64 current_scheduler_time, bool is_kernel, bool no_longer_running)
{
if (m_last_time_scheduled.has_value()) {
u64 delta;
if (current_scheduler_time >= m_last_time_scheduled.value())
delta = current_scheduler_time - m_last_time_scheduled.value();
else
delta = m_last_time_scheduled.value() - current_scheduler_time; // the unlikely event that the clock wrapped
if (delta != 0) {
// Add it to the global total *before* updating the thread's value!
Scheduler::add_time_scheduled(delta, is_kernel);
auto& total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user;
ScopedSpinLock scheduler_lock(g_scheduler_lock);
total_time += delta;
}
}
if (no_longer_running)
m_last_time_scheduled = {};
else
m_last_time_scheduled = current_scheduler_time;
}
bool Thread::tick()
{
if (previous_mode() == PreviousMode::KernelMode) {

View file

@@ -987,6 +987,7 @@ public:
void exit(void* = nullptr);
void update_time_scheduled(u64, bool, bool);
bool tick();
void set_ticks_left(u32 t) { m_ticks_left = t; }
u32 ticks_left() const { return m_ticks_left; }
@@ -1112,15 +1113,22 @@ public:
static constexpr u32 default_kernel_stack_size = 65536;
static constexpr u32 default_userspace_stack_size = 1 * MiB;
u32 ticks_in_user() const { return m_ticks_in_user; }
u32 ticks_in_kernel() const { return m_ticks_in_kernel; }
u64 time_in_user() const { return m_total_time_scheduled_user; }
u64 time_in_kernel() const { return m_total_time_scheduled_kernel; }
enum class PreviousMode : u8 {
KernelMode = 0,
UserMode
};
PreviousMode previous_mode() const { return m_previous_mode; }
void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }
bool set_previous_mode(PreviousMode mode)
{
if (m_previous_mode == mode)
return false;
m_previous_mode = mode;
return true;
}
TrapFrame*& current_trap() { return m_current_trap; }
RecursiveSpinLock& get_lock() const { return m_lock; }
@@ -1270,6 +1278,9 @@ private:
IntrusiveListNode<Thread> m_ready_queue_node;
Atomic<u32> m_cpu { 0 };
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
Optional<u64> m_last_time_scheduled;
u64 m_total_time_scheduled_user { 0 };
u64 m_total_time_scheduled_kernel { 0 };
u32 m_ticks_left { 0 };
u32 m_times_scheduled { 0 };
u32 m_ticks_in_user { 0 };
@@ -1302,7 +1313,7 @@ private:
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
bool m_is_joinable { true };
bool m_handling_page_fault { false };
PreviousMode m_previous_mode { PreviousMode::UserMode };
PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode
unsigned m_syscall_count { 0 };
unsigned m_inode_faults { 0 };

View file

@@ -80,6 +80,8 @@ private:
void set_system_timer(HardwareTimerBase&);
static void system_timer_tick(const RegisterState&);
static u64 scheduling_current_time(bool);
// Variables between m_update1 and m_update2 are synchronized
Atomic<u32> m_update1 { 0 };
u32 m_ticks_this_second { 0 };
@@ -91,6 +93,7 @@ private:
u32 m_time_ticks_per_second { 0 }; // may be different from interrupts/second (e.g. hpet)
bool m_can_query_precise_time { false };
bool m_updating_time { false }; // may only be accessed from the BSP!
RefPtr<HardwareTimerBase> m_system_timer;
RefPtr<HardwareTimerBase> m_time_keeper_timer;

View file

@@ -129,15 +129,15 @@ private:
return false;
if (m_last_total_sum.has_value())
scheduled_diff = all_processes->total_ticks_scheduled - m_last_total_sum.value();
m_last_total_sum = all_processes->total_ticks_scheduled;
scheduled_diff = all_processes->total_time_scheduled - m_last_total_sum.value();
m_last_total_sum = all_processes->total_time_scheduled;
for (auto& it : all_processes.value().processes) {
for (auto& jt : it.threads) {
if (it.pid == 0)
idle += jt.ticks_user + jt.ticks_kernel;
idle += jt.time_user + jt.time_kernel;
else
busy += jt.ticks_user + jt.ticks_kernel;
busy += jt.time_user + jt.time_kernel;
}
}
return true;

View file

@@ -319,15 +319,15 @@ void ProcessModel::update()
auto all_processes = Core::ProcessStatisticsReader::get_all(m_proc_all);
HashTable<int> live_tids;
u64 sum_ticks_scheduled = 0, sum_ticks_scheduled_kernel = 0;
u64 total_ticks_scheduled_diff = 0;
u64 sum_time_scheduled = 0, sum_time_scheduled_kernel = 0;
u64 total_time_scheduled_diff = 0;
if (all_processes.has_value()) {
if (m_has_total_ticks)
total_ticks_scheduled_diff = all_processes->total_ticks_scheduled - m_total_ticks_scheduled;
if (m_has_total_scheduled_time)
total_time_scheduled_diff = all_processes->total_time_scheduled - m_total_time_scheduled;
m_total_ticks_scheduled = all_processes->total_ticks_scheduled;
m_total_ticks_scheduled_kernel = all_processes->total_ticks_scheduled_kernel;
m_has_total_ticks = true;
m_total_time_scheduled = all_processes->total_time_scheduled;
m_total_time_scheduled_kernel = all_processes->total_time_scheduled_kernel;
m_has_total_scheduled_time = true;
for (auto& process : all_processes.value().processes) {
for (auto& thread : process.threads) {
@@ -361,14 +361,14 @@ void ProcessModel::update()
state.tid = thread.tid;
state.pgid = process.pgid;
state.sid = process.sid;
state.ticks_user = thread.ticks_user;
state.ticks_kernel = thread.ticks_kernel;
state.time_user = thread.time_user;
state.time_kernel = thread.time_kernel;
state.cpu = thread.cpu;
state.cpu_percent = 0;
state.priority = thread.priority;
state.state = thread.state;
sum_ticks_scheduled += thread.ticks_user + thread.ticks_kernel;
sum_ticks_scheduled_kernel += thread.ticks_kernel;
sum_time_scheduled += thread.time_user + thread.time_kernel;
sum_time_scheduled_kernel += thread.time_kernel;
{
auto pit = m_threads.find(thread.tid);
if (pit == m_threads.end())
@@ -397,11 +397,11 @@ void ProcessModel::update()
continue;
}
auto& thread = *it.value;
u32 ticks_scheduled_diff = (thread.current_state.ticks_user + thread.current_state.ticks_kernel)
- (thread.previous_state.ticks_user + thread.previous_state.ticks_kernel);
u32 ticks_scheduled_diff_kernel = thread.current_state.ticks_kernel - thread.previous_state.ticks_kernel;
thread.current_state.cpu_percent = total_ticks_scheduled_diff > 0 ? ((float)ticks_scheduled_diff * 100) / (float)total_ticks_scheduled_diff : 0;
thread.current_state.cpu_percent_kernel = total_ticks_scheduled_diff > 0 ? ((float)ticks_scheduled_diff_kernel * 100) / (float)total_ticks_scheduled_diff : 0;
u32 time_scheduled_diff = (thread.current_state.time_user + thread.current_state.time_kernel)
- (thread.previous_state.time_user + thread.previous_state.time_kernel);
u32 time_scheduled_diff_kernel = thread.current_state.time_kernel - thread.previous_state.time_kernel;
thread.current_state.cpu_percent = total_time_scheduled_diff > 0 ? ((float)time_scheduled_diff * 100) / (float)total_time_scheduled_diff : 0;
thread.current_state.cpu_percent_kernel = total_time_scheduled_diff > 0 ? ((float)time_scheduled_diff_kernel * 100) / (float)total_time_scheduled_diff : 0;
if (it.value->current_state.pid != 0) {
auto& cpu_info = m_cpus[thread.current_state.cpu];
cpu_info.total_cpu_percent += thread.current_state.cpu_percent;
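
Worked example with hypothetical numbers: if the global total_time_scheduled grew by 2,000,000,000 ns between two refreshes while a thread's time_user + time_kernel grew by 500,000,000 ns, the thread is shown at (500,000,000 * 100) / 2,000,000,000 = 25% CPU; cpu_percent_kernel is derived the same way from the time_kernel delta alone.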

View file

@@ -88,8 +88,8 @@ private:
pid_t ppid;
pid_t pgid;
pid_t sid;
unsigned ticks_user;
unsigned ticks_kernel;
u64 time_user;
u64 time_kernel;
bool kernel;
String executable;
String name;
@@ -129,7 +129,7 @@ private:
Vector<int> m_tids;
RefPtr<Core::File> m_proc_all;
GUI::Icon m_kernel_process_icon;
u64 m_total_ticks_scheduled { 0 };
u64 m_total_ticks_scheduled_kernel { 0 };
bool m_has_total_ticks { false };
u64 m_total_time_scheduled { 0 };
u64 m_total_time_scheduled_kernel { 0 };
bool m_has_total_scheduled_time { false };
};

View file

@@ -37,6 +37,7 @@ Optional<AllProcessesStatistics> ProcessStatisticsReader::get_all(RefPtr<Core::F
auto json = JsonValue::from_string(file_contents);
if (!json.has_value())
return {};
auto& json_obj = json.value().as_object();
json_obj.get("processes").as_array().for_each([&](auto& value) {
const JsonObject& process_object = value.as_object();
@@ -74,8 +75,8 @@ Optional<AllProcessesStatistics> ProcessStatisticsReader::get_all(RefPtr<Core::F
thread.times_scheduled = thread_object.get("times_scheduled").to_u32();
thread.name = thread_object.get("name").to_string();
thread.state = thread_object.get("state").to_string();
thread.ticks_user = thread_object.get("ticks_user").to_u32();
thread.ticks_kernel = thread_object.get("ticks_kernel").to_u32();
thread.time_user = thread_object.get("time_user").to_u64();
thread.time_kernel = thread_object.get("time_kernel").to_u64();
thread.cpu = thread_object.get("cpu").to_u32();
thread.priority = thread_object.get("priority").to_u32();
thread.syscall_count = thread_object.get("syscall_count").to_u32();
@@ -96,8 +97,8 @@ Optional<AllProcessesStatistics> ProcessStatisticsReader::get_all(RefPtr<Core::F
all_processes_statistics.processes.append(move(process));
});
all_processes_statistics.total_ticks_scheduled = json_obj.get("total_ticks").to_u64();
all_processes_statistics.total_ticks_scheduled_kernel = json_obj.get("total_ticks_kernel").to_u64();
all_processes_statistics.total_time_scheduled = json_obj.get("total_time").to_u64();
all_processes_statistics.total_time_scheduled_kernel = json_obj.get("total_time_kernel").to_u64();
return all_processes_statistics;
}

View file

@@ -15,8 +15,8 @@ namespace Core {
struct ThreadStatistics {
pid_t tid;
unsigned times_scheduled;
unsigned ticks_user;
unsigned ticks_kernel;
u64 time_user;
u64 time_kernel;
unsigned syscall_count;
unsigned inode_faults;
unsigned zero_faults;
@@ -66,8 +66,8 @@ struct ProcessStatistics {
struct AllProcessesStatistics {
Vector<ProcessStatistics> processes;
u64 total_ticks_scheduled;
u64 total_ticks_scheduled_kernel;
u64 total_time_scheduled;
u64 total_time_scheduled_kernel;
};
class ProcessStatisticsReader {

View file

@@ -54,9 +54,9 @@ struct ThreadData {
unsigned inode_faults;
unsigned zero_faults;
unsigned cow_faults;
u64 ticks_scheduled;
u64 time_scheduled;
u64 ticks_scheduled_since_prev { 0 };
u64 time_scheduled_since_prev { 0 };
unsigned cpu_percent { 0 };
unsigned cpu_percent_decimal { 0 };
@@ -83,8 +83,8 @@ struct Traits<PidAndTid> : public GenericTraits<PidAndTid> {
struct Snapshot {
HashMap<PidAndTid, ThreadData> map;
u64 total_ticks_scheduled { 0 };
u64 total_ticks_scheduled_kernel { 0 };
u64 total_time_scheduled { 0 };
u64 total_time_scheduled_kernel { 0 };
};
static Snapshot get_snapshot()
@@ -115,7 +115,7 @@ static Snapshot get_snapshot()
thread_data.inode_faults = thread.inode_faults;
thread_data.zero_faults = thread.zero_faults;
thread_data.cow_faults = thread.cow_faults;
thread_data.ticks_scheduled = (u64)thread.ticks_user + (u64)thread.ticks_kernel;
thread_data.time_scheduled = (u64)thread.time_user + (u64)thread.time_kernel;
thread_data.priority = thread.priority;
thread_data.state = thread.state;
thread_data.username = process.username;
@@ -124,8 +124,8 @@ static Snapshot get_snapshot()
}
}
snapshot.total_ticks_scheduled = all_processes->total_ticks_scheduled;
snapshot.total_ticks_scheduled_kernel = all_processes->total_ticks_scheduled_kernel;
snapshot.total_time_scheduled = all_processes->total_time_scheduled;
snapshot.total_time_scheduled_kernel = all_processes->total_time_scheduled_kernel;
return snapshot;
}
@@ -220,7 +220,7 @@ int main(int argc, char** argv)
}
auto current = get_snapshot();
auto total_scheduled_diff = current.total_ticks_scheduled - prev.total_ticks_scheduled;
auto total_scheduled_diff = current.total_time_scheduled - prev.total_time_scheduled;
printf("\033[3J\033[H\033[2J");
printf("\033[47;30m%6s %3s %3s %-9s %-13s %6s %6s %4s %s\033[K\033[0m\n",
@@ -240,11 +240,11 @@ int main(int argc, char** argv)
auto jt = prev.map.find(pid_and_tid);
if (jt == prev.map.end())
continue;
auto ticks_scheduled_before = (*jt).value.ticks_scheduled;
auto ticks_scheduled_diff = it.value.ticks_scheduled - ticks_scheduled_before;
it.value.ticks_scheduled_since_prev = ticks_scheduled_diff;
it.value.cpu_percent = total_scheduled_diff > 0 ? ((ticks_scheduled_diff * 100) / total_scheduled_diff) : 0;
it.value.cpu_percent_decimal = total_scheduled_diff > 0 ? (((ticks_scheduled_diff * 1000) / total_scheduled_diff) % 10) : 0;
auto time_scheduled_before = (*jt).value.time_scheduled;
auto time_scheduled_diff = it.value.time_scheduled - time_scheduled_before;
it.value.time_scheduled_since_prev = time_scheduled_diff;
it.value.cpu_percent = total_scheduled_diff > 0 ? ((time_scheduled_diff * 100) / total_scheduled_diff) : 0;
it.value.cpu_percent_decimal = total_scheduled_diff > 0 ? (((time_scheduled_diff * 1000) / total_scheduled_diff) % 10) : 0;
threads.append(&it.value);
}
@@ -269,7 +269,7 @@ int main(int argc, char** argv)
case TopOption::SortBy::Cpu:
return p2->cpu_percent * 10 + p2->cpu_percent_decimal < p1->cpu_percent * 10 + p1->cpu_percent_decimal;
default:
return p2->ticks_scheduled_since_prev < p1->ticks_scheduled_since_prev;
return p2->time_scheduled_since_prev < p1->time_scheduled_since_prev;
}
});