Mirror of https://github.com/RGBCube/serenity

SystemMonitor: Beef up CPU and memory graphs

We now show the total CPU usage as well as the kernel portion.
For the memory graphs we show the amount of committed memory,
actually allocated memory, and the portion of the kernel heap.
Authored by Tom on 2021-01-04 20:14:52 -07:00; committed by Andreas Kling
parent b4a783d923
commit de6a4d49b8
7 changed files with 207 additions and 54 deletions
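To make the arithmetic in the diff below easier to follow, here is a minimal standalone sketch of the same tick-delta math (the struct and the sample numbers are hypothetical stand-ins, not SerenityOS code). Both percentages are divided by the same system-wide delta of scheduled ticks, which is why the kernel figure is always a sub-portion of the total and the two can be drawn as one stacked graph.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a thread's scheduling counters.
struct ThreadTicks {
    uint64_t ticks_user;
    uint64_t ticks_kernel;
};

int main()
{
    // One thread's counters at the previous and the current update.
    ThreadTicks previous { 100, 40 };
    ThreadTicks current { 160, 70 };

    // System-wide totals of scheduled ticks at the same two updates.
    uint64_t last_sum_ticks_scheduled = 1000;
    uint64_t sum_ticks_scheduled = 1200;

    // Per-thread deltas: total ticks, and the kernel-only portion.
    uint32_t ticks_diff = uint32_t((current.ticks_user + current.ticks_kernel)
        - (previous.ticks_user + previous.ticks_kernel));
    uint32_t ticks_diff_kernel = uint32_t(current.ticks_kernel - previous.ticks_kernel);

    // Both are normalized by the same system-wide delta (200 ticks here),
    // so the kernel percentage can never exceed the total percentage.
    float denominator = float(sum_ticks_scheduled - last_sum_ticks_scheduled);
    float cpu_percent = float(ticks_diff) * 100 / denominator;
    float cpu_percent_kernel = float(ticks_diff_kernel) * 100 / denominator;

    // 90 of 200 scheduled ticks => 45.0% total; 30 kernel ticks => 15.0%.
    printf("total: %.1f%%, kernel: %.1f%%\n", cpu_percent, cpu_percent_kernel);
}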

@@ -352,12 +352,15 @@ void ProcessModel::update()
     auto previous_pid_count = m_pids.size();
     auto all_processes = Core::ProcessStatisticsReader::get_all(m_proc_all);
-    u64 last_sum_ticks_scheduled = 0;
-    for (auto& it : m_threads)
-        last_sum_ticks_scheduled += it.value->current_state.ticks_user + it.value->current_state.ticks_kernel;
+    u64 last_sum_ticks_scheduled = 0, last_sum_ticks_scheduled_kernel = 0;
+    for (auto& it : m_threads) {
+        auto& current_state = it.value->current_state;
+        last_sum_ticks_scheduled += current_state.ticks_user + current_state.ticks_kernel;
+        last_sum_ticks_scheduled_kernel += current_state.ticks_kernel;
+    }
     HashTable<PidAndTid> live_pids;
-    u64 sum_ticks_scheduled = 0;
+    u64 sum_ticks_scheduled = 0, sum_ticks_scheduled_kernel = 0;
     if (all_processes.has_value()) {
         for (auto& it : all_processes.value()) {
             for (auto& thread : it.value.threads) {
@@ -399,6 +402,7 @@ void ProcessModel::update()
                 state.effective_priority = thread.effective_priority;
                 state.state = thread.state;
                 sum_ticks_scheduled += thread.ticks_user + thread.ticks_kernel;
+                sum_ticks_scheduled_kernel += thread.ticks_kernel;
                 {
                     auto pit = m_threads.find({ it.value.pid, thread.tid });
                     if (pit == m_threads.end())
@@ -415,8 +419,10 @@ void ProcessModel::update()
     }
     m_pids.clear();
-    for (auto& c : m_cpus)
+    for (auto& c : m_cpus) {
         c.total_cpu_percent = 0.0;
+        c.total_cpu_percent_kernel = 0.0;
+    }
     Vector<PidAndTid, 16> pids_to_remove;
     for (auto& it : m_threads) {
         if (!live_pids.contains(it.key)) {
@@ -424,11 +430,15 @@ void ProcessModel::update()
             continue;
         }
         auto& process = *it.value;
-        u32 times_scheduled_diff = (process.current_state.ticks_user + process.current_state.ticks_kernel)
+        u32 ticks_scheduled_diff = (process.current_state.ticks_user + process.current_state.ticks_kernel)
             - (process.previous_state.ticks_user + process.previous_state.ticks_kernel);
-        process.current_state.cpu_percent = ((float)times_scheduled_diff * 100) / (float)(sum_ticks_scheduled - last_sum_ticks_scheduled);
+        u32 ticks_scheduled_diff_kernel = process.current_state.ticks_kernel - process.previous_state.ticks_kernel;
+        process.current_state.cpu_percent = ((float)ticks_scheduled_diff * 100) / (float)(sum_ticks_scheduled - last_sum_ticks_scheduled);
+        process.current_state.cpu_percent_kernel = ((float)ticks_scheduled_diff_kernel * 100) / (float)(sum_ticks_scheduled - last_sum_ticks_scheduled);
         if (it.key.pid != 0) {
-            m_cpus[process.current_state.cpu].total_cpu_percent += process.current_state.cpu_percent;
+            auto& cpu_info = m_cpus[process.current_state.cpu];
+            cpu_info.total_cpu_percent += process.current_state.cpu_percent;
+            cpu_info.total_cpu_percent_kernel += process.current_state.cpu_percent_kernel;
             m_pids.append(it.key);
         }
     }
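On the rendering side, the per-CPU totals accumulated above are what the beefed-up graphs consume. Below is a hypothetical consumer sketch: CpuInfo mirrors the two fields the diff maintains, but the history buffer and the helper name are illustrative, not the actual SystemMonitor API. The useful invariant is that the kernel value never exceeds the total, since both were divided by the same tick delta, so painting the kernel series over the total series yields a stacked graph.

#include <cassert>
#include <cstddef>
#include <vector>

// Mirrors the per-CPU fields maintained in the diff above.
struct CpuInfo {
    float total_cpu_percent { 0 };
    float total_cpu_percent_kernel { 0 };
};

// Hypothetical helper: append one sample per CPU to its history buffer,
// from which a widget could draw the total and kernel series.
void append_samples(std::vector<CpuInfo> const& cpus, std::vector<std::vector<CpuInfo>>& histories)
{
    assert(histories.size() == cpus.size());
    for (size_t i = 0; i < cpus.size(); ++i) {
        // Holds because kernel ticks are a subset of scheduled ticks.
        assert(cpus[i].total_cpu_percent_kernel <= cpus[i].total_cpu_percent);
        histories[i].push_back(cpus[i]);
    }
}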