Kernel: Generate page fault events from the kernel profiler
Hook the kernel page fault handler and capture page fault events when the fault has a current thread attached in TLS. We capture the eip and ebp so we can unwind the stack and locate which pieces of code are generating the most page faults.

Co-authored-by: Gunnar Beutner <gbeutner@serenityos.org>
parent 6ac1ca5a9a
commit 83fc591cea

7 changed files with 47 additions and 2 deletions
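As the description above notes, each event records the faulting eip together with ebp so the stack can be unwound later and the fault attributed to a call chain. The sketch below is only an illustration of why ebp is enough on i686 builds with frame pointers; it is not the kernel's actual unwinder (which must validate every pointer it reads), and the helper name walk_frame_pointers is ours.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: follow the saved-ebp chain left behind by the usual
// i686 prologue ("push ebp; mov ebp, esp"). [ebp] holds the caller's saved
// ebp and [ebp + 4] the return address. A real kernel unwinder must use
// safe reads and bounds checks instead of raw dereferences.
static std::vector<uint32_t> walk_frame_pointers(uint32_t eip, uint32_t ebp, size_t max_frames = 64)
{
    std::vector<uint32_t> stack;
    stack.push_back(eip); // innermost frame: the faulting instruction
    for (size_t i = 0; i < max_frames && ebp != 0; ++i) {
        auto* frame = reinterpret_cast<uint32_t const*>(static_cast<uintptr_t>(ebp));
        uint32_t return_address = frame[1]; // [ebp + 4]
        if (return_address == 0)
            break;
        stack.push_back(return_address);
        ebp = frame[0]; // [ebp]: caller's saved ebp
    }
    return stack;
}
```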
@@ -23,6 +23,7 @@
 #include <Kernel/Interrupts/UnhandledInterruptHandler.h>
 #include <Kernel/KSyms.h>
 #include <Kernel/Panic.h>
+#include <Kernel/PerformanceManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/Random.h>
 #include <Kernel/Thread.h>
@@ -243,8 +244,11 @@ void page_fault_handler(TrapFrame* trap)
 
     auto current_thread = Thread::current();
 
-    if (current_thread)
+    if (current_thread) {
         current_thread->set_handling_page_fault(true);
+        PerformanceManager::add_page_fault_event(*current_thread, regs);
+    }
+
     ScopeGuard guard = [current_thread] {
         if (current_thread)
             current_thread->set_handling_page_fault(false);
@@ -7,6 +7,7 @@
 #include <AK/JsonArraySerializer.h>
 #include <AK/JsonObject.h>
 #include <AK/JsonObjectSerializer.h>
+#include <AK/ScopeGuard.h>
 #include <Kernel/Arch/x86/SmapDisabler.h>
 #include <Kernel/FileSystem/Custody.h>
 #include <Kernel/KBufferBuilder.h>
@@ -63,6 +64,17 @@ KResult PerformanceEventBuffer::append_with_eip_and_ebp(ProcessID pid, ThreadID
     if ((g_profiling_event_mask & type) == 0)
         return EINVAL;
 
+    auto current_thread = Thread::current();
+    u32 enter_count = 0;
+    if (current_thread)
+        enter_count = current_thread->enter_profiler();
+    ScopeGuard leave_profiler([&] {
+        if (current_thread)
+            current_thread->leave_profiler();
+    });
+    if (enter_count > 0)
+        return EINVAL;
+
     PerformanceEvent event;
     event.type = type;
     event.lost_samples = lost_samples;
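The enter_profiler()/leave_profiler() pair used above (they are added to Thread later in this commit) is a re-entrancy guard: if appending an event itself generates another profiler event on the same thread, the nested call sees a non-zero counter and returns EINVAL instead of recursing. A minimal self-contained sketch of the pattern, using std::atomic as a stand-in for AK::Atomic and a single global counter where the kernel keeps one per thread:

```cpp
#include <atomic>
#include <cstdio>

// Stand-in for Thread::m_nested_profiler_calls (per-thread in the kernel;
// a global is used here only to keep the sketch short).
static std::atomic<unsigned> s_nested_profiler_calls { 0 };

static bool try_record_event()
{
    // fetch_add() returns the value *before* the increment, so the outermost
    // call sees 0 and any nested call sees a value greater than 0.
    unsigned enter_count = s_nested_profiler_calls.fetch_add(1, std::memory_order_acq_rel);

    // Mirrors the ScopeGuard above: always undo the increment on return.
    struct Leave {
        ~Leave() { s_nested_profiler_calls.fetch_sub(1, std::memory_order_acq_rel); }
    } leave;

    if (enter_count > 0)
        return false; // already inside the profiler; refuse to recurse

    // ... append the event to the buffer here ...
    return true;
}

int main()
{
    std::printf("outermost call recorded: %s\n", try_record_event() ? "yes" : "no");
}
```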
@@ -122,6 +134,8 @@ KResult PerformanceEventBuffer::append_with_eip_and_ebp(ProcessID pid, ThreadID
         event.data.kfree.size = arg1;
         event.data.kfree.ptr = arg2;
         break;
+    case PERF_EVENT_PAGE_FAULT:
+        break;
     default:
        return EINVAL;
     }
@@ -210,6 +224,9 @@ bool PerformanceEventBuffer::to_json_impl(Serializer& object) const
             event_object.add("ptr", static_cast<u64>(event.data.kfree.ptr));
             event_object.add("size", static_cast<u64>(event.data.kfree.size));
             break;
+        case PERF_EVENT_PAGE_FAULT:
+            event_object.add("type", "page_fault");
+            break;
         }
         event_object.add("pid", event.pid);
         event_object.add("tid", event.tid);
@@ -106,6 +106,15 @@ public:
         }
     }
 
+    inline static void add_page_fault_event(Thread& thread, const RegisterState& regs)
+    {
+        if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
+            [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
+                thread.pid(), thread.tid(),
+                regs.eip, regs.ebp, PERF_EVENT_PAGE_FAULT, 0, 0, 0, nullptr);
+        }
+    }
+
     inline static void timer_tick(RegisterState const& regs)
     {
         static Time last_wakeup;
@@ -1120,6 +1120,16 @@ public:
     void set_idle_thread() { m_is_idle_thread = true; }
     bool is_idle_thread() const { return m_is_idle_thread; }
 
+    ALWAYS_INLINE u32 enter_profiler()
+    {
+        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
+    }
+
+    ALWAYS_INLINE u32 leave_profiler()
+    {
+        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
+    }
+
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
 
@@ -1257,6 +1267,7 @@ private:
     bool m_in_block { false };
     bool m_is_idle_thread { false };
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
+    Atomic<u32> m_nested_profiler_calls { 0 };
 
     void yield_without_holding_big_lock();
     void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
@@ -60,6 +60,7 @@ enum {
     PERF_EVENT_CONTEXT_SWITCH = 1024,
     PERF_EVENT_KMALLOC = 2048,
     PERF_EVENT_KFREE = 4096,
+    PERF_EVENT_PAGE_FAULT = 8192,
 };
 
 #define WNOHANG 1
@@ -89,6 +89,7 @@ enum {
     PERF_EVENT_CONTEXT_SWITCH = 1024,
     PERF_EVENT_KMALLOC = 2048,
     PERF_EVENT_KFREE = 4096,
+    PERF_EVENT_PAGE_FAULT = 8192,
 };
 
 #define PERF_EVENT_MASK_ALL (~0ull)
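The PERF_EVENT_* values are powers of two, so they compose into a bitmask: the kernel checks g_profiling_event_mask against an event's type before appending it, and the profile utility below builds that mask by OR-ing the selected types together. A small sketch of the composition and filtering, with the constants copied from the enum above:

```cpp
#include <cstdint>
#include <cstdio>

// Values copied from the enum above; each event kind occupies its own bit.
enum : uint64_t {
    PERF_EVENT_CONTEXT_SWITCH = 1024,
    PERF_EVENT_KMALLOC = 2048,
    PERF_EVENT_KFREE = 4096,
    PERF_EVENT_PAGE_FAULT = 8192,
};

int main()
{
    // Track only page faults and context switches.
    uint64_t event_mask = PERF_EVENT_PAGE_FAULT | PERF_EVENT_CONTEXT_SWITCH;

    // Same test the kernel performs before recording an event: types whose
    // bit is not set in the mask are rejected with EINVAL.
    uint64_t type = PERF_EVENT_KMALLOC;
    std::printf("kmalloc recorded: %s\n", (event_mask & type) ? "yes" : "no"); // prints "no"
}
```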
@@ -44,6 +44,8 @@ int main(int argc, char** argv)
             event_mask |= PERF_EVENT_KMALLOC;
         else if (event_type == "kfree")
             event_mask |= PERF_EVENT_KFREE;
+        else if (event_type == "page_fault")
+            event_mask |= PERF_EVENT_PAGE_FAULT;
         else {
             warnln("Unknown event type '{}' specified.", event_type);
             exit(1);
@@ -53,7 +55,7 @@
 
     auto print_types = [] {
         outln();
-        outln("Event type can be one of: sample, context_switch, kmalloc and kfree.");
+        outln("Event type can be one of: sample, context_switch, page_fault, kmalloc and kfree.");
     };
 
     if (!args_parser.parse(argc, argv, false)) {