
Kernel: Rename PerformanceEvent methods to be more ARCH independent

Author: Brian Gianforcaro, 2021-07-18 16:54:45 -07:00 (committed by Gunnar Beutner)
parent 1cffecbe8d
commit 121e7626d0
3 changed files with 17 additions and 29 deletions
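What makes the rename possible: instead of reading regs.eip/regs.ebp on i386 and regs.rip/regs.rbp on x86_64 at every call site, callers go through architecture-neutral ip()/bp() accessors on the register state. Below is a minimal, self-contained sketch of that accessor pattern; the struct layout, the __i386__ check, and the FlatPtr alias are illustrative stand-ins, not the kernel's exact definitions.

// Sketch only: the arch-neutral accessor idea the renamed API relies on.
// Names and layout are illustrative; the kernel's RegisterState differs.
#include <cstdint>

using FlatPtr = uintptr_t; // stand-in for the kernel's address-sized integer type

struct RegisterState {
#if defined(__i386__) // stand-in for the kernel's ARCH(I386) check
    FlatPtr eip { 0 };
    FlatPtr ebp { 0 };
    FlatPtr ip() const { return eip; } // instruction pointer, arch-neutral name
    FlatPtr bp() const { return ebp; } // base (frame) pointer, arch-neutral name
#else
    FlatPtr rip { 0 };
    FlatPtr rbp { 0 };
    FlatPtr ip() const { return rip; }
    FlatPtr bp() const { return rbp; }
#endif
};

// A call site can then be written once for both architectures, e.g.:
//     event_buffer->append_with_ip_and_bp(pid, tid, regs.ip(), regs.bp(), ...);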

File 1 of 3:

@@ -25,16 +25,16 @@ NEVER_INLINE KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, Flat
     FlatPtr ebp;
     asm volatile("movl %%ebp, %%eax"
                  : "=a"(ebp));
-    return append_with_eip_and_ebp(current_thread->pid(), current_thread->tid(), 0, ebp, type, 0, arg1, arg2, arg3);
+    return append_with_ip_and_bp(current_thread->pid(), current_thread->tid(), 0, ebp, type, 0, arg1, arg2, arg3);
 }
 
-static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(FlatPtr ebp, FlatPtr eip)
+static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(FlatPtr bp, FlatPtr ip)
 {
     Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace;
-    if (eip != 0)
-        backtrace.append(eip);
+    if (ip != 0)
+        backtrace.append(ip);
     FlatPtr stack_ptr_copy;
-    FlatPtr stack_ptr = (FlatPtr)ebp;
+    FlatPtr stack_ptr = bp;
     // FIXME: Figure out how to remove this SmapDisabler without breaking profile stacks.
     SmapDisabler disabler;
     while (stack_ptr) {
@@ -54,8 +54,8 @@ static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(Fl
     return backtrace;
 }
 
-KResult PerformanceEventBuffer::append_with_eip_and_ebp(ProcessID pid, ThreadID tid,
-    u32 eip, u32 ebp, int type, u32 lost_samples, FlatPtr arg1, FlatPtr arg2, const StringView& arg3)
+KResult PerformanceEventBuffer::append_with_ip_and_bp(ProcessID pid, ThreadID tid,
+    FlatPtr ip, FlatPtr bp, int type, u32 lost_samples, FlatPtr arg1, FlatPtr arg2, const StringView& arg3)
 {
     if (count() >= capacity())
         return ENOBUFS;
@@ -139,7 +139,7 @@ KResult PerformanceEventBuffer::append_with_eip_and_ebp(ProcessID pid, ThreadID
         return EINVAL;
     }
 
-    auto backtrace = raw_backtrace(ebp, eip);
+    auto backtrace = raw_backtrace(bp, ip);
     event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size()));
     memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr));
@@ -269,17 +269,17 @@ void PerformanceEventBuffer::add_process(const Process& process, ProcessEventTyp
     else
         executable = String::formatted("<{}>", process.name());
 
-    [[maybe_unused]] auto rc = append_with_eip_and_ebp(process.pid(), 0, 0, 0,
+    [[maybe_unused]] auto rc = append_with_ip_and_bp(process.pid(), 0, 0, 0,
         event_type == ProcessEventType::Create ? PERF_EVENT_PROCESS_CREATE : PERF_EVENT_PROCESS_EXEC,
         0, process.pid().value(), 0, executable);
 
     process.for_each_thread([&](auto& thread) {
-        [[maybe_unused]] auto rc = append_with_eip_and_ebp(process.pid(), thread.tid().value(),
+        [[maybe_unused]] auto rc = append_with_ip_and_bp(process.pid(), thread.tid().value(),
             0, 0, PERF_EVENT_THREAD_CREATE, 0, 0, 0, nullptr);
     });
 
     for (auto& region : process.space().regions()) {
-        [[maybe_unused]] auto rc = append_with_eip_and_ebp(process.pid(), 0,
+        [[maybe_unused]] auto rc = append_with_ip_and_bp(process.pid(), 0,
             0, 0, PERF_EVENT_MMAP, 0, region->range().base().get(), region->range().size(), region->name());
     }
 }
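For context on the raw_backtrace() change above: the function captures a sample's call stack by walking the saved frame-pointer chain, where each frame's base-pointer slot holds the caller's base pointer and the word above it holds the return address. The loop body is not part of this diff; the sketch below illustrates the general technique with hypothetical helpers (read_word stands in for the kernel's fault-tolerant reads behind the SmapDisabler), not the kernel's exact code.

#include <cstddef>
#include <cstdint>
#include <vector>

using FlatPtr = uintptr_t;

// Hypothetical helper: a real kernel uses fault-tolerant copies so a corrupt
// frame pointer cannot crash the walk; this sketch just dereferences directly.
static bool read_word(FlatPtr address, FlatPtr& out)
{
    if (address == 0)
        return false;
    out = *reinterpret_cast<FlatPtr const*>(address);
    return true;
}

// Frame-pointer walk: on x86, [bp] holds the caller's saved bp and
// [bp + sizeof(FlatPtr)] holds the return address into the caller.
static std::vector<FlatPtr> walk_frame_pointers(FlatPtr bp, FlatPtr ip, size_t max_frames)
{
    std::vector<FlatPtr> backtrace;
    if (ip != 0)
        backtrace.push_back(ip); // the sampled instruction pointer is frame 0
    FlatPtr frame = bp;
    while (frame != 0 && backtrace.size() < max_frames) {
        FlatPtr next_frame = 0;
        FlatPtr return_address = 0;
        if (!read_word(frame, next_frame)
            || !read_word(frame + sizeof(FlatPtr), return_address)
            || return_address == 0)
            break;
        backtrace.push_back(return_address);
        frame = next_frame; // follow the chain toward older frames
    }
    return backtrace;
}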

File 2 of 3:

@@ -95,7 +95,7 @@ public:
     static OwnPtr<PerformanceEventBuffer> try_create_with_size(size_t buffer_size);
 
     KResult append(int type, FlatPtr arg1, FlatPtr arg2, const StringView& arg3, Thread* current_thread = Thread::current());
-    KResult append_with_eip_and_ebp(ProcessID pid, ThreadID tid, u32 eip, u32 ebp,
+    KResult append_with_ip_and_bp(ProcessID pid, ThreadID tid, FlatPtr eip, FlatPtr ebp,
         int type, u32 lost_samples, FlatPtr arg1, FlatPtr arg2, const StringView& arg3);
 
     void clear()

File 3 of 3:

@@ -33,7 +33,7 @@ public:
     {
         if (g_profiling_all_threads) {
            VERIFY(g_global_perf_events);
-            [[maybe_unused]] auto rc = g_global_perf_events->append_with_eip_and_ebp(
+            [[maybe_unused]] auto rc = g_global_perf_events->append_with_ip_and_bp(
                 process.pid(), 0, 0, 0, PERF_EVENT_PROCESS_EXIT, 0, 0, 0, nullptr);
         }
     }
@@ -61,15 +61,9 @@ public:
         if (current_thread.is_profiling_suppressed())
             return;
         if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
-#if ARCH(I386)
-            [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
-                current_thread.pid(), current_thread.tid(),
-                regs.eip, regs.ebp, PERF_EVENT_SAMPLE, lost_time, 0, 0, nullptr);
-#else
-            [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
-                current_thread.pid(), current_thread.tid(),
-                regs.rip, regs.rbp, PERF_EVENT_SAMPLE, lost_time, 0, 0, nullptr);
-#endif
+            [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
+                current_thread.pid(), current_thread.tid(),
+                regs.ip(), regs.bp(), PERF_EVENT_SAMPLE, lost_time, 0, 0, nullptr);
         }
     }
@@ -119,15 +113,9 @@ public:
         if (thread.is_profiling_suppressed())
             return;
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
-#if ARCH(I386)
-            [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
-                thread.pid(), thread.tid(),
-                regs.eip, regs.ebp, PERF_EVENT_PAGE_FAULT, 0, 0, 0, nullptr);
-#else
-            [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
-                thread.pid(), thread.tid(),
-                regs.rip, regs.rbp, PERF_EVENT_PAGE_FAULT, 0, 0, 0, nullptr);
-#endif
+            [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
+                thread.pid(), thread.tid(),
+                regs.ip(), regs.bp(), PERF_EVENT_PAGE_FAULT, 0, 0, 0, nullptr);
         }
     }