From c10e0adaca3ae6c56f729e6edad21eecf8b782d4 Mon Sep 17 00:00:00 2001 From: Andreas Kling <kling@serenityos.org> Date: Wed, 3 Feb 2021 11:51:13 +0100 Subject: [PATCH] Kernel: Move perf event backtrace capture out of Thread class There's no need for this to be generic and support running from an arbitrary thread context. Perf events are always generated from within the thread being profiled, so take advantage of that to simplify the code. Also use Vector capacity to avoid heap allocations. --- Kernel/PerformanceEventBuffer.cpp | 30 ++++++++++++++++++++++++------ Kernel/Thread.cpp | 24 ------------------------ Kernel/Thread.h | 1 - 3 files changed, 24 insertions(+), 31 deletions(-) diff --git a/Kernel/PerformanceEventBuffer.cpp b/Kernel/PerformanceEventBuffer.cpp index 941e14efb5..0aaa07c3fd 100644 --- a/Kernel/PerformanceEventBuffer.cpp +++ b/Kernel/PerformanceEventBuffer.cpp @@ -48,6 +48,29 @@ KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2) return append_with_eip_and_ebp(eip, ebp, type, arg1, arg2); } +static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(FlatPtr ebp, FlatPtr eip) +{ + Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace; + backtrace.append(eip); + FlatPtr stack_ptr_copy; + FlatPtr stack_ptr = (FlatPtr)ebp; + // FIXME: Figure out how to remove this SmapDisabler without breaking profile stacks. 
+ SmapDisabler disabler; + while (stack_ptr) { + void* fault_at; + if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at)) + break; + FlatPtr retaddr; + if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at)) + break; + backtrace.append(retaddr); + if (backtrace.size() == PerformanceEvent::max_stack_frame_count) + break; + stack_ptr = stack_ptr_copy; + } + return backtrace; +} + KResult PerformanceEventBuffer::append_with_eip_and_ebp(u32 eip, u32 ebp, int type, FlatPtr arg1, FlatPtr arg2) { if (count() >= capacity()) @@ -70,12 +93,7 @@ KResult PerformanceEventBuffer::append_with_eip_and_ebp(u32 eip, u32 ebp, int ty return EINVAL; } - auto current_thread = Thread::current(); - Vector<FlatPtr> backtrace; - { - SmapDisabler disabler; - backtrace = current_thread->raw_backtrace(ebp, eip); - } + auto backtrace = raw_backtrace(ebp, eip); event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size())); memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr)); diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index c9719b7b7a..fe4f762b80 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -1017,30 +1017,6 @@ String Thread::backtrace_impl() return builder.to_string(); } -Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const -{ - InterruptDisabler disabler; - auto& process = const_cast<Process&>(this->process()); - ProcessPagingScope paging_scope(process); - Vector<FlatPtr> backtrace; - backtrace.append(eip); - FlatPtr stack_ptr_copy; - FlatPtr stack_ptr = (FlatPtr)ebp; - while (stack_ptr) { - void* fault_at; - if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at)) - break; - FlatPtr retaddr; - if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at)) - break; - backtrace.append(retaddr); - if (backtrace.size() == PerformanceEvent::max_stack_frame_count) - break; - stack_ptr = stack_ptr_copy; - } - return backtrace; -} - 
size_t Thread::thread_specific_region_alignment() const { return max(process().m_master_tls_alignment, alignof(ThreadSpecificData)); diff --git a/Kernel/Thread.h b/Kernel/Thread.h index 70bdf6bfc1..4a01f3256e 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -127,7 +127,6 @@ public: const Process& process() const { return m_process; } String backtrace(); - Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const; String name() const {