1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 04:27:43 +00:00

Kernel: Move perf event backtrace capture out of Thread class

There's no need for this to be generic and support running from an
arbitrary thread context. Perf events are always generated from within
the thread being profiled, so take advantage of that to simplify the
code. Also use Vector capacity to avoid heap allocations.
This commit is contained in:
Andreas Kling 2021-02-03 11:51:13 +01:00
parent 9c77980965
commit c10e0adaca
3 changed files with 24 additions and 31 deletions

View file

@@ -48,6 +48,29 @@ KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2)
return append_with_eip_and_ebp(eip, ebp, type, arg1, arg2);
}
// Walks the saved frame-pointer chain starting at `ebp` and collects return
// addresses, with `eip` recorded as the first entry. The Vector's inline
// capacity (max_stack_frame_count) avoids heap allocation for typical stacks.
// The walk stops at a null frame pointer, at the first unreadable frame, or
// once max_stack_frame_count entries have been gathered.
static Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> raw_backtrace(FlatPtr ebp, FlatPtr eip)
{
Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace;
backtrace.append(eip);
FlatPtr stack_ptr_copy;
FlatPtr stack_ptr = ebp; // `ebp` is already a FlatPtr; no cast needed.
// FIXME: Figure out how to remove this SmapDisabler without breaking profile stacks.
SmapDisabler disabler;
while (stack_ptr) {
void* fault_at;
// Frame layout on the stack: [saved EBP][return address]. Read both via
// safe_memcpy so a bogus frame pointer aborts the walk instead of faulting.
if (!safe_memcpy(&stack_ptr_copy, reinterpret_cast<void*>(stack_ptr), sizeof(FlatPtr), fault_at))
break;
FlatPtr retaddr;
if (!safe_memcpy(&retaddr, reinterpret_cast<void*>(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
break;
backtrace.append(retaddr);
// Capacity check comes after the append, so the result is capped at
// exactly max_stack_frame_count entries (including the initial eip).
if (backtrace.size() == PerformanceEvent::max_stack_frame_count)
break;
stack_ptr = stack_ptr_copy;
}
return backtrace;
}
KResult PerformanceEventBuffer::append_with_eip_and_ebp(u32 eip, u32 ebp, int type, FlatPtr arg1, FlatPtr arg2)
{
if (count() >= capacity())
@@ -70,12 +93,7 @@ KResult PerformanceEventBuffer::append_with_eip_and_ebp(u32 eip, u32 ebp, int ty
return EINVAL;
}
auto current_thread = Thread::current();
Vector<FlatPtr> backtrace;
{
SmapDisabler disabler;
backtrace = current_thread->raw_backtrace(ebp, eip);
}
auto backtrace = raw_backtrace(ebp, eip);
event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size()));
memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr));

View file

@@ -1017,30 +1017,6 @@ String Thread::backtrace_impl()
return builder.to_string();
}
// Captures a raw stack trace for this thread by walking the saved
// frame-pointer chain starting at `ebp`, recording `eip` as the first entry.
// NOTE(review): the local Vector has inline capacity, but the declared return
// type is a plain Vector<FlatPtr>, so returning converts the result (and may
// heap-allocate) — presumably the cost this code's replacement avoids; verify.
Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const
{
// Disable interrupts for the duration of the walk, then switch to the target
// process's page tables so this thread's stack is mapped while we read it.
// (InterruptDisabler must come first so the paging scope can't be preempted.)
InterruptDisabler disabler;
auto& process = const_cast<Process&>(this->process());
ProcessPagingScope paging_scope(process);
Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace;
backtrace.append(eip);
FlatPtr stack_ptr_copy;
// Start the walk at the caller-supplied frame pointer.
FlatPtr stack_ptr = (FlatPtr)ebp;
while (stack_ptr) {
void* fault_at;
// Each frame holds [saved EBP][return address]; safe_memcpy aborts the
// walk cleanly if the frame pointer points at unreadable memory.
if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
break;
FlatPtr retaddr;
if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
break;
backtrace.append(retaddr);
// Cap the trace (the initial eip counts toward the limit).
if (backtrace.size() == PerformanceEvent::max_stack_frame_count)
break;
// Follow the chain to the caller's frame.
stack_ptr = stack_ptr_copy;
}
return backtrace;
}
size_t Thread::thread_specific_region_alignment() const
{
return max(process().m_master_tls_alignment, alignof(ThreadSpecificData));

View file

@@ -127,7 +127,6 @@ public:
const Process& process() const { return m_process; }
String backtrace();
Vector<FlatPtr> raw_backtrace(FlatPtr ebp, FlatPtr eip) const;
String name() const
{