diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index b36f0071c2..919ded724c 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -398,7 +398,7 @@ public:
     NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
     [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
     FlatPtr init_context(Thread& thread, bool leave_crit);
-    static Vector<FlatPtr, 32> capture_stack_trace(Thread& thread, size_t max_frames = 0);
+    static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
 
     static StringView platform_string();
 };
diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index dfcdf9ab00..9c61facd65 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -491,15 +491,15 @@ const DescriptorTablePointer& Processor::get_gdtr()
     return m_gdtr;
 }
 
-Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
+ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
 {
     FlatPtr frame_ptr = 0, ip = 0;
     Vector<FlatPtr, 32> stack_trace;
 
-    auto walk_stack = [&](FlatPtr stack_ptr) {
+    auto walk_stack = [&](FlatPtr stack_ptr) -> ErrorOr<void> {
         static constexpr size_t max_stack_frames = 4096;
         bool is_walking_userspace_stack = false;
-        stack_trace.append(ip);
+        TRY(stack_trace.try_append(ip));
         size_t count = 1;
         while (stack_ptr && stack_trace.size() < max_stack_frames) {
             FlatPtr retaddr;
@@ -520,24 +520,25 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
             if (Memory::is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                 if (copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]).is_error() || !retaddr)
                     break;
-                stack_trace.append(retaddr);
+                TRY(stack_trace.try_append(retaddr));
                 if (copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr).is_error())
                     break;
             } else {
                 void* fault_at;
                 if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at) || !retaddr)
                     break;
-                stack_trace.append(retaddr);
+                TRY(stack_trace.try_append(retaddr));
                 if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
                     break;
             }
         }
+        return {};
     };
     auto capture_current_thread = [&]() {
         frame_ptr = (FlatPtr)__builtin_frame_address(0);
         ip = (FlatPtr)__builtin_return_address(0);
 
-        walk_stack(frame_ptr);
+        return walk_stack(frame_ptr);
     };
 
     // Since the thread may be running on another processor, there
@@ -551,7 +552,7 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
         // need to be preempted. Since this is our own thread it won't
         // cause any problems as the stack won't change below this frame.
         lock.unlock();
-        capture_current_thread();
+        TRY(capture_current_thread());
     } else if (thread.is_active()) {
         VERIFY(thread.cpu() != Processor::current_id());
         // If this is the case, the thread is currently running
@@ -560,6 +561,7 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
         // an IPI to that processor, have it walk the stack and wait
         // until it returns the data back to us
         auto& proc = Processor::current();
+        ErrorOr<void> result;
         smp_unicast(
             thread.cpu(),
             [&]() {
@@ -574,9 +576,10 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
                 // TODO: What to do about page faults here? We might deadlock
                 //       because the other processor is still holding the
                 //       scheduler lock...
-                capture_current_thread();
+                result = capture_current_thread();
             },
             false);
+        TRY(result);
     } else {
         switch (thread.state()) {
         case Thread::Running:
@@ -608,7 +611,7 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
             // need to prevent the target thread from being run while
             // we walk the stack
             lock.unlock();
-            walk_stack(frame_ptr);
+            TRY(walk_stack(frame_ptr));
             break;
         }
         default:
diff --git a/Kernel/ProcessSpecificExposed.cpp b/Kernel/ProcessSpecificExposed.cpp
index 70bfd36273..e6fd94f028 100644
--- a/Kernel/ProcessSpecificExposed.cpp
+++ b/Kernel/ProcessSpecificExposed.cpp
@@ -27,7 +27,7 @@ ErrorOr<void> Process::procfs_get_thread_stack(ThreadID thread_id, KBufferBuilde
         return ESRCH;
     bool show_kernel_addresses = Process::current().is_superuser();
     bool kernel_address_added = false;
-    for (auto address : Processor::capture_stack_trace(*thread, 1024)) {
+    for (auto address : TRY(Processor::capture_stack_trace(*thread, 1024))) {
         if (!show_kernel_addresses && !Memory::is_user_address(VirtualAddress { address })) {
             if (kernel_address_added)
                 continue;
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 858eced9b2..d218c6b697 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -1192,7 +1192,7 @@ ErrorOr<NonnullOwnPtr<KString>> Thread::backtrace()
     Vector<RecognizedSymbol, 128> recognized_symbols;
 
     auto& process = const_cast<Process&>(this->process());
-    auto stack_trace = Processor::capture_stack_trace(*this);
+    auto stack_trace = TRY(Processor::capture_stack_trace(*this));
     VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
     ScopedAddressSpaceSwitcher switcher(process);
     for (auto& frame : stack_trace) {