Mirror of https://github.com/RGBCube/serenity (synced 2025-07-25 17:37:37 +00:00)

Kernel: Make Processor::capture_stack_trace fallible using ErrorOr

This commit is contained in:
Idan Horowitz 2022-01-15 21:19:16 +02:00
parent 03b7352286
commit 0142f33ddc
4 changed files with 15 additions and 12 deletions

View file

@ -398,7 +398,7 @@ public:
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread); NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, FlatPtr flags); [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
FlatPtr init_context(Thread& thread, bool leave_crit); FlatPtr init_context(Thread& thread, bool leave_crit);
static Vector<FlatPtr, 32> capture_stack_trace(Thread& thread, size_t max_frames = 0); static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
static StringView platform_string(); static StringView platform_string();
}; };

View file

@ -491,15 +491,15 @@ const DescriptorTablePointer& Processor::get_gdtr()
return m_gdtr; return m_gdtr;
} }
Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_frames) ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
{ {
FlatPtr frame_ptr = 0, ip = 0; FlatPtr frame_ptr = 0, ip = 0;
Vector<FlatPtr, 32> stack_trace; Vector<FlatPtr, 32> stack_trace;
auto walk_stack = [&](FlatPtr stack_ptr) { auto walk_stack = [&](FlatPtr stack_ptr) -> ErrorOr<void> {
static constexpr size_t max_stack_frames = 4096; static constexpr size_t max_stack_frames = 4096;
bool is_walking_userspace_stack = false; bool is_walking_userspace_stack = false;
stack_trace.append(ip); TRY(stack_trace.try_append(ip));
size_t count = 1; size_t count = 1;
while (stack_ptr && stack_trace.size() < max_stack_frames) { while (stack_ptr && stack_trace.size() < max_stack_frames) {
FlatPtr retaddr; FlatPtr retaddr;
@ -520,24 +520,25 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
if (Memory::is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) { if (Memory::is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
if (copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]).is_error() || !retaddr) if (copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]).is_error() || !retaddr)
break; break;
stack_trace.append(retaddr); TRY(stack_trace.try_append(retaddr));
if (copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr).is_error()) if (copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr).is_error())
break; break;
} else { } else {
void* fault_at; void* fault_at;
if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at) || !retaddr) if (!safe_memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr), fault_at) || !retaddr)
break; break;
stack_trace.append(retaddr); TRY(stack_trace.try_append(retaddr));
if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at)) if (!safe_memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr), fault_at))
break; break;
} }
} }
return {};
}; };
auto capture_current_thread = [&]() { auto capture_current_thread = [&]() {
frame_ptr = (FlatPtr)__builtin_frame_address(0); frame_ptr = (FlatPtr)__builtin_frame_address(0);
ip = (FlatPtr)__builtin_return_address(0); ip = (FlatPtr)__builtin_return_address(0);
walk_stack(frame_ptr); return walk_stack(frame_ptr);
}; };
// Since the thread may be running on another processor, there // Since the thread may be running on another processor, there
@ -551,7 +552,7 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
// need to be preempted. Since this is our own thread it won't // need to be preempted. Since this is our own thread it won't
// cause any problems as the stack won't change below this frame. // cause any problems as the stack won't change below this frame.
lock.unlock(); lock.unlock();
capture_current_thread(); TRY(capture_current_thread());
} else if (thread.is_active()) { } else if (thread.is_active()) {
VERIFY(thread.cpu() != Processor::current_id()); VERIFY(thread.cpu() != Processor::current_id());
// If this is the case, the thread is currently running // If this is the case, the thread is currently running
@ -560,6 +561,7 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
// an IPI to that processor, have it walk the stack and wait // an IPI to that processor, have it walk the stack and wait
// until it returns the data back to us // until it returns the data back to us
auto& proc = Processor::current(); auto& proc = Processor::current();
ErrorOr<void> result;
smp_unicast( smp_unicast(
thread.cpu(), thread.cpu(),
[&]() { [&]() {
@ -574,9 +576,10 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
// TODO: What to do about page faults here? We might deadlock // TODO: What to do about page faults here? We might deadlock
// because the other processor is still holding the // because the other processor is still holding the
// scheduler lock... // scheduler lock...
capture_current_thread(); result = capture_current_thread();
}, },
false); false);
TRY(result);
} else { } else {
switch (thread.state()) { switch (thread.state()) {
case Thread::Running: case Thread::Running:
@ -608,7 +611,7 @@ Vector<FlatPtr, 32> Processor::capture_stack_trace(Thread& thread, size_t max_fr
// need to prevent the target thread from being run while // need to prevent the target thread from being run while
// we walk the stack // we walk the stack
lock.unlock(); lock.unlock();
walk_stack(frame_ptr); TRY(walk_stack(frame_ptr));
break; break;
} }
default: default:

View file

@ -27,7 +27,7 @@ ErrorOr<void> Process::procfs_get_thread_stack(ThreadID thread_id, KBufferBuilde
return ESRCH; return ESRCH;
bool show_kernel_addresses = Process::current().is_superuser(); bool show_kernel_addresses = Process::current().is_superuser();
bool kernel_address_added = false; bool kernel_address_added = false;
for (auto address : Processor::capture_stack_trace(*thread, 1024)) { for (auto address : TRY(Processor::capture_stack_trace(*thread, 1024))) {
if (!show_kernel_addresses && !Memory::is_user_address(VirtualAddress { address })) { if (!show_kernel_addresses && !Memory::is_user_address(VirtualAddress { address })) {
if (kernel_address_added) if (kernel_address_added)
continue; continue;

View file

@ -1192,7 +1192,7 @@ ErrorOr<NonnullOwnPtr<KString>> Thread::backtrace()
Vector<RecognizedSymbol, 128> recognized_symbols; Vector<RecognizedSymbol, 128> recognized_symbols;
auto& process = const_cast<Process&>(this->process()); auto& process = const_cast<Process&>(this->process());
auto stack_trace = Processor::capture_stack_trace(*this); auto stack_trace = TRY(Processor::capture_stack_trace(*this));
VERIFY(!g_scheduler_lock.is_locked_by_current_processor()); VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
ScopedAddressSpaceSwitcher switcher(process); ScopedAddressSpaceSwitcher switcher(process);
for (auto& frame : stack_trace) { for (auto& frame : stack_trace) {