Kernel: Implement capturing stack trace on a different CPU
When trying to get a stack trace of a thread on another CPU, we send an SMP message to that processor to capture the stack trace for us.
parent 5b38132e3c
commit 3ee7c21fae
2 changed files with 86 additions and 7 deletions
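The mechanism the commit message describes, in rough terms: the requesting CPU keeps holding the scheduler lock so the target thread cannot be switched out or migrate, sends a synchronous SMP message to the CPU that thread is running on, and that CPU runs a callback which captures the frame pointer locally and reports back. The following is only a userspace analogue of that request/run/wait shape, using standard C++ threads in place of the kernel's ProcessorMessage and IPI machinery; all names in it are illustrative.

```cpp
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

// Toy model of a synchronous cross-CPU message: one std::thread stands in for
// the remote CPU, and the requester blocks until its closure has run there.
struct RemoteCpu {
    std::mutex lock;
    std::condition_variable wake;
    std::function<void()> pending; // the "message" queued for this CPU

    // Requesting side: queue the closure and wait until it has been handled,
    // analogous to a unicast with async == false.
    void unicast_sync(std::function<void()> callback)
    {
        std::unique_lock<std::mutex> guard(lock);
        pending = std::move(callback);
        wake.notify_one();
        wake.wait(guard, [&] { return !pending; });
    }

    // Remote side: pick up one message, run it, and signal completion,
    // analogous to the IPI handler processing a queued message.
    void service_one()
    {
        std::unique_lock<std::mutex> guard(lock);
        wake.wait(guard, [&] { return static_cast<bool>(pending); });
        pending(); // e.g. capture the frame pointer of whatever runs here
        pending = nullptr;
        wake.notify_one();
    }
};

int main()
{
    RemoteCpu cpu1;
    std::thread remote([&] { cpu1.service_one(); });

    unsigned frame_ptr = 0;
    cpu1.unicast_sync([&] {
        frame_ptr = 0xdeadbeef; // stand-in for reading ebp on the remote CPU
    });
    remote.join();

    std::cout << std::hex << frame_ptr << '\n'; // prints deadbeef
}
```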
@@ -1279,8 +1279,9 @@ const DescriptorTablePointer& Processor::get_gdtr()
     return m_gdtr;
 }
 
-bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
+bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip, bool from_other_processor)
 {
+    bool ret = true;
     ScopedCritical critical;
     auto& proc = Processor::current();
     if (&thread == proc.current_thread()) {
@@ -1288,6 +1289,12 @@ bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
         asm volatile("movl %%ebp, %%eax"
                      : "=g"(frame_ptr));
     } else {
+        // If this triggered from another processor, we should never
+        // hit this code path because the other processor is still holding
+        // the scheduler lock, which should prevent us from switching
+        // contexts
+        ASSERT(!from_other_processor);
+
         // Since the thread may be running on another processor, there
         // is a chance a context switch may happen while we're trying
         // to get it. It also won't be entirely accurate and merely
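For context on what is done with the value read from ebp: a backtrace follows the saved-ebp chain, where each frame begins with the caller's frame pointer followed by the return address. A minimal sketch of that walk, assuming x86 code built with frame pointers kept (-fno-omit-frame-pointer); walk_frames is an illustrative name, not a kernel function.

```cpp
#include <cstdint>
#include <iostream>

// Each stack frame built with frame pointers starts with the caller's saved
// frame pointer, followed by the return address pushed by the call.
static void walk_frames(uintptr_t frame_ptr, unsigned max_frames = 4)
{
    for (unsigned i = 0; i < max_frames && frame_ptr != 0; ++i) {
        auto* frame = reinterpret_cast<uintptr_t*>(frame_ptr);
        uintptr_t return_ip = frame[1]; // return address of this frame
        std::cout << "#" << i << " ip=0x" << std::hex << return_ip << std::dec << '\n';
        frame_ptr = frame[0];           // previous (caller's) frame pointer
    }
}

int main()
{
    uintptr_t frame_ptr = 0;
#if defined(__i386__)
    // Same idea as the inline asm in the diff: read our own ebp.
    asm volatile("movl %%ebp, %0"
                 : "=g"(frame_ptr));
#elif defined(__x86_64__)
    // 64-bit equivalent so the sketch also runs on an x86-64 host.
    asm volatile("movq %%rbp, %0"
                 : "=g"(frame_ptr));
#endif
    walk_frames(frame_ptr);
}
```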
@@ -1295,15 +1302,19 @@ bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
         ScopedSpinLock lock(g_scheduler_lock);
         if (thread.state() == Thread::Running) {
             ASSERT(thread.cpu() != proc.id());
-            // TODO: If this is the case, the thread is currently running
+            // If this is the case, the thread is currently running
             // on another processor. We can't trust the kernel stack as
             // it may be changing at any time. We need to probably send
             // an IPI to that processor, have it walk the stack and wait
             // until it returns the data back to us
-            dbg() << "CPU[" << proc.id() << "] getting stack for "
-                << thread << " on other CPU# " << thread.cpu() << " not yet implemented!";
-            frame_ptr = eip = 0; // TODO
-            return false;
+            smp_unicast(thread.cpu(),
+                [&]() {
+                    dbg() << "CPU[" << Processor::current().id() << "] getting stack for cpu #" << proc.id();
+                    // NOTE: Because we are holding the scheduler lock while
+                    // waiting for this callback to finish, the current thread
+                    // on the target processor cannot change
+                    ret = get_context_frame_ptr(thread, frame_ptr, eip, true);
+                }, false);
         } else {
             // We need to retrieve ebp from what was last pushed to the kernel
             // stack. Before switching out of that thread, it switch_context
@@ -1903,6 +1914,56 @@ void Processor::smp_broadcast(void (*callback)(), bool async)
     smp_broadcast_message(msg, async);
 }
 
+void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
+{
+    auto& cur_proc = Processor::current();
+    ASSERT(cpu != cur_proc.id());
+    auto& target_proc = processors()[cpu];
+    msg.async = async;
+#ifdef SMP_DEBUG
+    dbg() << "SMP[" << cur_proc.id() << "]: Send message " << VirtualAddress(&msg) << " to cpu #" << cpu << " proc: " << VirtualAddress(&target_proc);
+#endif
+    atomic_store(&msg.refs, 1u, AK::MemoryOrder::memory_order_release);
+    if (target_proc->smp_queue_message(msg)) {
+        APIC::the().send_ipi(cpu);
+    }
+
+    if (!async) {
+        // If synchronous then we must cleanup and return the message back
+        // to the pool. Otherwise, the last processor to complete it will return it
+        while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
+            // TODO: pause for a bit?
+
+            // We need to check here if another processor may have requested
+            // us to halt before this message could be delivered. Otherwise
+            // we're just spinning the CPU because msg.refs will never drop to 0.
+            if (cur_proc.m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed))
+                halt_this();
+        }
+
+        smp_cleanup_message(msg);
+        smp_return_to_pool(msg);
+    }
+}
+
+void Processor::smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async)
+{
+    auto& msg = smp_get_from_pool();
+    msg.type = ProcessorMessage::CallbackWithData;
+    msg.callback_with_data.handler = callback;
+    msg.callback_with_data.data = data;
+    msg.callback_with_data.free = free_data;
+    smp_unicast_message(cpu, msg, async);
+}
+
+void Processor::smp_unicast(u32 cpu, void (*callback)(), bool async)
+{
+    auto& msg = smp_get_from_pool();
+    msg.type = ProcessorMessage::CallbackWithData;
+    msg.callback.handler = callback;
+    smp_unicast_message(cpu, msg, async);
+}
+
 void Processor::smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count)
 {
     auto& msg = smp_get_from_pool();
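smp_unicast_message mirrors the lifecycle of the existing broadcast path: take a message from the pool, publish it with a reference count of one, queue it on the target processor, send an IPI, and, for synchronous sends, spin until the target drops the count to zero before cleaning up and returning the message to the pool. Below is a small standalone sketch of that reference-count handshake, with std::atomic standing in for the kernel's atomic_store/atomic_load helpers and a worker thread standing in for the IPI handler; it is an illustration, not the kernel code.

```cpp
#include <atomic>
#include <iostream>
#include <thread>

// One message with a reference count, as in ProcessorMessage::refs.
struct Message {
    std::atomic<unsigned> refs { 0 };
    void (*handler)() { nullptr };
};

int main()
{
    Message msg;
    msg.handler = [] { std::cout << "callback ran on the target side\n"; };

    // Sender: publish the message with one outstanding reference
    // (the unicast case has exactly one recipient).
    msg.refs.store(1, std::memory_order_release);

    // Target (stand-in for the IPI handler on the other processor):
    // run the callback, then drop the reference to signal completion.
    std::thread target([&] {
        msg.handler();
        msg.refs.fetch_sub(1, std::memory_order_release);
    });

    // Synchronous sender: spin until the count reaches zero; only then may the
    // message be cleaned up and returned to the pool. The kernel loop also
    // checks m_halt_requested here so a pending halt request cannot deadlock it.
    while (msg.refs.load(std::memory_order_acquire) != 0)
        std::this_thread::yield();

    target.join();
    std::cout << "message complete\n";
}
```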
@@ -739,6 +739,7 @@ class Processor {
     static ProcessorMessage& smp_get_from_pool();
     static void smp_cleanup_message(ProcessorMessage& msg);
     bool smp_queue_message(ProcessorMessage& msg);
+    static void smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async);
     static void smp_broadcast_message(ProcessorMessage& msg, bool async);
     static void smp_broadcast_halt();
 
@@ -965,6 +966,23 @@ public:
     }
     static void smp_broadcast(void (*callback)(), bool async);
     static void smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
+    template<typename Callback>
+    static void smp_unicast(u32 cpu, Callback callback, bool async)
+    {
+        auto* data = new Callback(move(callback));
+        smp_unicast(
+            cpu,
+            [](void* data) {
+                (*reinterpret_cast<Callback*>(data))();
+            },
+            data,
+            [](void* data) {
+                delete reinterpret_cast<Callback*>(data);
+            },
+            async);
+    }
+    static void smp_unicast(u32 cpu, void (*callback)(), bool async);
+    static void smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
     static void smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count);
 
     template<typename Callback>
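The templated smp_unicast added here is a standard type-erasure shim: the callable (including its captures) is moved onto the heap, a captureless lambda of type void (*)(void*) invokes it through the opaque pointer, and a second one deletes it after the message has run. A self-contained sketch of the same pattern outside the kernel follows; dispatch and dispatch_erased are illustrative names, not Serenity API.

```cpp
#include <iostream>
#include <utility>

// Low-level API that only understands a C-style callback plus a context
// pointer and a matching free function, like the raw smp_unicast overload.
static void dispatch_erased(void (*callback)(void*), void* data, void (*free_data)(void*))
{
    callback(data); // in the kernel this would run on the target CPU
    if (free_data)
        free_data(data);
}

// High-level wrapper that accepts any callable, mirroring the new template.
template<typename Callback>
static void dispatch(Callback callback)
{
    auto* data = new Callback(std::move(callback)); // keep captures alive on the heap
    dispatch_erased(
        [](void* data) {
            (*reinterpret_cast<Callback*>(data))(); // invoke through the erased pointer
        },
        data,
        [](void* data) {
            delete reinterpret_cast<Callback*>(data); // free once it has run
        });
}

int main()
{
    int captured = 42;
    dispatch([captured] { std::cout << "captured value: " << captured << '\n'; });
}
```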
@@ -999,7 +1017,7 @@ public:
     void switch_context(Thread*& from_thread, Thread*& to_thread);
     [[noreturn]] static void assume_context(Thread& thread, u32 flags);
     u32 init_context(Thread& thread, bool leave_crit);
-    static bool get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip);
+    static bool get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip, bool = false);
 
     void set_thread_specific(u8* data, size_t len);
 };