
Kernel: Push ARCH specific ifdef's down into RegisterState functions

The non-CPU-specific code of the kernel shouldn't need to deal with
architecture-specific registers, and should instead deal with an
abstract view of the machine. This allows us to remove a variety of
architecture-specific ifdefs and helps keep the code slightly more
portable.

We do this by exposing an abstract representation of the instruction
pointer, stack pointer, base pointer, return register, etc. on the
RegisterState struct.
Authored by Brian Gianforcaro on 2021-07-18 16:50:08 -07:00; committed by Gunnar Beutner
parent ff8429a749
commit 1cffecbe8d
9 changed files with 119 additions and 97 deletions
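
In short, the register selection that call sites used to open-code behind #if ARCH(...) now lives in a single accessor on the register-state struct, and generic code just calls ip(), bp(), flags(), and friends. The hunks below show the real before/after; the standalone sketch here only distills the idea, using the compiler's __i386__ macro and a heavily simplified struct rather than the kernel's actual ARCH() macro and field layout.

// Illustrative sketch only -- not the kernel's RegisterState. It shows the
// pattern of pushing the architecture #if down into one accessor.
#include <cstdint>
#include <cstdio>

using FlatPtr = std::uintptr_t;

struct RegisterState {
#if defined(__i386__)
    FlatPtr eip { 0 };
#else
    FlatPtr rip { 0 };
#endif

    // The one place that knows which field is "the instruction pointer".
    FlatPtr ip() const
    {
#if defined(__i386__)
        return eip;
#else
        return rip;
#endif
    }
};

// Generic code: no architecture-specific register names, no #ifdefs.
void report_crash(RegisterState const& regs)
{
    std::printf("crashed at ip=%#llx\n", static_cast<unsigned long long>(regs.ip()));
}

int main()
{
    RegisterState regs {};
    report_crash(regs);
}

The same shape covers the setters (set_ip_reg, set_return_reg) and the multi-register capture_syscall_params helper added in the first hunk below.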

View file

@@ -67,6 +67,75 @@ struct [[gnu::packed]] RegisterState {
     FlatPtr userspace_rsp;
     FlatPtr userspace_ss;
 #endif
+
+    FlatPtr userspace_sp() const
+    {
+#if ARCH(I386)
+        return userspace_esp;
+#else
+        return userspace_rsp;
+#endif
+    }
+
+    FlatPtr ip() const
+    {
+#if ARCH(I386)
+        return eip;
+#else
+        return rip;
+#endif
+    }
+
+    FlatPtr bp() const
+    {
+#if ARCH(I386)
+        return ebp;
+#else
+        return rbp;
+#endif
+    }
+
+    FlatPtr flags() const
+    {
+#if ARCH(I386)
+        return eflags;
+#else
+        return rflags;
+#endif
+    }
+
+    void capture_syscall_params(FlatPtr& function, FlatPtr& arg1, FlatPtr& arg2, FlatPtr& arg3) const
+    {
+#if ARCH(I386)
+        function = eax;
+        arg1 = edx;
+        arg2 = ecx;
+        arg3 = ebx;
+#else
+        function = rax;
+        arg1 = rdx;
+        arg2 = rcx;
+        arg3 = rbx;
+#endif
+    }
+
+    void set_ip_reg(FlatPtr value)
+    {
+#if ARCH(I386)
+        eip = value;
+#else
+        rip = value;
+#endif
+    }
+
+    void set_return_reg(FlatPtr value)
+    {
+#if ARCH(I386)
+        eax = value;
+#else
+        rax = value;
+#endif
+    }
 };

 #if ARCH(I386)

View file

@@ -228,13 +228,7 @@ void handle_crash(RegisterState& regs, const char* description, int signal, bool
         PANIC("Crash in ring 0");
     }

-    FlatPtr ip;
-#if ARCH(I386)
-    ip = regs.eip;
-#else
-    ip = regs.rip;
-#endif
-    process->crash(signal, ip, out_of_memory);
+    process->crash(signal, regs.ip(), out_of_memory);
 }

 EH_ENTRY_NO_CODE(6, illegal_instruction);
@@ -316,12 +310,7 @@ void page_fault_handler(TrapFrame* trap)
         current_thread->set_handling_page_fault(false);
     };

-    VirtualAddress userspace_sp;
-#if ARCH(I386)
-    userspace_sp = VirtualAddress { regs.userspace_esp };
-#else
-    userspace_sp = VirtualAddress { regs.userspace_rsp };
-#endif
+    VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
     if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process(), userspace_sp)) {
         dbgln("Invalid stack pointer: {}", userspace_sp);
         handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);

View file

@@ -546,14 +546,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         // to be ebp.
         ProcessPagingScope paging_scope(thread.process());
         auto& regs = thread.regs();
-        FlatPtr* stack_top;
-        FlatPtr sp;
-#if ARCH(I386)
-        sp = regs.esp;
-#else
-        sp = regs.rsp;
-#endif
-        stack_top = reinterpret_cast<FlatPtr*>(sp);
+        FlatPtr* stack_top = reinterpret_cast<FlatPtr*>(regs.sp());
         if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
             if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
                 frame_ptr = 0;
@@ -562,11 +555,9 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
             if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
                 frame_ptr = 0;
         }
-#if ARCH(I386)
-        ip = regs.eip;
-#else
-        ip = regs.rip;
-#endif
+        ip = regs.ip();
         // TODO: We need to leave the scheduler lock here, but we also
         // need to prevent the target thread from being run while
         // we walk the stack
@@ -1222,12 +1213,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
     // the scheduler lock. We don't want to enable interrupts at this point
     // as we're still in the middle of a context switch. Doing so could
     // trigger a context switch within a context switch, leading to a crash.
-    FlatPtr flags;
-#if ARCH(I386)
-    flags = trap->regs->eflags;
-#else
-    flags = trap->regs->rflags;
-#endif
+    FlatPtr flags = trap->regs->flags();
     Scheduler::leave_on_first_switch(flags & ~0x200);
 }

View file

@@ -261,12 +261,8 @@ NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* v
 bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
 {
-    FlatPtr ip;
-#if ARCH(I386)
-    ip = regs.eip;
-#else
-    ip = regs.rip;
-#endif
+    FlatPtr ip = regs.ip();
+    ;
     if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
         // If we detect that the fault happened in safe_memcpy() safe_strnlen(),
         // or safe_memset() then resume at the appropriate _faulted label

View file

@@ -235,7 +235,7 @@ bool Scheduler::pick_next()
         dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
             Processor::id(),
             thread_to_schedule,
-            thread_to_schedule.regs().cs, thread_to_schedule.regs().eip);
+            thread_to_schedule.regs().cs, thread_to_schedule.regs().ip());
 #else
         dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:016x}",
             Processor::id(),
@@ -295,13 +295,16 @@ bool Scheduler::context_switch(Thread* thread)
         from_thread->set_state(Thread::Runnable);

 #ifdef LOG_EVERY_CONTEXT_SWITCH
+        const auto msg =
 #    if ARCH(I386)
-        dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}", Processor::id(), from_thread->tid().value(),
-            thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().eip);
+            "Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:08x}";
 #    else
-        dbgln("Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:16x}", Processor::id(), from_thread->tid().value(),
-            thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().rip);
+            "Scheduler[{}]: {} -> {} [prio={}] {:04x}:{:16x}";
 #    endif
+        dbgln(msg,
+            Processor::id(), from_thread->tid().value(),
+            thread->tid().value(), thread->priority(), thread->regs().cs, thread->regs().ip());
 #endif
     }
@@ -322,14 +325,8 @@ bool Scheduler::context_switch(Thread* thread)
     VERIFY(thread == Thread::current());

     if (thread->process().is_user_process()) {
-        FlatPtr flags;
         auto& regs = Thread::current()->get_register_dump_from_stack();
-#if ARCH(I386)
-        flags = regs.eflags;
-#else
-        flags = regs.rflags;
-#endif
-        auto iopl = get_iopl_from_eflags(flags);
+        auto iopl = get_iopl_from_eflags(regs.flags());
         if (iopl != 0) {
             PANIC("Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl);
         }
@@ -604,15 +601,9 @@ void dump_thread_list(bool with_stack_traces)
     };

     auto get_eip = [](Thread& thread) -> u32 {
-#if ARCH(I386)
         if (!thread.current_trap())
-            return thread.regs().eip;
-        return thread.get_register_dump_from_stack().eip;
+            return thread.regs().ip();
+        return thread.get_register_dump_from_stack().ip();
-#else
-        if (!thread.current_trap())
-            return thread.regs().rip;
-        return thread.get_register_dump_from_stack().rip;
-#endif
     };

     Thread::for_each([&](Thread& thread) {

View file

@@ -107,11 +107,7 @@ KResultOr<FlatPtr> handle(RegisterState& regs, FlatPtr function, FlatPtr arg1, F
     // These syscalls need special handling since they never return to the caller.

     if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
-#if ARCH(I386)
-        regs.eax = 0;
-#else
-        regs.rax = 0;
-#endif
+        regs.set_return_reg(0);
         tracer->set_trace_syscalls(false);
         process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
     }
@@ -232,31 +228,17 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
         handle_crash(regs, "Syscall from non-syscall region", SIGSEGV);
     }

-#if ARCH(I386)
-    auto function = regs.eax;
-    auto arg1 = regs.edx;
-    auto arg2 = regs.ecx;
-    auto arg3 = regs.ebx;
-#else
-    auto function = regs.rax;
-    auto arg1 = regs.rdx;
-    auto arg2 = regs.rcx;
-    auto arg3 = regs.rbx;
-#endif
+    FlatPtr function;
+    FlatPtr arg1;
+    FlatPtr arg2;
+    FlatPtr arg3;
+    regs.capture_syscall_params(function, arg1, arg2, arg3);

     auto result = Syscall::handle(regs, function, arg1, arg2, arg3);

     if (result.is_error()) {
-#if ARCH(I386)
-        regs.eax = result.error();
-#else
-        regs.rax = result.error();
-#endif
+        regs.set_return_reg(result.error());
     } else {
-#if ARCH(I386)
-        regs.eax = result.value();
-#else
-        regs.rax = result.value();
-#endif
+        regs.set_return_reg(result.value());
     }

     process.big_lock().unlock();

View file

@@ -13,12 +13,7 @@ namespace Kernel {

 KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_base, Userspace<size_t*> user_stack_size)
 {
     auto& regs = Thread::current()->get_register_dump_from_stack();
-    FlatPtr stack_pointer;
-#if ARCH(I386)
-    stack_pointer = regs.userspace_esp;
-#else
-    stack_pointer = regs.userspace_rsp;
-#endif
+    FlatPtr stack_pointer = regs.userspace_sp();
     auto* stack_region = space().find_region_containing(Range { VirtualAddress(stack_pointer), 1 });
     // The syscall handler should have killed us if we had an invalid stack pointer.

View file

@@ -1028,16 +1028,12 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
     auto& regs = get_register_dump_from_stack();
     setup_stack(regs);
     auto signal_trampoline_addr = process.signal_trampoline().get();
-#if ARCH(I386)
-    regs.eip = signal_trampoline_addr;
-#else
-    regs.rip = signal_trampoline_addr;
-#endif
+    regs.set_ip_reg(signal_trampoline_addr);

 #if ARCH(I386)
-    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_regs.cs, m_regs.eip, signal);
+    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_regs.cs, m_regs.ip(), signal);
 #else
-    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:16x} to deliver {}", state_string(), m_regs.cs, m_regs.rip, signal);
+    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:16x} to deliver {}", state_string(), m_regs.cs, m_regs.ip(), signal);
 #endif

     return DispatchSignalResult::Continue;

View file

@@ -108,6 +108,24 @@ struct ThreadRegisters {
     FlatPtr rflags;
 #endif
     FlatPtr cr3;
+
+    FlatPtr ip() const
+    {
+#if ARCH(I386)
+        return eip;
+#else
+        return rip;
+#endif
+    }
+
+    FlatPtr sp() const
+    {
+#if ARCH(I386)
+        return esp;
+#else
+        return rsp;
+#endif
+    }
 };

 class Thread