Kernel: Track previous mode when entering/exiting traps
This allows us to determine the previous mode (user or kernel), e.g. in the timer interrupt. This is used, for example, to determine whether a signal handler should be set up. Fixes #5096
commit 0bd558081e (parent fa18010477)

6 changed files with 70 additions and 7 deletions
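
The core of the change is reading the saved cs selector of a trap frame: the low two bits of cs are the privilege level the CPU was running at when the trap fired, so (cs & 3) != 0 means the trap interrupted user mode. Below is a minimal, self-contained sketch of just that check; the selector values 0x08 and 0x1b are typical x86 kernel/user code selectors and are used here purely for illustration.

#include <cstdint>
#include <cstdio>

enum class PreviousMode { KernelMode, UserMode };

// The low two bits of a code segment selector are the privilege level:
// ring 0 means kernel, anything else (ring 3 in practice) means user.
static PreviousMode mode_from_cs(uint32_t cs)
{
    return (cs & 3) != 0 ? PreviousMode::UserMode : PreviousMode::KernelMode;
}

int main()
{
    printf("cs=0x08 -> %s\n", mode_from_cs(0x08) == PreviousMode::UserMode ? "user" : "kernel");
    printf("cs=0x1b -> %s\n", mode_from_cs(0x1b) == PreviousMode::UserMode ? "user" : "kernel");
    return 0;
}
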
@@ -1463,6 +1463,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
     trap.regs = &iretframe;
     trap.prev_irq_level = 0;
+    trap.next_trap = nullptr;

     stack_top -= sizeof(u32); // pointer to TrapFrame
     *reinterpret_cast<u32*>(stack_top) = stack_top + 4;

@@ -1612,6 +1613,15 @@ void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
     trap.prev_irq_level = m_in_irq;
     if (raise_irq)
         m_in_irq++;
+    if (m_current_thread) {
+        auto& current_trap = m_current_thread->current_trap();
+        trap.next_trap = current_trap;
+        current_trap = &trap;
+        // The cs register of this trap tells us where we will return back to
+        m_current_thread->set_previous_mode(((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
+    } else {
+        trap.next_trap = nullptr;
+    }
 }

 void Processor::exit_trap(TrapFrame& trap)

@@ -1624,6 +1634,21 @@ void Processor::exit_trap(TrapFrame& trap)

     if (!m_in_irq && !m_in_critical)
         check_invoke_scheduler();
+
+    if (m_current_thread) {
+        auto& current_trap = m_current_thread->current_trap();
+        current_trap = trap.next_trap;
+        if (current_trap) {
+            // If we have another higher level trap then we probably returned
+            // from an interrupt or irq handler. The cs register of the
+            // new/higher level trap tells us what the mode prior to it was
+            m_current_thread->set_previous_mode(((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode);
+        } else {
+            // If we don't have a higher level trap then we're back in user mode.
+            // Unless we're a kernel process, in which case we're always in kernel mode
+            m_current_thread->set_previous_mode(m_current_thread->process().is_kernel_process() ? Thread::PreviousMode::KernelMode : Thread::PreviousMode::UserMode);
+        }
+    }
 }

 void Processor::check_invoke_scheduler()
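
To make the enter_trap()/exit_trap() changes above easier to follow, here is a hedged userspace simulation of the per-thread trap chain they maintain. FakeThread, FakeTrapFrame and the selector values are invented for illustration and are not SerenityOS types; the logic mirrors the diff: entering a trap pushes it onto the chain and derives the previous mode from its saved cs, exiting pops it and re-derives the mode from the next outer trap (or, when the chain is empty, from whether the process is a kernel process).

#include <cstdint>
#include <cstdio>

enum class PreviousMode { KernelMode, UserMode };

struct FakeTrapFrame {
    uint32_t cs { 0 };                      // saved code segment of the interrupted context
    FakeTrapFrame* next_trap { nullptr };
};

struct FakeThread {
    FakeTrapFrame* current_trap { nullptr };
    PreviousMode previous_mode { PreviousMode::UserMode };
    bool is_kernel_process { false };
};

// Push the new trap onto the thread's chain and derive the previous mode
// from the cs saved in this trap frame.
static void enter_trap(FakeThread& thread, FakeTrapFrame& trap)
{
    trap.next_trap = thread.current_trap;
    thread.current_trap = &trap;
    thread.previous_mode = (trap.cs & 3) != 0 ? PreviousMode::UserMode : PreviousMode::KernelMode;
}

// Pop the trap; if an outer trap remains, its cs tells us which mode we return
// to, otherwise we are back in the thread's normal mode.
static void exit_trap(FakeThread& thread, FakeTrapFrame& trap)
{
    thread.current_trap = trap.next_trap;
    if (thread.current_trap)
        thread.previous_mode = (thread.current_trap->cs & 3) != 0 ? PreviousMode::UserMode : PreviousMode::KernelMode;
    else
        thread.previous_mode = thread.is_kernel_process ? PreviousMode::KernelMode : PreviousMode::UserMode;
}

static const char* to_string(PreviousMode mode)
{
    return mode == PreviousMode::UserMode ? "user" : "kernel";
}

int main()
{
    FakeThread thread;

    FakeTrapFrame syscall_trap;
    syscall_trap.cs = 0x1b;                 // entered from user mode (RPL 3)
    FakeTrapFrame timer_trap;
    timer_trap.cs = 0x08;                   // nested IRQ taken while already in the kernel

    enter_trap(thread, syscall_trap);
    printf("after syscall entry: %s\n", to_string(thread.previous_mode)); // user
    enter_trap(thread, timer_trap);
    printf("after nested IRQ:    %s\n", to_string(thread.previous_mode)); // kernel
    exit_trap(thread, timer_trap);
    printf("after IRQ exit:      %s\n", to_string(thread.previous_mode)); // user
    exit_trap(thread, syscall_trap);
    printf("after syscall exit:  %s\n", to_string(thread.previous_mode)); // user
    return 0;
}
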
@@ -1078,6 +1078,7 @@ private:

 struct TrapFrame {
     u32 prev_irq_level;
+    TrapFrame* next_trap;
     RegisterState* regs; // must be last

     TrapFrame() = delete;

@@ -1087,7 +1088,7 @@ struct TrapFrame {
     TrapFrame& operator=(TrapFrame&&) = delete;
 };

-#define TRAP_FRAME_SIZE (2 * 4)
+#define TRAP_FRAME_SIZE (3 * 4)
 static_assert(TRAP_FRAME_SIZE == sizeof(TrapFrame));

 extern "C" void enter_trap_no_irq(TrapFrame*);
@@ -369,11 +369,13 @@ bool Scheduler::context_switch(Thread* thread)
     ASSERT(thread == Thread::current());

 #if ARCH(I386)
-    auto iopl = get_iopl_from_eflags(Thread::current()->get_register_dump_from_stack().eflags);
-    if (thread->process().is_user_process() && iopl != 0) {
-        dbgln("PANIC: Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl);
-        Processor::halt();
-    }
+    if (thread->process().is_user_process()) {
+        auto iopl = get_iopl_from_eflags(Thread::current()->get_register_dump_from_stack().eflags);
+        if (iopl != 0) {
+            dbgln("PANIC: Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl);
+            Processor::halt();
+        }
+    }
 #endif

     return true;
@@ -392,7 +394,7 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first)
     // Check if we have any signals we should deliver (even if we don't
     // end up switching to another thread).
     auto current_thread = Thread::current();
-    if (!current_thread->is_in_block()) {
+    if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode) {
         ScopedSpinLock lock(current_thread->get_lock());
         if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
             current_thread->dispatch_one_pending_signal();

@@ -485,6 +487,10 @@ void Scheduler::timer_tick(const RegisterState& regs)
     if (!current_thread)
         return;

+    // Sanity checks
+    ASSERT(current_thread->current_trap());
+    ASSERT(current_thread->current_trap()->regs == &regs);
+
     bool is_bsp = Processor::current().id() == 0;
     if (!is_bsp)
         return; // TODO: This prevents scheduling on other CPUs!
@@ -137,6 +137,7 @@ void syscall_handler(TrapFrame* trap)
 {
     auto& regs = *trap->regs;
     auto current_thread = Thread::current();
+    ASSERT(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
     auto& process = current_thread->process();

     if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {

@@ -206,6 +207,9 @@ void syscall_handler(TrapFrame* trap)

     current_thread->check_dispatch_pending_signal();

+    // If the previous mode somehow changed something is seriously messed up...
+    ASSERT(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
+
     // Check if we're supposed to return to userspace or just die.
     current_thread->die_if_needed();
@@ -699,6 +699,8 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
         return DispatchSignalResult::Deferred;
     }

+    ASSERT(previous_mode() == PreviousMode::UserMode);
+
     auto& action = m_signal_action_data[signal];
     // FIXME: Implement SA_SIGINFO signal handlers.
     ASSERT(!(action.flags & SA_SIGINFO));

@@ -762,6 +764,9 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
         return DispatchSignalResult::Continue;
     }

+    ASSERT(previous_mode() == PreviousMode::UserMode);
+    ASSERT(current_trap());
+
     ProcessPagingScope paging_scope(m_process);

     u32 old_signal_mask = m_signal_mask;

@@ -843,7 +848,19 @@ bool Thread::push_value_on_stack(FlatPtr value)

 RegisterState& Thread::get_register_dump_from_stack()
 {
-    return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState));
+    auto* trap = current_trap();
+
+    // We should *always* have a trap. If we don't we're probably a kernel
+    // thread that hasn't been pre-empted. If we want to support this, we
+    // need to capture the registers probably into m_tss and return it
+    ASSERT(trap);
+
+    while (trap) {
+        if (!trap->next_trap)
+            break;
+        trap = trap->next_trap;
+    }
+    return *trap->regs;
 }

 RefPtr<Thread> Thread::clone(Process& process)
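
The new get_register_dump_from_stack() above no longer assumes the RegisterState sits at a fixed offset below the kernel stack top; it walks the trap chain to its outermost frame, which is the one that captured the registers of the interrupted (normally userspace) context. Here is a small self-contained sketch of that walk, with hypothetical FakeTrapFrame/FakeRegisterState types standing in for the kernel ones.

#include <cassert>
#include <cstdio>

struct FakeRegisterState {
    unsigned eflags { 0 };
};

struct FakeTrapFrame {
    FakeTrapFrame* next_trap { nullptr };
    FakeRegisterState* regs { nullptr };
};

// Walk the chain to the outermost (first-entered) trap; that frame holds the
// register state captured when the thread last left userspace.
static FakeRegisterState& register_dump_from_trap_chain(FakeTrapFrame* trap)
{
    assert(trap);
    while (trap->next_trap)
        trap = trap->next_trap;
    return *trap->regs;
}

int main()
{
    FakeRegisterState user_regs;
    user_regs.eflags = 0x202;          // what userspace was running with

    FakeRegisterState kernel_regs;
    kernel_regs.eflags = 0x2;

    FakeTrapFrame syscall_trap;        // outermost: entered from userspace
    syscall_trap.regs = &user_regs;

    FakeTrapFrame irq_trap;            // nested IRQ taken while in the kernel
    irq_trap.regs = &kernel_regs;
    irq_trap.next_trap = &syscall_trap;

    // Starting from the innermost trap, we still recover the userspace registers.
    printf("eflags=%#x\n", register_dump_from_trap_chain(&irq_trap).eflags);
    return 0;
}
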
@@ -1106,6 +1106,14 @@ public:
     u32 ticks_in_user() const { return m_ticks_in_user; }
     u32 ticks_in_kernel() const { return m_ticks_in_kernel; }

+    enum class PreviousMode : u8 {
+        KernelMode = 0,
+        UserMode
+    };
+    PreviousMode previous_mode() const { return m_previous_mode; }
+    void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; }
+    TrapFrame*& current_trap() { return m_current_trap; }
+
     RecursiveSpinLock& get_lock() const { return m_lock; }

 #if LOCK_DEBUG

@@ -1230,6 +1238,7 @@ private:
     NonnullRefPtr<Process> m_process;
     ThreadID m_tid { -1 };
     TSS32 m_tss;
+    TrapFrame* m_current_trap { nullptr };
     Atomic<u32> m_cpu { 0 };
     u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
     u32 m_ticks_left { 0 };

@@ -1261,6 +1270,7 @@ private:
     Atomic<bool> m_is_active { false };
     bool m_is_joinable { true };
     bool m_handling_page_fault { false };
+    PreviousMode m_previous_mode { PreviousMode::UserMode };

     unsigned m_syscall_count { 0 };
     unsigned m_inode_faults { 0 };