From f27e7bbbf431fc061bcb102c4533a58bb7101163 Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Sun, 8 Aug 2021 22:22:38 +0200
Subject: [PATCH] Kernel/SMP: Don't enable interrupts in Processor::exit_trap

Enter a critical section in Processor::exit_trap so that processing
SMP messages doesn't enable interrupts upon leaving. We need to delay
this until the end where we call into the Scheduler if exiting the
trap results in being outside of a critical section and irq handler.

Co-authored-by: Tom
---
 Kernel/Arch/x86/common/Processor.cpp | 25 +++++++++++++++++++++++--
 Kernel/Arch/x86/i386/Processor.cpp   |  4 ++--
 Kernel/Arch/x86/x86_64/Processor.cpp |  4 ++--
 3 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index e28bee102e..b437e891cb 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -602,6 +602,14 @@ void Processor::exit_trap(TrapFrame& trap)
 {
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(&Processor::current() == this);
+
+    // Temporarily enter a critical section. This is to prevent critical
+    // sections entered and left within e.g. smp_process_pending_messages
+    // to trigger a context switch while we're executing this function
+    // See the comment at the end of the function why we don't use
+    // ScopedCritical here.
+    m_in_critical++;
+
     VERIFY(m_in_irq >= trap.prev_irq_level);
     m_in_irq = trap.prev_irq_level;
 
@@ -628,7 +636,11 @@ void Processor::exit_trap(TrapFrame& trap)
         current_thread->update_time_scheduled(Scheduler::current_time(), true, false);
     }
 
-    if (!m_in_irq && !m_in_critical)
+    // Leave the critical section without actually enabling interrupts.
+    // We don't want context switches to happen until we're explicitly
+    // triggering a switch in check_invoke_scheduler.
+    auto new_critical = m_in_critical.fetch_sub(1) - 1;
+    if (!m_in_irq && !new_critical)
         check_invoke_scheduler();
 }
 
@@ -636,6 +648,8 @@ void Processor::check_invoke_scheduler()
 {
     VERIFY(!m_in_irq);
     VERIFY(!m_in_critical);
+    VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(&Processor::current() == this);
     if (m_invoke_scheduler_async && m_scheduler_initialized) {
         m_invoke_scheduler_async = false;
         Scheduler::invoke_async();
@@ -1191,6 +1205,10 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
 
     Scheduler::enter_current(*from_thread, true);
 
+    auto in_critical = to_thread->saved_critical();
+    VERIFY(in_critical > 0);
+    Processor::current().restore_in_critical(in_critical);
+
     // Since we got here and don't have Scheduler::context_switch in the
     // call stack (because this is the first time we switched into this
     // context), we need to notify the scheduler so that it can release
@@ -1249,7 +1267,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
         write_cr3(to_regs.cr3);
 
     to_thread->set_cpu(processor.get_id());
-    processor.restore_in_critical(to_thread->saved_critical());
+
+    auto in_critical = to_thread->saved_critical();
+    VERIFY(in_critical > 0);
+    processor.restore_in_critical(in_critical);
 
     if (has_fxsr)
         asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));
diff --git a/Kernel/Arch/x86/i386/Processor.cpp b/Kernel/Arch/x86/i386/Processor.cpp
index a3fec8a10c..a7b38e0c54 100644
--- a/Kernel/Arch/x86/i386/Processor.cpp
+++ b/Kernel/Arch/x86/i386/Processor.cpp
@@ -185,6 +185,8 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
     VERIFY(is_kernel_mode());
 
     dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
+
+    // m_in_critical is restored in enter_thread_context
     from_thread->save_critical(m_in_critical);
 
     // clang-format off
@@ -230,8 +232,6 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
     // clang-format on
 
     dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
-
-    Processor::current().restore_in_critical(to_thread->saved_critical());
 }
 
 UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
diff --git a/Kernel/Arch/x86/x86_64/Processor.cpp b/Kernel/Arch/x86/x86_64/Processor.cpp
index 2eaf7f4be5..474c669160 100644
--- a/Kernel/Arch/x86/x86_64/Processor.cpp
+++ b/Kernel/Arch/x86/x86_64/Processor.cpp
@@ -169,6 +169,8 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
     VERIFY(is_kernel_mode());
 
     dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
+
+    // m_in_critical is restored in enter_thread_context
     from_thread->save_critical(m_in_critical);
 
     // clang-format off
@@ -238,8 +240,6 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
     // clang-format on
 
     dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
-
-    Processor::current().restore_in_critical(to_thread->saved_critical());
 }
 
 UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
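
The following is a minimal user-space sketch (not part of the patch) of the counter pattern the change relies on. All names here (g_in_critical, g_in_irq, enter_critical, leave_critical_and_maybe_enable_irqs, exit_trap_like) are invented for illustration and are not the kernel's API. The point is only to show why exit_trap holds its own reference on the per-CPU critical-section counter and later drops it with a plain fetch_sub, rather than relying on a ScopedCritical-style exit that would re-enable interrupts as soon as the count reaches zero.

#include <atomic>
#include <cstdio>

// Simplified per-CPU state; in the kernel these are members of Processor.
static std::atomic<unsigned> g_in_critical { 0 };
static unsigned g_in_irq = 0;
static bool g_interrupts_enabled = false;

static void enter_critical() { g_in_critical.fetch_add(1); }

// What a ScopedCritical-style exit would do: re-enable interrupts as soon
// as the counter drops back to zero.
static void leave_critical_and_maybe_enable_irqs()
{
    if (g_in_critical.fetch_sub(1) == 1)
        g_interrupts_enabled = true;
}

static void check_invoke_scheduler() { std::puts("scheduler may run now"); }

// Mirrors the shape of the patched exit_trap().
static void exit_trap_like()
{
    // Hold our own reference on the counter for the duration of trap exit.
    enter_critical();

    // A nested critical section, standing in for the ones taken while
    // processing pending SMP messages. Because the outer reference is
    // still held, the count never reaches zero here, so interrupts are
    // not re-enabled behind our back.
    enter_critical();
    leave_critical_and_maybe_enable_irqs();

    // Drop our reference without touching the interrupt flag, then decide
    // whether it is safe to let the scheduler run.
    auto new_critical = g_in_critical.fetch_sub(1) - 1;
    if (g_in_irq == 0 && new_critical == 0)
        check_invoke_scheduler();
}

int main()
{
    exit_trap_like();
    // Interrupts were never re-enabled while exit_trap_like() was running.
    std::printf("interrupts enabled: %s\n", g_interrupts_enabled ? "yes" : "no");
    return 0;
}

Running this prints "scheduler may run now" followed by "interrupts enabled: no": the nested enter/leave never flips the interrupt flag, and the scheduler is only considered after the outer reference has been dropped, which is the ordering the patch enforces in Processor::exit_trap.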