
Kernel/SMP: Make entering/leaving critical sections multi-processor safe

By making these functions static, we close a window where we could get
preempted after calling Processor::current() and be moved to another
processor.

Co-authored-by: Tom <tomut@yahoo.com>
Andreas Kling 2021-08-10 01:16:08 +02:00
parent 369e3da6a2
commit 9babb92a4b
11 changed files with 82 additions and 53 deletions
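
To illustrate the race being closed here, below is a condensed before/after view of enter_critical(), taken from the first diff below (class Processor); the comments are editorial and the snippet is a sketch for explanation, not additional kernel code.

// Before: enter_critical() was a member function, so callers wrote
//     Processor::current().enter_critical(prev_flags);
// current() resolves the per-CPU Processor object *before* interrupts are
// disabled inside enter_critical(). If the thread is preempted and migrated
// to another CPU in between, m_in_critical is incremented on a processor we
// are no longer running on.

// After: the static function disables interrupts first and only then resolves
// the current processor, so the increment always lands on the CPU we stay on.
ALWAYS_INLINE static void enter_critical(u32& prev_flags)
{
    prev_flags = cpu_flags(); // remember whether interrupts were enabled
    cli();                    // no preemption or CPU migration past this point
    AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
}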

View file

@@ -121,7 +121,7 @@ class Processor {
     u32 m_cpu;
     u32 m_in_irq;
-    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_in_critical;
+    volatile u32 m_in_critical {};
     static Atomic<u32> s_idle_cpu_mask;
     TSS m_tss;
@@ -334,32 +334,34 @@ public:
         return m_in_irq;
     }
-    ALWAYS_INLINE void restore_in_critical(u32 critical)
+    ALWAYS_INLINE static void restore_in_critical(u32 critical)
     {
-        m_in_critical = critical;
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
     }
-    ALWAYS_INLINE void enter_critical(u32& prev_flags)
+    ALWAYS_INLINE static void enter_critical(u32& prev_flags)
     {
         prev_flags = cpu_flags();
         cli();
-        m_in_critical++;
+        // NOTE: Up until this point we *could* have been preempted.
+        // Now interrupts are disabled, so calling current() is safe.
+        AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
     }
-    ALWAYS_INLINE void leave_critical(u32 prev_flags)
+private:
+    ALWAYS_INLINE void do_leave_critical(u32 prev_flags)
     {
-        cli(); // Need to prevent IRQs from interrupting us here!
         VERIFY(m_in_critical > 0);
         if (m_in_critical == 1) {
             if (!m_in_irq) {
                 deferred_call_execute_pending();
                 VERIFY(m_in_critical == 1);
             }
-            m_in_critical--;
+            m_in_critical = 0;
             if (!m_in_irq)
                 check_invoke_scheduler();
         } else {
-            m_in_critical--;
+            m_in_critical = m_in_critical - 1;
         }
         if (prev_flags & 0x200)
             sti();
@@ -367,28 +369,53 @@ public:
             cli();
     }
-    ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
+public:
+    ALWAYS_INLINE static void leave_critical(u32 prev_flags)
     {
-        prev_flags = cpu_flags();
-        u32 prev_crit = m_in_critical.exchange(0, AK::MemoryOrder::memory_order_acquire);
-        if (!m_in_irq)
-            check_invoke_scheduler();
-        if (enable_interrupts)
-            sti();
-        return prev_crit;
+        cli(); // Need to prevent IRQs from interrupting us here!
+        // NOTE: Up until this point we *could* have been preempted!
+        // Now interrupts are disabled, so calling current() is safe
+        current().do_leave_critical(prev_flags);
     }
-    ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
+    ALWAYS_INLINE static u32 clear_critical(u32& prev_flags, bool enable_interrupts)
     {
-        m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
-        VERIFY(!prev_crit || !(prev_flags & 0x200));
+        cli();
+        // NOTE: Up until this point we *could* have been preempted!
+        // Now interrupts are disabled, so calling current() is safe
+        // This doesn't have to be atomic, and it's also fine if we
+        // were to be preempted in between these steps (which should
+        // not happen due to the cli call), but if we moved to another
+        // processors m_in_critical would move along with us
+        auto prev_critical = read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
+        auto& proc = current();
+        if (!proc.m_in_irq)
+            proc.check_invoke_scheduler();
+        if (enable_interrupts || (prev_flags & 0x200))
+            sti();
+        return prev_critical;
+    }
+    ALWAYS_INLINE static void restore_critical(u32 prev_critical, u32 prev_flags)
+    {
+        // NOTE: This doesn't have to be atomic, and it's also fine if we
+        // get preempted in between these steps. If we move to another
+        // processors m_in_critical will move along with us. And if we
+        // are preempted, we would resume with the same flags.
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
+        VERIFY(!prev_critical || !(prev_flags & 0x200));
         if (prev_flags & 0x200)
             sti();
         else
             cli();
     }
-    ALWAYS_INLINE u32 in_critical() { return m_in_critical.load(); }
+    ALWAYS_INLINE static u32 in_critical()
+    {
+        // See comment in Processor::current_thread
+        return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+    }
     ALWAYS_INLINE const FPUState& clean_fpu_state() const
     {

View file

@@ -46,14 +46,14 @@ public:
     {
         VERIFY(m_valid);
         m_valid = false;
-        Processor::current().leave_critical(m_prev_flags);
+        Processor::leave_critical(m_prev_flags);
     }
     void enter()
     {
         VERIFY(!m_valid);
         m_valid = true;
-        Processor::current().enter_critical(m_prev_flags);
+        Processor::enter_critical(m_prev_flags);
     }
 private:

View file

@@ -613,7 +613,7 @@ void Processor::exit_trap(TrapFrame& trap)
     // to trigger a context switch while we're executing this function
     // See the comment at the end of the function why we don't use
     // ScopedCritical here.
-    m_in_critical++;
+    m_in_critical = m_in_critical + 1;
     VERIFY(m_in_irq >= trap.prev_irq_level);
     m_in_irq = trap.prev_irq_level;
@@ -646,11 +646,13 @@ void Processor::exit_trap(TrapFrame& trap)
         current_thread->update_time_scheduled(Scheduler::current_time(), true, false);
     }
+    VERIFY_INTERRUPTS_DISABLED();
     // Leave the critical section without actually enabling interrupts.
     // We don't want context switches to happen until we're explicitly
     // triggering a switch in check_invoke_scheduler.
-    auto new_critical = m_in_critical.fetch_sub(1) - 1;
-    if (!m_in_irq && !new_critical)
+    m_in_critical = m_in_critical - 1;
+    if (!m_in_irq && !m_in_critical)
         check_invoke_scheduler();
 }
@@ -730,7 +732,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
 u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
 {
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
     VERIFY(wake_count > 0);
     if (!s_smp_enabled)
         return 0;
@@ -1311,7 +1313,7 @@ void Processor::assume_context(Thread& thread, FlatPtr flags)
     Scheduler::prepare_after_exec();
     // in_critical() should be 2 here. The critical section in Process::exec
     // and then the scheduler lock
-    VERIFY(Processor::current().in_critical() == 2);
+    VERIFY(Processor::in_critical() == 2);
     do_assume_context(&thread, flags);

View file

@@ -70,8 +70,8 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
     if (leave_crit) {
         // Leave the critical section we set up in in Process::exec,
         // but because we still have the scheduler lock we should end up with 1
-        m_in_critical--; // leave it without triggering anything or restoring flags
-        VERIFY(in_critical() == 1);
+        VERIFY(in_critical() == 2);
+        m_in_critical = 1; // leave it without triggering anything or restoring flags
     }
     u32 kernel_stack_top = thread.kernel_stack_top();

View file

@@ -66,8 +66,8 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
     if (leave_crit) {
         // Leave the critical section we set up in in Process::exec,
         // but because we still have the scheduler lock we should end up with 1
-        m_in_critical--; // leave it without triggering anything or restoring flags
-        VERIFY(in_critical() == 1);
+        VERIFY(in_critical() == 2);
+        m_in_critical = 1; // leave it without triggering anything or restoring flags
     }
     u64 kernel_stack_top = thread.kernel_stack_top();

View file

@@ -212,7 +212,7 @@ static inline void kmalloc_verify_nospinlock_held()
 {
     // Catch bad callers allocating under spinlock.
     if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
-        VERIFY(!Processor::current().in_critical());
+        VERIFY(!Processor::in_critical());
     }
 }

View file

@@ -24,7 +24,7 @@ public:
     ALWAYS_INLINE u32 lock()
     {
         u32 prev_flags;
-        Processor::current().enter_critical(prev_flags);
+        Processor::enter_critical(prev_flags);
         while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
             Processor::wait_check();
         }
@@ -35,7 +35,7 @@ public:
     {
         VERIFY(is_locked());
         m_lock.store(0, AK::memory_order_release);
-        Processor::current().leave_critical(prev_flags);
+        Processor::leave_critical(prev_flags);
     }
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
@@ -64,7 +64,7 @@ public:
         auto& proc = Processor::current();
         FlatPtr cpu = FlatPtr(&proc);
         u32 prev_flags;
-        proc.enter_critical(prev_flags);
+        Processor::enter_critical(prev_flags);
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
@@ -82,7 +82,7 @@ public:
         VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
         if (--m_recursions == 0)
             m_lock.store(0, AK::memory_order_release);
-        Processor::current().leave_critical(prev_flags);
+        Processor::leave_critical(prev_flags);
     }
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const

View file

@@ -257,7 +257,7 @@ bool Scheduler::yield()
     auto current_thread = Thread::current();
     dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
     VERIFY(current_thread != nullptr);
-    if (proc.in_irq() || proc.in_critical()) {
+    if (proc.in_irq() || Processor::in_critical()) {
         // If we're handling an IRQ we can't switch context, or we're in
         // a critical section where we don't want to switch contexts, then
         // delay until exiting the trap or critical section

View file

@@ -297,7 +297,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
     FlatPtr load_base_address = 0;
     String elf_name = object_description.absolute_path();
-    VERIFY(!Processor::current().in_critical());
+    VERIFY(!Processor::in_critical());
     Memory::MemoryManager::enter_space(*new_space);
@@ -495,7 +495,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const ElfW(Ehdr) & main_program_header)
 {
     VERIFY(is_user_process());
-    VERIFY(!Processor::current().in_critical());
+    VERIFY(!Processor::in_critical());
     auto path = main_program_description->absolute_path();
     dbgln_if(EXEC_DEBUG, "do_exec: {}", path);
@@ -686,7 +686,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     u32 lock_count_to_restore;
     [[maybe_unused]] auto rc = big_lock().force_unlock_if_locked(lock_count_to_restore);
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
     return KSuccess;
 }
@@ -912,7 +912,7 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         return result;
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
     auto current_thread = Thread::current();
     if (current_thread == new_main_thread) {
@@ -920,14 +920,14 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         // and it will be released after the context switch into that
         // thread. We should also still be in our critical section
         VERIFY(!g_scheduler_lock.own_lock());
-        VERIFY(Processor::current().in_critical() == 1);
+        VERIFY(Processor::in_critical() == 1);
         g_scheduler_lock.lock();
         current_thread->set_state(Thread::State::Running);
         Processor::assume_context(*current_thread, prev_flags);
         VERIFY_NOT_REACHED();
     }
-    Processor::current().leave_critical(prev_flags);
+    Processor::leave_critical(prev_flags);
     return KSuccess;
 }

View file

@@ -214,7 +214,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
     for (;;) {
         // Yield to the scheduler, and wait for us to resume unblocked.
         VERIFY(!g_scheduler_lock.own_lock());
-        VERIFY(Processor::current().in_critical());
+        VERIFY(Processor::in_critical());
         if (&lock != &big_lock && big_lock.own_lock()) {
             // We're locking another lock and already hold the big lock...
             // We need to release the big lock
@@ -222,7 +222,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
         } else {
             yield_assuming_not_holding_big_lock();
         }
-        VERIFY(Processor::current().in_critical());
+        VERIFY(Processor::in_critical());
         ScopedSpinLock block_lock2(m_block_lock);
         if (should_be_stopped() || state() == Stopped) {
@@ -389,7 +389,7 @@ void Thread::die_if_needed()
     // Now leave the critical section so that we can also trigger the
     // actual context switch
     u32 prev_flags;
-    Processor::current().clear_critical(prev_flags, false);
+    Processor::clear_critical(prev_flags, false);
     dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
@@ -421,9 +421,9 @@ void Thread::yield_assuming_not_holding_big_lock()
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
     u32 prev_flags;
-    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
+    u32 prev_crit = Processor::clear_critical(prev_flags, true);
     // NOTE: We may be on a different CPU now!
-    Processor::current().restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_crit, prev_flags);
 }
 void Thread::yield_and_release_relock_big_lock()
@@ -452,12 +452,12 @@ void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
     // We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
     u32 prev_flags;
-    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
+    u32 prev_crit = Processor::clear_critical(prev_flags, true);
     // CONTEXT SWITCH HAPPENS HERE!
     // NOTE: We may be on a different CPU now!
-    Processor::current().restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_crit, prev_flags);
     if (previous_locked != LockMode::Unlocked) {
         // We've unblocked, relock the process if needed and carry on.

View file

@@ -921,9 +921,9 @@ public:
         for (;;) {
             // Yield to the scheduler, and wait for us to resume unblocked.
             VERIFY(!g_scheduler_lock.own_lock());
-            VERIFY(Processor::current().in_critical());
+            VERIFY(Processor::in_critical());
            yield_assuming_not_holding_big_lock();
-            VERIFY(Processor::current().in_critical());
+            VERIFY(Processor::in_critical());
             ScopedSpinLock block_lock2(m_block_lock);
             if (should_be_stopped() || state() == Stopped) {