
Kernel/SMP: Make entering/leaving critical sections multi-processor safe

By making these functions static we close a window where we could get
preempted after calling Processor::current() and move to another
processor.

Co-authored-by: Tom <tomut@yahoo.com>
Andreas Kling 2021-08-10 01:16:08 +02:00
parent 369e3da6a2
commit 9babb92a4b
11 changed files with 82 additions and 53 deletions
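The window in question is the classic per-CPU lookup hazard: Processor::current() resolves the Processor object of whichever CPU the code happens to be running on, and nothing stopped the scheduler from migrating the thread between that lookup and the use of the returned reference. A minimal user-space model of the before/after shapes (all names below are invented for illustration; this is not kernel code):

    #include <atomic>

    struct CPU {
        std::atomic<unsigned> in_critical { 0 };
    };

    CPU g_cpus[4];
    thread_local unsigned g_current_cpu = 0; // stand-in for the gs-based CPU lookup

    void disable_interrupts() { } // cli() in the kernel; a no-op in this model

    // Pre-commit shape: the CPU is resolved *before* preemption is blocked,
    // so the increment can land on a processor we have already left.
    void enter_critical_racy()
    {
        CPU& cpu = g_cpus[g_current_cpu]; // like Processor::current()
        // <-- migration to another CPU here would leave 'cpu' stale
        cpu.in_critical.fetch_add(1, std::memory_order_relaxed);
    }

    // Post-commit shape: block preemption first, then resolve the current CPU.
    void enter_critical_safe()
    {
        disable_interrupts(); // nothing can migrate us past this point
        g_cpus[g_current_cpu].in_critical.fetch_add(1, std::memory_order_relaxed);
    }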

@@ -121,7 +121,7 @@ class Processor {
     u32 m_cpu;
     u32 m_in_irq;
-    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_in_critical;
+    volatile u32 m_in_critical {};
     static Atomic<u32> s_idle_cpu_mask;
     TSS m_tss;
@@ -334,32 +334,34 @@ public:
         return m_in_irq;
     }
 
-    ALWAYS_INLINE void restore_in_critical(u32 critical)
+    ALWAYS_INLINE static void restore_in_critical(u32 critical)
     {
-        m_in_critical = critical;
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
     }
 
-    ALWAYS_INLINE void enter_critical(u32& prev_flags)
+    ALWAYS_INLINE static void enter_critical(u32& prev_flags)
     {
         prev_flags = cpu_flags();
         cli();
-        m_in_critical++;
+        // NOTE: Up until this point we *could* have been preempted.
+        // Now interrupts are disabled, so calling current() is safe.
+        AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
     }
 
-    ALWAYS_INLINE void leave_critical(u32 prev_flags)
+private:
+    ALWAYS_INLINE void do_leave_critical(u32 prev_flags)
     {
         cli(); // Need to prevent IRQs from interrupting us here!
         VERIFY(m_in_critical > 0);
         if (m_in_critical == 1) {
             if (!m_in_irq) {
                 deferred_call_execute_pending();
                 VERIFY(m_in_critical == 1);
             }
-            m_in_critical--;
+            m_in_critical = 0;
             if (!m_in_irq)
                 check_invoke_scheduler();
         } else {
-            m_in_critical--;
+            m_in_critical = m_in_critical - 1;
         }
         if (prev_flags & 0x200)
             sti();
@@ -367,28 +369,53 @@ public:
             cli();
     }
 
-    ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
+public:
+    ALWAYS_INLINE static void leave_critical(u32 prev_flags)
     {
-        prev_flags = cpu_flags();
-        u32 prev_crit = m_in_critical.exchange(0, AK::MemoryOrder::memory_order_acquire);
-        if (!m_in_irq)
-            check_invoke_scheduler();
-        if (enable_interrupts)
-            sti();
-        return prev_crit;
+        cli(); // Need to prevent IRQs from interrupting us here!
+        // NOTE: Up until this point we *could* have been preempted!
+        // Now interrupts are disabled, so calling current() is safe
+        current().do_leave_critical(prev_flags);
     }
 
-    ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
+    ALWAYS_INLINE static u32 clear_critical(u32& prev_flags, bool enable_interrupts)
     {
-        m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
-        VERIFY(!prev_crit || !(prev_flags & 0x200));
+        cli();
+        // NOTE: Up until this point we *could* have been preempted!
+        // Now interrupts are disabled, so calling current() is safe
+        // This doesn't have to be atomic, and it's also fine if we
+        // were to be preempted in between these steps (which should
+        // not happen due to the cli call), but if we moved to another
+        // processor, m_in_critical would move along with us
+        auto prev_critical = read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
+        auto& proc = current();
+        if (!proc.m_in_irq)
+            proc.check_invoke_scheduler();
+        if (enable_interrupts || (prev_flags & 0x200))
+            sti();
+        return prev_critical;
+    }
+
+    ALWAYS_INLINE static void restore_critical(u32 prev_critical, u32 prev_flags)
+    {
+        // NOTE: This doesn't have to be atomic, and it's also fine if we
+        // get preempted in between these steps. If we move to another
+        // processor, m_in_critical will move along with us. And if we
+        // are preempted, we would resume with the same flags.
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
+        VERIFY(!prev_critical || !(prev_flags & 0x200));
         if (prev_flags & 0x200)
             sti();
         else
             cli();
     }
 
-    ALWAYS_INLINE u32 in_critical() { return m_in_critical.load(); }
+    ALWAYS_INLINE static u32 in_critical()
+    {
+        // See comment in Processor::current_thread
+        return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+    }
 
     ALWAYS_INLINE const FPUState& clean_fpu_state() const
     {
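After this hunk the public critical-section API is entirely static, so a caller never holds a Processor reference across a preemptible window. A hypothetical caller, to show the intended pairing (not code from this commit):

    u32 prev_flags;
    Processor::enter_critical(prev_flags);  // cli(), then bump this CPU's counter
    // ... touch per-CPU state; we cannot be migrated off this processor ...
    Processor::leave_critical(prev_flags);  // drop the counter, restore IF

    // Temporarily dropping the whole nesting, e.g. before blocking:
    u32 flags;
    u32 prev_critical = Processor::clear_critical(flags, false /* enable_interrupts */);
    // ... the scheduler may switch away here ...
    Processor::restore_critical(prev_critical, flags);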

@@ -46,14 +46,14 @@ public:
     {
         VERIFY(m_valid);
         m_valid = false;
-        Processor::current().leave_critical(m_prev_flags);
+        Processor::leave_critical(m_prev_flags);
     }
 
     void enter()
     {
         VERIFY(!m_valid);
         m_valid = true;
-        Processor::current().enter_critical(m_prev_flags);
+        Processor::enter_critical(m_prev_flags);
     }
 
 private:
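ScopedCritical itself only needed its two call sites updated; usage is unchanged. Assuming the usual RAII pairing of these enter()/leave() members in the constructor and destructor, it is used like this (illustrative):

    {
        ScopedCritical critical; // enter(): Processor::enter_critical(m_prev_flags)
        // ... code that must not be preempted off this processor ...
    } // leave(): Processor::leave_critical(m_prev_flags)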

@@ -613,7 +613,7 @@ void Processor::exit_trap(TrapFrame& trap)
     // to trigger a context switch while we're executing this function
     // See the comment at the end of the function why we don't use
     // ScopedCritical here.
-    m_in_critical++;
+    m_in_critical = m_in_critical + 1;
 
     VERIFY(m_in_irq >= trap.prev_irq_level);
     m_in_irq = trap.prev_irq_level;
@@ -646,11 +646,13 @@ void Processor::exit_trap(TrapFrame& trap)
         current_thread->update_time_scheduled(Scheduler::current_time(), true, false);
     }
 
+    VERIFY_INTERRUPTS_DISABLED();
+
     // Leave the critical section without actually enabling interrupts.
     // We don't want context switches to happen until we're explicitly
     // triggering a switch in check_invoke_scheduler.
-    auto new_critical = m_in_critical.fetch_sub(1) - 1;
-    if (!m_in_irq && !new_critical)
+    m_in_critical = m_in_critical - 1;
+    if (!m_in_irq && !m_in_critical)
         check_invoke_scheduler();
 }
@@ -730,7 +732,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
 
 u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
 {
-    VERIFY(Processor::current().in_critical());
+    VERIFY(Processor::in_critical());
     VERIFY(wake_count > 0);
     if (!s_smp_enabled)
         return 0;
@@ -1311,7 +1313,7 @@ void Processor::assume_context(Thread& thread, FlatPtr flags)
     Scheduler::prepare_after_exec();
 
     // in_critical() should be 2 here. The critical section in Process::exec
     // and then the scheduler lock
-    VERIFY(Processor::current().in_critical() == 2);
+    VERIFY(Processor::in_critical() == 2);
 
     do_assume_context(&thread, flags);
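The == 2 check works because m_in_critical is a nesting depth rather than a flag: every enter_critical() increments it and every leave_critical() decrements it. Schematically (a hypothetical sequence, for illustration only):

    u32 a, b;
    Processor::enter_critical(a); // depth 1, e.g. the critical section in Process::exec
    Processor::enter_critical(b); // depth 2, e.g. acquiring the scheduler lock
    VERIFY(Processor::in_critical() == 2);
    Processor::leave_critical(b); // depth 1
    Processor::leave_critical(a); // depth 0; deferred calls and the scheduler may run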

@@ -70,8 +70,8 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
     if (leave_crit) {
         // Leave the critical section we set up in Process::exec,
         // but because we still have the scheduler lock we should end up with 1
-        m_in_critical--; // leave it without triggering anything or restoring flags
-        VERIFY(in_critical() == 1);
+        VERIFY(in_critical() == 2);
+        m_in_critical = 1; // leave it without triggering anything or restoring flags
     }
 
     u32 kernel_stack_top = thread.kernel_stack_top();

@@ -66,8 +66,8 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
     if (leave_crit) {
         // Leave the critical section we set up in Process::exec,
         // but because we still have the scheduler lock we should end up with 1
-        m_in_critical--; // leave it without triggering anything or restoring flags
-        VERIFY(in_critical() == 1);
+        VERIFY(in_critical() == 2);
+        m_in_critical = 1; // leave it without triggering anything or restoring flags
     }
 
     u64 kernel_stack_top = thread.kernel_stack_top();