
Kernel/SMP: Change critical sections to not disable interrupts

Leave interrupts enabled so that we can still process IRQs. Critical
sections should only prevent preemption by another thread.

Co-authored-by: Tom <tomut@yahoo.com>
Andreas Kling 2021-08-10 01:56:21 +02:00
parent 364134ad4b
commit 0a02496f04
6 changed files with 42 additions and 60 deletions
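
What the change amounts to: entering a critical section used to mean "save RFLAGS, cli(), bump a counter", and leaving meant "drop the counter, conditionally sti()". Now entering and leaving only adjust the per-processor m_in_critical counter; IRQs keep being delivered, and the scheduler simply refuses to preempt a thread whose counter is non-zero. The places that genuinely need interrupts masked (the spinlocks, exec) now say so explicitly. A minimal single-processor model of the new semantics (plain C++; g_in_critical is a stand-in for the per-processor m_in_critical field, not the kernel code itself):

#include <cassert>
#include <cstdint>

// Stand-in for the per-processor m_in_critical field. In the kernel this is
// reached via a GS-relative access, so only the owning core ever touches it.
static uint32_t g_in_critical = 0;

static void enter_critical()
{
    // Interrupts stay enabled; we only forbid preemption.
    g_in_critical = g_in_critical + 1;
}

static void leave_critical()
{
    assert(g_in_critical > 0);
    g_in_critical = g_in_critical - 1;
    // The real do_leave_critical() also handles a deferred context switch
    // that became pending while the section was held.
}

// Scheduler side: preemption is allowed only at depth zero.
static bool may_preempt() { return g_in_critical == 0; }

int main()
{
    enter_critical();
    assert(!may_preempt()); // an IRQ may fire here, but it must not reschedule us
    leave_critical();
    assert(may_preempt());
    return 0;
}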

View file

@@ -339,17 +339,13 @@ public:
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
     }
 
-    ALWAYS_INLINE static void enter_critical(u32& prev_flags)
+    ALWAYS_INLINE static void enter_critical()
     {
-        prev_flags = cpu_flags();
-        cli();
-        // NOTE: Up until this point we *could* have been preempted.
-        // Now interrupts are disabled, so calling current() is safe.
-        AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
 private:
-    ALWAYS_INLINE void do_leave_critical(u32 prev_flags)
+    ALWAYS_INLINE void do_leave_critical()
     {
         VERIFY(m_in_critical > 0);
         if (m_in_critical == 1) {
@@ -363,52 +359,31 @@ private:
         } else {
             m_in_critical = m_in_critical - 1;
         }
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
     }
 
 public:
-    ALWAYS_INLINE static void leave_critical(u32 prev_flags)
+    ALWAYS_INLINE static void leave_critical()
     {
-        cli(); // Need to prevent IRQs from interrupting us here!
-        // NOTE: Up until this point we *could* have been preempted!
-        // Now interrupts are disabled, so calling current() is safe
-        current().do_leave_critical(prev_flags);
+        current().do_leave_critical();
     }
 
-    ALWAYS_INLINE static u32 clear_critical(u32& prev_flags, bool enable_interrupts)
+    ALWAYS_INLINE static u32 clear_critical()
     {
-        cli();
-        // NOTE: Up until this point we *could* have been preempted!
-        // Now interrupts are disabled, so calling current() is safe
-        // This doesn't have to be atomic, and it's also fine if we
-        // were to be preempted in between these steps (which should
-        // not happen due to the cli call), but if we moved to another
-        // processors m_in_critical would move along with us
-        auto prev_critical = read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+        auto prev_critical = in_critical();
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
         auto& proc = current();
         if (!proc.m_in_irq)
             proc.check_invoke_scheduler();
-        if (enable_interrupts || (prev_flags & 0x200))
-            sti();
         return prev_critical;
     }
 
-    ALWAYS_INLINE static void restore_critical(u32 prev_critical, u32 prev_flags)
+    ALWAYS_INLINE static void restore_critical(u32 prev_critical)
     {
         // NOTE: This doesn't have to be atomic, and it's also fine if we
         // get preempted in between these steps. If we move to another
         // processors m_in_critical will move along with us. And if we
         // are preempted, we would resume with the same flags.
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
-        VERIFY(!prev_critical || !(prev_flags & 0x200));
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
     }
 
     ALWAYS_INLINE static u32 in_critical()
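
Why the AK::atomic_fetch_add could become a plain GS-relative increment: m_in_critical is only ever mutated by code running on its own processor, so the only concurrency left is with IRQ handlers on the same core, and those always enter and leave their own critical sections in a balanced way before returning. A self-contained model of that nesting (irq_handler is a hypothetical stand-in for an interrupt arriving while a section is held):

#include <cassert>
#include <cstdint>

static uint32_t g_in_critical = 0; // models the per-CPU m_in_critical

// An IRQ that fires while the counter is N uses balanced enter/leave pairs,
// so the interrupted code always resumes with the counter back at N.
static void irq_handler()
{
    uint32_t depth_on_entry = g_in_critical;
    g_in_critical = g_in_critical + 1; // nested enter_critical()
    // ... service the interrupt ...
    g_in_critical = g_in_critical - 1; // nested leave_critical()
    assert(g_in_critical == depth_on_entry);
}

int main()
{
    g_in_critical = g_in_critical + 1; // enter_critical()
    irq_handler();                     // IRQs are no longer masked here
    assert(g_in_critical == 1);        // still inside exactly one section
    g_in_critical = g_in_critical - 1; // leave_critical()
    return 0;
}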

View file

@@ -28,15 +28,13 @@ public:
     }
 
     ScopedCritical(ScopedCritical&& from)
-        : m_prev_flags(exchange(from.m_prev_flags, 0))
-        , m_valid(exchange(from.m_valid, false))
+        : m_valid(exchange(from.m_valid, false))
     {
     }
 
     ScopedCritical& operator=(ScopedCritical&& from)
     {
         if (&from != this) {
-            m_prev_flags = exchange(from.m_prev_flags, 0);
             m_valid = exchange(from.m_valid, false);
         }
         return *this;
@@ -46,18 +44,17 @@ public:
     {
         VERIFY(m_valid);
         m_valid = false;
-        Processor::leave_critical(m_prev_flags);
+        Processor::leave_critical();
     }
 
     void enter()
     {
         VERIFY(!m_valid);
         m_valid = true;
-        Processor::enter_critical(m_prev_flags);
+        Processor::enter_critical();
     }
 
 private:
-    u32 m_prev_flags { 0 };
     bool m_valid { false };
 };
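
With m_prev_flags gone, ScopedCritical is a pure RAII wrapper around the counter, and its move operations only have to hand over m_valid so the counter is decremented exactly once. A self-contained model of that ownership transfer (std::exchange plays the role of AK's exchange; the global counter is illustrative, not the kernel's):

#include <cassert>
#include <utility> // std::exchange

static unsigned g_in_critical = 0; // models Processor's per-CPU counter

class ScopedCritical {
public:
    ScopedCritical() { enter(); }
    ~ScopedCritical()
    {
        if (m_valid)
            leave();
    }
    ScopedCritical(ScopedCritical&& from)
        : m_valid(std::exchange(from.m_valid, false)) // moved-from object becomes inert
    {
    }

    void leave()
    {
        assert(m_valid);
        m_valid = false;
        --g_in_critical; // Processor::leave_critical()
    }

    void enter()
    {
        assert(!m_valid);
        m_valid = true;
        ++g_in_critical; // Processor::enter_critical()
    }

private:
    bool m_valid { false };
};

int main()
{
    {
        ScopedCritical a;
        ScopedCritical b = std::move(a); // only 'b' is live now
        assert(g_in_critical == 1);      // exactly one section, not two
    }
    assert(g_in_critical == 0); // only 'b' decremented on destruction
    return 0;
}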

View file

@@ -658,9 +658,9 @@ void Processor::exit_trap(TrapFrame& trap)
 
 void Processor::check_invoke_scheduler()
 {
-    InterruptDisabler disabler;
     VERIFY(!m_in_irq);
     VERIFY(!m_in_critical);
+    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(&Processor::current() == this);
     if (m_invoke_scheduler_async && m_scheduler_initialized) {
         m_invoke_scheduler_async = false;
@@ -732,7 +732,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
 
 u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
 {
-    VERIFY(Processor::in_critical());
+    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(wake_count > 0);
     if (!s_smp_enabled)
         return 0;
@@ -817,8 +817,7 @@ bool Processor::smp_process_pending_messages()
     VERIFY(s_smp_enabled);
 
     bool did_process = false;
-    u32 prev_flags;
-    enter_critical(prev_flags);
+    enter_critical();
 
     if (auto pending_msgs = m_message_queue.exchange(nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
         // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
@@ -882,7 +881,7 @@ bool Processor::smp_process_pending_messages()
             halt_this();
     }
 
-    leave_critical(prev_flags);
+    leave_critical();
 
     return did_process;
 }
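
The verification changes follow from the same shift: being in a critical section no longer implies interrupts are masked, so code that actually requires masked interrupts, like check_invoke_scheduler() and smp_wake_n_idle_processors(), now asserts that directly with VERIFY_INTERRUPTS_DISABLED() instead of relying on (or creating) an InterruptDisabler. Conceptually that check boils down to the IF bit (mask 0x200) of the flags register being clear; a sketch of the idea, with the flags register modeled as a plain variable:

#include <cassert>
#include <cstdint>

static uint32_t g_cpu_flags = 0x200; // models RFLAGS; IF set = interrupts enabled

static void verify_interrupts_disabled()
{
    // Models VERIFY_INTERRUPTS_DISABLED(): the IF bit (mask 0x200) must be clear.
    assert((g_cpu_flags & 0x200) == 0);
}

int main()
{
    g_cpu_flags &= ~UINT32_C(0x200); // cli()
    verify_interrupts_disabled();    // would VERIFY-fail if IF were still set
    return 0;
}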

View file

@@ -23,8 +23,9 @@ public:
     ALWAYS_INLINE u32 lock()
     {
-        u32 prev_flags;
-        Processor::enter_critical(prev_flags);
+        u32 prev_flags = cpu_flags();
+        Processor::enter_critical();
+        cli();
         while (m_lock.exchange(1, AK::memory_order_acquire) != 0) {
             Processor::wait_check();
         }
@@ -35,7 +36,11 @@ public:
     {
         VERIFY(is_locked());
         m_lock.store(0, AK::memory_order_release);
-        Processor::leave_critical(prev_flags);
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+        Processor::leave_critical();
     }
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
@@ -63,8 +68,9 @@ public:
     {
         auto& proc = Processor::current();
         FlatPtr cpu = FlatPtr(&proc);
-        u32 prev_flags;
-        Processor::enter_critical(prev_flags);
+        u32 prev_flags = cpu_flags();
+        Processor::enter_critical();
+        cli();
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
@@ -82,7 +88,11 @@ public:
         VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
         if (--m_recursions == 0)
             m_lock.store(0, AK::memory_order_release);
-        Processor::leave_critical(prev_flags);
+        if (prev_flags & 0x200)
+            sti();
+        else
+            cli();
+        Processor::leave_critical();
     }
 
     [[nodiscard]] ALWAYS_INLINE bool is_locked() const
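
Interrupt masking has not disappeared; it has moved into the spinlocks themselves. lock() now saves RFLAGS, enters a critical section, and then does cli(), and unlock() restores the saved interrupt state (the 0x200 test is the IF bit) before leaving the critical section. A self-contained user-space model of the non-recursive lock, with a boolean standing in for the IF bit:

#include <atomic>
#include <cassert>
#include <cstdint>

static bool g_interrupts_enabled = true; // models RFLAGS.IF (bit 9, mask 0x200)
static uint32_t g_in_critical = 0;       // models the per-CPU counter

class SpinLock {
public:
    uint32_t lock()
    {
        uint32_t prev_flags = g_interrupts_enabled ? 0x200 : 0; // cpu_flags()
        ++g_in_critical;              // Processor::enter_critical()
        g_interrupts_enabled = false; // cli()
        while (m_lock.exchange(1, std::memory_order_acquire) != 0) {
            // Processor::wait_check() would pause the CPU here.
        }
        return prev_flags;
    }

    void unlock(uint32_t prev_flags)
    {
        assert(is_locked());
        m_lock.store(0, std::memory_order_release);
        g_interrupts_enabled = (prev_flags & 0x200) != 0; // sti() or cli()
        --g_in_critical;              // Processor::leave_critical()
    }

    bool is_locked() const { return m_lock.load(std::memory_order_relaxed) != 0; }

private:
    std::atomic<uint32_t> m_lock { 0 };
};

int main()
{
    SpinLock lock;
    uint32_t flags = lock.lock();
    assert(!g_interrupts_enabled && g_in_critical == 1);
    lock.unlock(flags);
    assert(g_interrupts_enabled && g_in_critical == 0);
    return 0;
}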

View file

@@ -630,7 +630,9 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     // We enter a critical section here because we don't want to get interrupted between do_exec()
     // and Processor::assume_context() or the next context switch.
     // If we used an InterruptDisabler that sti()'d on exit, we might timer tick'd too soon in exec().
-    Processor::current().enter_critical(prev_flags);
+    Processor::enter_critical();
+    prev_flags = cpu_flags();
+    cli();
 
     // NOTE: Be careful to not trigger any page faults below!
@@ -927,7 +929,9 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
         VERIFY_NOT_REACHED();
     }
 
-    Processor::leave_critical(prev_flags);
+    if (prev_flags & 0x200)
+        sti();
+    Processor::leave_critical();
     return KSuccess;
 }

View file

@@ -388,8 +388,7 @@ void Thread::die_if_needed()
     // Now leave the critical section so that we can also trigger the
     // actual context switch
-    u32 prev_flags;
-    Processor::clear_critical(prev_flags, false);
+    Processor::clear_critical();
     dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
 
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
@@ -420,10 +419,9 @@ void Thread::yield_assuming_not_holding_big_lock()
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
-    u32 prev_flags;
-    u32 prev_crit = Processor::clear_critical(prev_flags, true);
+    u32 prev_critical = Processor::clear_critical();
     // NOTE: We may be on a different CPU now!
-    Processor::restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_critical);
 }
 
 void Thread::yield_and_release_relock_big_lock()
@@ -451,13 +449,12 @@ void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
     // flagged by calling Scheduler::yield above.
     // We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
-    u32 prev_flags;
-    u32 prev_crit = Processor::clear_critical(prev_flags, true);
+    u32 prev_critical = Processor::clear_critical();
 
     // CONTEXT SWITCH HAPPENS HERE!
 
     // NOTE: We may be on a different CPU now!
-    Processor::restore_critical(prev_crit, prev_flags);
+    Processor::restore_critical(prev_critical);
 
     if (previous_locked != LockMode::Unlocked) {
         // We've unblocked, relock the process if needed and carry on.
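
The clear/restore pair survives the change, minus the flags argument: a thread about to context-switch on purpose drops its entire critical depth with clear_critical(), switches, and reinstates the saved depth with restore_critical() once it runs again, possibly on a different processor whose own counter it then adopts. A minimal model of that round trip (the depth of 2 is an arbitrary example, e.g. scheduler lock plus the caller's own section):

#include <cassert>
#include <cstdint>

static uint32_t g_in_critical = 0; // models the per-CPU m_in_critical

static uint32_t clear_critical()
{
    uint32_t prev_critical = g_in_critical;
    g_in_critical = 0; // depth must be zero for the switch to be allowed
    return prev_critical;
    // (the real clear_critical() also gives the scheduler a chance to run)
}

static void restore_critical(uint32_t prev_critical)
{
    g_in_critical = prev_critical;
}

int main()
{
    g_in_critical = 2;
    uint32_t prev_critical = clear_critical();
    // CONTEXT SWITCH HAPPENS HERE! On resume we may be on another CPU,
    // in which case g_in_critical models that CPU's counter instead.
    restore_critical(prev_critical);
    assert(g_in_critical == 2);
    return 0;
}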