
Kernel/SMP: Change critical sections to not disable interrupts

Leave interrupts enabled so that we can still process IRQs. Critical
sections should only prevent preemption by another thread.

Co-authored-by: Tom <tomut@yahoo.com>
Andreas Kling 2021-08-10 01:56:21 +02:00
parent 364134ad4b
commit 0a02496f04
6 changed files with 42 additions and 60 deletions
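The mechanism after this change, in one picture: entering a critical section just bumps a per-CPU counter instead of saving EFLAGS and executing cli, and an IRQ handler that wants to reschedule records that wish and defers it until the counter drops back to zero. A minimal stand-alone sketch of that idea, not code from this commit (g_in_critical, g_reschedule_pending, and the printf stand-in are invented):

#include <cstdint>
#include <cstdio>

// Hypothetical per-CPU state. The real kernel keeps m_in_critical in the
// Processor struct and reaches it through the GS segment base.
static thread_local uint32_t g_in_critical = 0;        // nesting depth
static thread_local bool g_reschedule_pending = false; // set by a timer IRQ

static void enter_critical()
{
    // Interrupts stay enabled; we only forbid preemption on this CPU.
    ++g_in_critical;
}

static void leave_critical()
{
    // Leaving the outermost section: honor any reschedule request that
    // an IRQ queued while we were critical.
    if (--g_in_critical == 0 && g_reschedule_pending) {
        g_reschedule_pending = false;
        std::printf("invoking scheduler\n"); // stand-in for Scheduler::invoke_async()
    }
}

int main()
{
    enter_critical();
    g_reschedule_pending = true; // pretend a timer IRQ fired here
    leave_critical();            // the queued request is serviced on exit
}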

Kernel/Arch/x86/Processor.h

@@ -339,17 +339,13 @@ public:
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
     }
 
-    ALWAYS_INLINE static void enter_critical(u32& prev_flags)
+    ALWAYS_INLINE static void enter_critical()
     {
-        prev_flags = cpu_flags();
-        cli();
-        // NOTE: Up until this point we *could* have been preempted.
-        //       Now interrupts are disabled, so calling current() is safe.
-        AK::atomic_fetch_add(&current().m_in_critical, 1u, AK::MemoryOrder::memory_order_relaxed);
+        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
 private:
-    ALWAYS_INLINE void do_leave_critical(u32 prev_flags)
+    ALWAYS_INLINE void do_leave_critical()
     {
         VERIFY(m_in_critical > 0);
         if (m_in_critical == 1) {
@@ -363,52 +359,31 @@ private:
         } else {
             m_in_critical = m_in_critical - 1;
         }
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
     }
 
 public:
-    ALWAYS_INLINE static void leave_critical(u32 prev_flags)
+    ALWAYS_INLINE static void leave_critical()
     {
-        cli(); // Need to prevent IRQs from interrupting us here!
-        // NOTE: Up until this point we *could* have been preempted!
-        //       Now interrupts are disabled, so calling current() is safe
-        current().do_leave_critical(prev_flags);
+        current().do_leave_critical();
     }
 
-    ALWAYS_INLINE static u32 clear_critical(u32& prev_flags, bool enable_interrupts)
+    ALWAYS_INLINE static u32 clear_critical()
     {
-        cli();
-        // NOTE: Up until this point we *could* have been preempted!
-        //       Now interrupts are disabled, so calling current() is safe
-        // This doesn't have to be atomic, and it's also fine if we
-        // were to be preempted in between these steps (which should
-        // not happen due to the cli call), but if we moved to another
-        // processors m_in_critical would move along with us
-        auto prev_critical = read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
+        auto prev_critical = in_critical();
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
         auto& proc = current();
         if (!proc.m_in_irq)
             proc.check_invoke_scheduler();
-        if (enable_interrupts || (prev_flags & 0x200))
-            sti();
         return prev_critical;
     }
 
-    ALWAYS_INLINE static void restore_critical(u32 prev_critical, u32 prev_flags)
+    ALWAYS_INLINE static void restore_critical(u32 prev_critical)
     {
         // NOTE: This doesn't have to be atomic, and it's also fine if we
         //       get preempted in between these steps. If we move to another
         //       processors m_in_critical will move along with us. And if we
         //       are preempted, we would resume with the same flags.
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
-        VERIFY(!prev_critical || !(prev_flags & 0x200));
-        if (prev_flags & 0x200)
-            sti();
-        else
-            cli();
     }
 
     ALWAYS_INLINE static u32 in_critical()
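clear_critical() and restore_critical() keep their job of bracketing points where a thread may block: save the whole nesting depth, drop it to zero, and reinstate it when the thread runs again. With interrupts no longer toggled, that reduces to roughly the following shape (an analogue with invented names, not SerenityOS code):

#include <cassert>
#include <cstdint>

static thread_local uint32_t g_in_critical = 0; // hypothetical per-CPU counter

// Analogue of Processor::clear_critical(): hand back the entire nesting
// depth so a thread about to block stops pinning this CPU as non-preemptible.
static uint32_t clear_critical()
{
    uint32_t prev = g_in_critical;
    g_in_critical = 0;
    return prev;
}

// Analogue of Processor::restore_critical(): reinstate the saved depth once
// the thread is scheduled back in. No EFLAGS bookkeeping remains.
static void restore_critical(uint32_t prev)
{
    g_in_critical = prev;
}

int main()
{
    g_in_critical = 2;            // pretend we are two sections deep
    auto prev = clear_critical(); // e.g. right before blocking on a lock
    assert(g_in_critical == 0);
    restore_critical(prev);       // after being rescheduled
    assert(g_in_critical == 2);
}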

Kernel/Arch/x86/ScopedCritical.h

@@ -28,15 +28,13 @@ public:
     }
 
     ScopedCritical(ScopedCritical&& from)
-        : m_prev_flags(exchange(from.m_prev_flags, 0))
-        , m_valid(exchange(from.m_valid, false))
+        : m_valid(exchange(from.m_valid, false))
     {
     }
 
     ScopedCritical& operator=(ScopedCritical&& from)
     {
         if (&from != this) {
-            m_prev_flags = exchange(from.m_prev_flags, 0);
             m_valid = exchange(from.m_valid, false);
         }
         return *this;
@@ -46,18 +44,17 @@ public:
     {
         VERIFY(m_valid);
         m_valid = false;
-        Processor::leave_critical(m_prev_flags);
+        Processor::leave_critical();
     }
 
     void enter()
     {
         VERIFY(!m_valid);
         m_valid = true;
-        Processor::enter_critical(m_prev_flags);
+        Processor::enter_critical();
     }
 
 private:
-    u32 m_prev_flags { 0 };
     bool m_valid { false };
 };
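With m_prev_flags removed, ScopedCritical is down to a single validity flag around enter_critical()/leave_critical(). A self-contained analogue of the resulting RAII shape, assuming the same invented per-CPU counter as above (a sketch, not the kernel class itself):

#include <cstdint>
#include <utility>

static thread_local uint32_t g_in_critical = 0; // hypothetical per-CPU counter

// Mirrors the post-commit shape of Kernel::ScopedCritical: only a validity
// flag is carried; there is no saved-EFLAGS member anymore.
class ScopedCritical {
public:
    ScopedCritical() { enter(); }
    ~ScopedCritical()
    {
        if (m_valid)
            leave();
    }
    ScopedCritical(ScopedCritical&& from)
        : m_valid(std::exchange(from.m_valid, false))
    {
    }

    void leave()
    {
        m_valid = false;
        --g_in_critical;
    }
    void enter()
    {
        m_valid = true;
        ++g_in_critical;
    }

private:
    bool m_valid { false };
};

int main()
{
    ScopedCritical critical;                    // non-preemptible from here on
    ScopedCritical moved = std::move(critical); // ownership moves; one leave total
}   // moved's destructor calls leave(); the counter returns to zero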

Kernel/Arch/x86/common/Processor.cpp

@@ -658,9 +658,9 @@ void Processor::exit_trap(TrapFrame& trap)
 
 void Processor::check_invoke_scheduler()
 {
+    InterruptDisabler disabler;
     VERIFY(!m_in_irq);
     VERIFY(!m_in_critical);
-    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(&Processor::current() == this);
     if (m_invoke_scheduler_async && m_scheduler_initialized) {
         m_invoke_scheduler_async = false;
@@ -732,7 +732,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
 
 u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
 {
-    VERIFY(Processor::in_critical());
+    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(wake_count > 0);
     if (!s_smp_enabled)
         return 0;
@@ -817,8 +817,7 @@ bool Processor::smp_process_pending_messages()
     VERIFY(s_smp_enabled);
 
     bool did_process = false;
-    u32 prev_flags;
-    enter_critical(prev_flags);
+    enter_critical();
 
     if (auto pending_msgs = m_message_queue.exchange(nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
         // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
@@ -882,7 +881,7 @@ bool Processor::smp_process_pending_messages()
             halt_this();
     }
 
-    leave_critical(prev_flags);
+    leave_critical();
 
     return did_process;
 }