mirror of
https://github.com/RGBCube/serenity
synced 2025-07-25 06:27:45 +00:00
Kernel: Make entering and leaving critical sections atomic
We also need to store m_in_critical in the Thread upon switching, and we need to restore it. This solves a problem where threads moving between different processors could end up with an unexpected value.
This commit is contained in:
parent
33cdc1d2f1
commit
f88a8b16d7
3 changed files with 25 additions and 13 deletions
|
@@ -1291,6 +1291,7 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
|
||||||
write_cr3(to_tss.cr3);
|
write_cr3(to_tss.cr3);
|
||||||
|
|
||||||
to_thread->set_cpu(processor.id());
|
to_thread->set_cpu(processor.id());
|
||||||
|
processor.restore_in_critical(to_thread->saved_critical());
|
||||||
|
|
||||||
asm volatile("fxrstor %0"
|
asm volatile("fxrstor %0"
|
||||||
::"m"(to_thread->fpu_state()));
|
::"m"(to_thread->fpu_state()));
|
||||||
|
@@ -1308,6 +1309,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
||||||
ASSERT(is_kernel_mode());
|
ASSERT(is_kernel_mode());
|
||||||
|
|
||||||
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
|
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
|
||||||
|
from_thread->save_critical(m_in_critical);
|
||||||
|
|
||||||
// Switch to new thread context, passing from_thread and to_thread
|
// Switch to new thread context, passing from_thread and to_thread
|
||||||
// through to the new context using registers edx and eax
|
// through to the new context using registers edx and eax
|
||||||
|
@@ -1347,6 +1349,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
||||||
[to_eip] "c" (to_thread->tss().eip),
|
[to_eip] "c" (to_thread->tss().eip),
|
||||||
[from_thread] "d" (from_thread),
|
[from_thread] "d" (from_thread),
|
||||||
[to_thread] "a" (to_thread)
|
[to_thread] "a" (to_thread)
|
||||||
|
: "memory"
|
||||||
);
|
);
|
||||||
|
|
||||||
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
|
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
|
||||||
|
|
|
@@ -708,7 +708,7 @@ class Processor {
|
||||||
|
|
||||||
u32 m_cpu;
|
u32 m_cpu;
|
||||||
u32 m_in_irq;
|
u32 m_in_irq;
|
||||||
u32 m_in_critical;
|
Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_in_critical;
|
||||||
|
|
||||||
TSS32 m_tss;
|
TSS32 m_tss;
|
||||||
static FPUState s_clean_fpu_state;
|
static FPUState s_clean_fpu_state;
|
||||||
|
@@ -876,15 +876,16 @@ public:
|
||||||
{
|
{
|
||||||
ASSERT(prev_irq <= m_in_irq);
|
ASSERT(prev_irq <= m_in_irq);
|
||||||
if (!prev_irq) {
|
if (!prev_irq) {
|
||||||
if (m_in_critical == 0) {
|
u32 prev_critical = 0;
|
||||||
auto prev_critical = m_in_critical++;
|
if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
|
||||||
m_in_irq = prev_irq;
|
m_in_irq = prev_irq;
|
||||||
deferred_call_execute_pending();
|
deferred_call_execute_pending();
|
||||||
ASSERT(m_in_critical == prev_critical + 1);
|
auto prev_raised = m_in_critical.exchange(prev_critical);
|
||||||
m_in_critical = prev_critical;
|
ASSERT(prev_raised == prev_critical + 1);
|
||||||
}
|
|
||||||
if (!m_in_critical)
|
|
||||||
check_invoke_scheduler();
|
check_invoke_scheduler();
|
||||||
|
} else if (prev_critical == 0) {
|
||||||
|
check_invoke_scheduler();
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
m_in_irq = prev_irq;
|
m_in_irq = prev_irq;
|
||||||
}
|
}
|
||||||
|
@@ -895,11 +896,16 @@ public:
|
||||||
return m_in_irq;
|
return m_in_irq;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ALWAYS_INLINE void restore_in_critical(u32 critical)
|
||||||
|
{
|
||||||
|
m_in_critical = critical;
|
||||||
|
}
|
||||||
|
|
||||||
ALWAYS_INLINE void enter_critical(u32& prev_flags)
|
ALWAYS_INLINE void enter_critical(u32& prev_flags)
|
||||||
{
|
{
|
||||||
m_in_critical++;
|
|
||||||
prev_flags = cpu_flags();
|
prev_flags = cpu_flags();
|
||||||
cli();
|
cli();
|
||||||
|
m_in_critical++;
|
||||||
}
|
}
|
||||||
|
|
||||||
ALWAYS_INLINE void leave_critical(u32 prev_flags)
|
ALWAYS_INLINE void leave_critical(u32 prev_flags)
|
||||||
|
@@ -925,9 +931,8 @@ public:
|
||||||
|
|
||||||
ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
|
ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
|
||||||
{
|
{
|
||||||
u32 prev_crit = m_in_critical;
|
|
||||||
m_in_critical = 0;
|
|
||||||
prev_flags = cpu_flags();
|
prev_flags = cpu_flags();
|
||||||
|
u32 prev_crit = m_in_critical.exchange(0, AK::MemoryOrder::memory_order_acquire);
|
||||||
if (!m_in_irq)
|
if (!m_in_irq)
|
||||||
check_invoke_scheduler();
|
check_invoke_scheduler();
|
||||||
if (enable_interrupts)
|
if (enable_interrupts)
|
||||||
|
@@ -937,15 +942,15 @@ public:
|
||||||
|
|
||||||
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
|
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
|
||||||
{
|
{
|
||||||
ASSERT(m_in_critical == 0);
|
m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
|
||||||
m_in_critical = prev_crit;
|
ASSERT(!prev_crit || !(prev_flags & 0x200));
|
||||||
if (prev_flags & 0x200)
|
if (prev_flags & 0x200)
|
||||||
sti();
|
sti();
|
||||||
else
|
else
|
||||||
cli();
|
cli();
|
||||||
}
|
}
|
||||||
|
|
||||||
ALWAYS_INLINE u32& in_critical() { return m_in_critical; }
|
ALWAYS_INLINE u32 in_critical() { return m_in_critical.load(); }
|
||||||
|
|
||||||
ALWAYS_INLINE const FPUState& clean_fpu_state() const
|
ALWAYS_INLINE const FPUState& clean_fpu_state() const
|
||||||
{
|
{
|
||||||
|
|
|
@@ -1065,6 +1065,9 @@ public:
|
||||||
m_is_active.store(active, AK::memory_order_release);
|
m_is_active.store(active, AK::memory_order_release);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u32 saved_critical() const { return m_saved_critical; }
|
||||||
|
void save_critical(u32 critical) { m_saved_critical = critical; }
|
||||||
|
|
||||||
[[nodiscard]] bool is_active() const
|
[[nodiscard]] bool is_active() const
|
||||||
{
|
{
|
||||||
return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
|
return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
|
||||||
|
@@ -1239,6 +1242,7 @@ private:
|
||||||
ThreadID m_tid { -1 };
|
ThreadID m_tid { -1 };
|
||||||
TSS32 m_tss;
|
TSS32 m_tss;
|
||||||
TrapFrame* m_current_trap { nullptr };
|
TrapFrame* m_current_trap { nullptr };
|
||||||
|
u32 m_saved_critical { 1 };
|
||||||
Atomic<u32> m_cpu { 0 };
|
Atomic<u32> m_cpu { 0 };
|
||||||
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
|
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
|
||||||
u32 m_ticks_left { 0 };
|
u32 m_ticks_left { 0 };
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue