Kernel/x86: Don't re-enable interrupts too soon when unlocking spinlocks
To ensure that we stay on the same CPU that acquired the spinlock until we're completely unlocked, we now leave the critical section *before* re-enabling interrupts.
parent cb04caa18e
commit ae3fa20252

1 changed file with 6 additions and 4 deletions
@@ -24,12 +24,13 @@ void Spinlock::unlock(u32 prev_flags)
     VERIFY(is_locked());
     track_lock_release(m_rank);
     m_lock.store(0, AK::memory_order_release);
+
+    Processor::leave_critical();
+
     if ((prev_flags & 0x200) != 0)
         sti();
     else
         cli();
-
-    Processor::leave_critical();
 }
 
 u32 RecursiveSpinlock::lock()
@@ -60,12 +61,13 @@ void RecursiveSpinlock::unlock(u32 prev_flags)
         track_lock_release(m_rank);
        m_lock.store(0, AK::memory_order_release);
     }
+
+    Processor::leave_critical();
+
     if ((prev_flags & 0x200) != 0)
         sti();
     else
         cli();
-
-    Processor::leave_critical();
 }
 
 }