
Kernel: Various context switch fixes

These changes solve a number of problems with the software
context switching:

* The scheduler lock really should be held throughout context switches
* Transitioning from the initial (idle) thread to another needs to
  hold the scheduler lock
* Transitioning from a dying thread to another also needs to hold
  the scheduler lock
* A dying thread cannot necessarily be finalized if it hasn't been
  switched away from yet, so flag it as active while a processor is
  running it (its state may change from Running to Dying while it is
  still actually running); see the sketch below
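
Below is a minimal, self-contained sketch of that last point. It is
illustrative only, not the SerenityOS scheduler itself: the SchedulerLock
type, the is_active flag, and the context_switch()/finalize_dying_threads()
helpers are names made up for this example.

#include <atomic>
#include <cassert>
#include <vector>

enum class ThreadState { Runnable, Running, Dying, Dead };

struct Thread {
    ThreadState state { ThreadState::Runnable };
    std::atomic<bool> is_active { false }; // some CPU is still on this thread's stack
};

struct SchedulerLock {
    void lock() { while (m_flag.test_and_set(std::memory_order_acquire)) { } }
    void unlock() { m_flag.clear(std::memory_order_release); }
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

static SchedulerLock g_scheduler_lock;

// The finalizer may only reap a Dying thread once no processor is running it.
static void finalize_dying_threads(std::vector<Thread*>& threads)
{
    g_scheduler_lock.lock();
    for (auto* thread : threads) {
        if (thread->state == ThreadState::Dying && !thread->is_active.load())
            thread->state = ThreadState::Dead;
    }
    g_scheduler_lock.unlock();
}

// The scheduler lock is held for the entire switch; the outgoing thread only
// stops being "active" once execution has left its stack.
static void context_switch(Thread& from, Thread& to)
{
    g_scheduler_lock.lock();
    to.is_active.store(true);
    to.state = ThreadState::Running;
    // ... a real kernel would swap stacks and registers here ...
    if (from.state == ThreadState::Running)
        from.state = ThreadState::Runnable;
    from.is_active.store(false); // only now may the finalizer touch `from`
    g_scheduler_lock.unlock();
}

int main()
{
    Thread idle, worker;
    idle.state = ThreadState::Running;
    idle.is_active.store(true);

    context_switch(idle, worker);
    worker.state = ThreadState::Dying; // killed while still the current thread

    std::vector<Thread*> threads { &idle, &worker };
    finalize_dying_threads(threads);
    assert(worker.state == ThreadState::Dying); // still active: must not be reaped yet

    context_switch(worker, idle); // switch away from the dying thread
    finalize_dying_threads(threads);
    assert(worker.state == ThreadState::Dead); // now it is safe to finalize
}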

Authored by Tom on 2020-07-05 14:32:07 -06:00; committed by Andreas Kling
parent 49f5069b76
commit 2a82a25fec
9 changed files with 235 additions and 89 deletions

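The hunks below make the spinlock types explicitly non-copyable and
non-assignable. As a rough illustration of why (a sketch, not the kernel's
SpinLock.h; for one thing, the real lock() returns a u32 rather than void),
consider what a copyable guard would allow:

#include <atomic>

class SpinLock {
public:
    SpinLock() = default;
    SpinLock(const SpinLock&) = delete;            // a lock is tied to one place in memory;
    SpinLock& operator=(const SpinLock&) = delete; // duplicating it has no sensible meaning
    void lock() { while (m_flag.test_and_set(std::memory_order_acquire)) { } }
    void unlock() { m_flag.clear(std::memory_order_release); }
private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

class ScopedSpinLock {
public:
    explicit ScopedSpinLock(SpinLock& lock)
        : m_lock(&lock)
    {
        m_lock->lock();
        m_have_lock = true;
    }
    // Moving transfers ownership: exactly one guard will unlock.
    ScopedSpinLock(ScopedSpinLock&& from)
        : m_lock(from.m_lock)
        , m_have_lock(from.m_have_lock)
    {
        from.m_lock = nullptr;
        from.m_have_lock = false;
    }
    // Copying or assigning would leave two guards "owning" the same lock,
    // causing a double unlock (or silently dropping a held lock on assignment).
    ScopedSpinLock(const ScopedSpinLock&) = delete;
    ScopedSpinLock& operator=(const ScopedSpinLock&) = delete;
    ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;
    ~ScopedSpinLock()
    {
        if (m_lock && m_have_lock)
            m_lock->unlock();
    }
private:
    SpinLock* m_lock { nullptr };
    bool m_have_lock { false };
};

With the assignment operators deleted, ownership of a held lock can only
change hands through the move constructor, which explicitly clears
m_have_lock on the source, as the move constructor in the last hunk does.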

@@ -42,6 +42,8 @@ public:
SpinLock() = default;
SpinLock(const SpinLock&) = delete;
SpinLock(SpinLock&&) = delete;
SpinLock& operator=(const SpinLock&) = delete;
SpinLock& operator=(SpinLock&&) = delete;
ALWAYS_INLINE u32 lock()
{
@@ -82,6 +84,8 @@ public:
RecursiveSpinLock() = default;
RecursiveSpinLock(const RecursiveSpinLock&) = delete;
RecursiveSpinLock(RecursiveSpinLock&&) = delete;
RecursiveSpinLock& operator=(const RecursiveSpinLock&) = delete;
RecursiveSpinLock& operator=(RecursiveSpinLock&&) = delete;
ALWAYS_INLINE u32 lock()
{
@@ -128,6 +132,9 @@ class ScopedSpinLock
public:
ScopedSpinLock() = delete;
ScopedSpinLock(const ScopedSpinLock&) = delete;
ScopedSpinLock& operator=(const ScopedSpinLock&) = delete;
ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;
ScopedSpinLock(LockType& lock):
m_lock(&lock)
@@ -147,8 +154,6 @@ public:
from.m_have_lock = false;
}
ScopedSpinLock(const ScopedSpinLock&) = delete;
~ScopedSpinLock()
{
if (m_lock && m_have_lock) {