Kernel: Prevent recursive calls into the scheduler
Upon leaving a critical section (such as a SpinLock) we need to check if we're already asynchronously invoking the Scheduler. Otherwise we might end up triggering another context switch as soon as we leave the scheduler lock. Fixes #2883
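The guard this commit introduces can be pictured as a per-processor flag that records whether the scheduler is already running on that CPU; a deferred scheduler invocation triggered by dropping a critical section first checks the flag instead of blindly context switching. A minimal sketch of that idea, assuming a hypothetical m_in_scheduler member and invoke_if_not_active() entry point (SchedulerPerProcessorData and get_scheduler_data() appear in the diff below, the rest is illustrative):

    // Sketch only -- not the literal code from this commit.
    struct SchedulerPerProcessorData {
        bool m_in_scheduler { false }; // set while this CPU is inside the scheduler
    };

    static void invoke_if_not_active()
    {
        auto& scheduler_data = Processor::current().get_scheduler_data();
        if (scheduler_data.m_in_scheduler)
            return; // already scheduling on this CPU; don't recurse
        scheduler_data.m_in_scheduler = true;
        // ... pick the next thread and perform the context switch ...
        scheduler_data.m_in_scheduler = false;
    }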
This commit is contained in:
parent a19304c9d6
commit 728de56481

5 changed files with 136 additions and 21 deletions
@@ -943,6 +943,7 @@ void Processor::early_initialize(u32 cpu)
     m_message_queue = nullptr;
     m_idle_thread = nullptr;
     m_current_thread = nullptr;
+    m_scheduler_data = nullptr;
     m_mm_data = nullptr;
     m_info = nullptr;
 
@@ -1188,9 +1189,9 @@ extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap)
 
     // Since we got here and don't have Scheduler::context_switch in the
     // call stack (because this is the first time we switched into this
-    // context), we need to unlock the scheduler lock manually. We're
-    // using the flags initially set up by init_context
-    g_scheduler_lock.unlock(trap->regs->eflags);
+    // context), we need to notify the scheduler so that it can release
+    // the scheduler lock.
+    Scheduler::leave_on_first_switch(trap->regs->eflags);
 }
 
 extern "C" void thread_context_first_enter(void);
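The body of Scheduler::leave_on_first_switch() is not part of this excerpt. Presumably it clears the per-processor "inside the scheduler" state and then releases g_scheduler_lock with the flags saved by init_context, roughly like this (a sketch under that assumption; m_in_scheduler is an assumed member name):

    // Hypothetical sketch, not shown in this diff.
    void Scheduler::leave_on_first_switch(u32 flags)
    {
        // A freshly created context never went through Scheduler::context_switch,
        // so it has to hand the scheduler lock back explicitly.
        ASSERT(g_scheduler_lock.own_lock());
        Processor::current().get_scheduler_data().m_in_scheduler = false;
        g_scheduler_lock.unlock(flags);
    }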
@@ -1335,6 +1336,7 @@ void Processor::assume_context(Thread& thread, u32 flags)
     dbg() << "Assume context for thread " << VirtualAddress(&thread) << " " << thread;
 #endif
     ASSERT_INTERRUPTS_DISABLED();
+    Scheduler::prepare_after_exec();
     // in_critical() should be 2 here. The critical section in Process::exec
     // and then the scheduler lock
     ASSERT(Processor::current().in_critical() == 2);
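Scheduler::prepare_after_exec() is likewise new and its body is not shown here. By analogy with the other hooks, it presumably marks the CPU as being inside the scheduler while the scheduler lock is still held across the exec transition; a sketch under that assumption:

    // Hypothetical sketch, not shown in this diff.
    void Scheduler::prepare_after_exec()
    {
        // Called from Processor::assume_context() with g_scheduler_lock held.
        ASSERT(g_scheduler_lock.own_lock());
        Processor::current().get_scheduler_data().m_in_scheduler = true;
    }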
@@ -1346,21 +1348,20 @@ extern "C" void pre_init_finished(void)
 {
     ASSERT(g_scheduler_lock.own_lock());
 
-    // The target flags will get restored upon leaving the trap
-    u32 prev_flags = cpu_flags();
-    g_scheduler_lock.unlock(prev_flags);
-
-    // We because init_finished() will wait on the other APs, we need
+    // Because init_finished() will wait on the other APs, we need
     // to release the scheduler lock so that the other APs can also get
     // to this point
+
+    // The target flags will get restored upon leaving the trap
+    u32 prev_flags = cpu_flags();
+    Scheduler::leave_on_first_switch(prev_flags);
 }
 
 extern "C" void post_init_finished(void)
 {
     // We need to re-acquire the scheduler lock before a context switch
     // transfers control into the idle loop, which needs the lock held
-    ASSERT(!g_scheduler_lock.own_lock());
-    g_scheduler_lock.lock();
+    Scheduler::prepare_for_idle_loop();
 }
 
 void Processor::initialize_context_switching(Thread& initial_thread)
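Since the explicit ASSERT/lock() pair disappears from post_init_finished(), Scheduler::prepare_for_idle_loop() presumably takes over re-acquiring the scheduler lock and additionally records that the CPU is about to run the scheduler's idle loop. A sketch under that assumption (m_in_scheduler is an assumed member name):

    // Hypothetical sketch, not shown in this diff.
    void Scheduler::prepare_for_idle_loop()
    {
        ASSERT(!g_scheduler_lock.own_lock());
        g_scheduler_lock.lock();
        Processor::current().get_scheduler_data().m_in_scheduler = true;
    }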
@@ -623,6 +623,7 @@ static_assert(GDT_SELECTOR_CODE0 + 16 == GDT_SELECTOR_CODE3); // CS3 = CS0 + 16
 static_assert(GDT_SELECTOR_CODE0 + 24 == GDT_SELECTOR_DATA3); // SS3 = CS0 + 32
 
 class ProcessorInfo;
+class SchedulerPerProcessorData;
 struct MemoryManagerData;
 struct ProcessorMessageEntry;
 
@@ -683,6 +684,7 @@ class Processor {
 
     ProcessorInfo* m_info;
     MemoryManagerData* m_mm_data;
+    SchedulerPerProcessorData* m_scheduler_data;
     Thread* m_current_thread;
     Thread* m_idle_thread;
 
@@ -770,6 +772,16 @@ public:
         return get_fs() == GDT_SELECTOR_PROC && read_fs_u32(0) != 0;
     }
 
+    ALWAYS_INLINE void set_scheduler_data(SchedulerPerProcessorData& scheduler_data)
+    {
+        m_scheduler_data = &scheduler_data;
+    }
+
+    ALWAYS_INLINE SchedulerPerProcessorData& get_scheduler_data() const
+    {
+        return *m_scheduler_data;
+    }
+
     ALWAYS_INLINE void set_mm_data(MemoryManagerData& mm_data)
     {
         m_mm_data = &mm_data;
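These accessors mirror the existing set_mm_data()/get_mm_data() pair. The per-CPU scheduler state is presumably attached once during scheduler initialization, along the lines of this hypothetical call site (not part of this excerpt):

    // Hypothetical wiring during per-CPU scheduler setup.
    Processor::current().set_scheduler_data(*new SchedulerPerProcessorData());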
@@ -920,16 +932,13 @@ class ScopedCritical {
 public:
     ScopedCritical()
     {
-        m_valid = true;
-        Processor::current().enter_critical(m_prev_flags);
+        enter();
     }
 
     ~ScopedCritical()
     {
-        if (m_valid) {
-            m_valid = false;
-            Processor::current().leave_critical(m_prev_flags);
-        }
+        if (m_valid)
+            leave();
     }
 
     ScopedCritical(ScopedCritical&& from)
@@ -955,6 +964,20 @@ public:
         m_prev_flags &= ~0x200;
     }
 
+    void leave()
+    {
+        ASSERT(m_valid);
+        m_valid = false;
+        Processor::current().leave_critical(m_prev_flags);
+    }
+
+    void enter()
+    {
+        ASSERT(!m_valid);
+        m_valid = true;
+        Processor::current().enter_critical(m_prev_flags);
+    }
+
 private:
     u32 m_prev_flags { 0 };
     bool m_valid { false };
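With enter() and leave() exposed, a ScopedCritical can now be released and re-acquired in the middle of its scope rather than only in the destructor. Illustrative usage (not from the commit):

    ScopedCritical critical;  // enter a critical section
    // ... work that must not be interrupted by a context switch ...
    critical.leave();         // temporarily drop the critical section
    // ... work that may yield or block ...
    critical.enter();         // re-enter before the destructor runs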