Kernel: Move "in-scheduler" flag from SchedulerData to Processor
This avoids a race between getting the processor-specific SchedulerData and accessing it. (Switching to a different CPU in that window means that we're operating on the wrong SchedulerData.)

Co-authored-by: Tom <tomut@yahoo.com>
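To make the window concrete, here is a minimal user-space model of the problem and of the shape of the fix. All names below (PerCpuSchedulerData, g_current_cpu, etc.) are hypothetical stand-ins, not SerenityOS APIs; in the kernel the "single-step" accessor corresponds to one %gs-relative load or store, so there is no window at all.

// Hypothetical user-space model of the race described above (illustrative only).
#include <array>
#include <cstdio>

struct PerCpuSchedulerData {
    bool in_scheduler { false };
};

static std::array<PerCpuSchedulerData, 2> g_per_cpu;
static int g_current_cpu = 0; // stands in for "which CPU this thread currently runs on"

// Racy pattern: first materialize a reference to "this CPU's" data, then
// access it later. A migration in between leaves us holding the wrong CPU's data.
static PerCpuSchedulerData& get_current_scheduler_data() { return g_per_cpu[g_current_cpu]; }

// Shape of the fix: resolve the CPU and touch the flag in one operation.
// (In the kernel this is a single %gs-relative access, so a migration can
// only happen before or after it, never in the middle.)
static void set_current_in_scheduler(bool value) { g_per_cpu[g_current_cpu].in_scheduler = value; }

int main()
{
    // Racy: grab CPU 0's data...
    auto& data = get_current_scheduler_data();
    g_current_cpu = 1;        // ...the thread migrates to CPU 1 in that window...
    data.in_scheduler = true; // ...and we update CPU 0's flag by mistake.
    std::printf("racy:  cpu0=%d cpu1=%d (wrong CPU updated)\n",
        g_per_cpu[0].in_scheduler, g_per_cpu[1].in_scheduler);

    g_per_cpu = {};

    // Fixed shape: no reference to per-CPU data is held across the window.
    set_current_in_scheduler(true);
    std::printf("fixed: cpu0=%d cpu1=%d (current CPU updated)\n",
        g_per_cpu[0].in_scheduler, g_per_cpu[1].in_scheduler);
    return 0;
}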
Commit d9da513959 (parent 249d6a490d)

2 changed files with 24 additions and 22 deletions
@@ -25,7 +25,6 @@ struct ProcessorMessageEntry;
 
 enum class ProcessorSpecificDataID {
     MemoryManager,
-    Scheduler,
     __Count,
 };
 
@@ -138,6 +137,7 @@ class Processor {
 
     bool m_invoke_scheduler_async;
     bool m_scheduler_initialized;
+    bool m_in_scheduler { true };
     Atomic<bool> m_halt_requested;
 
     DeferredCallEntry* m_pending_deferred_calls; // in reverse order
@@ -344,6 +344,16 @@ public:
         write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
     }
 
+    ALWAYS_INLINE static bool current_in_scheduler()
+    {
+        return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
+    }
+
+    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
+    {
+        write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
+    }
+
 private:
     ALWAYS_INLINE void do_leave_critical()
     {
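The two new accessors read and write m_in_scheduler through read_gs_value / write_gs_value, i.e. relative to the %gs segment that points at the current CPU's Processor, so the whole access is a single segment-relative instruction and no stale per-CPU pointer can be held across a migration. Below is a rough, non-runnable sketch of such a helper pair (an assumption about the general x86 technique, not the actual SerenityOS implementation):

// Sketch of a %gs-relative load/store pair (x86, GCC/Clang inline asm).
// Only meaningful where %gs points at per-CPU data, as it does in the kernel;
// shown here purely to illustrate why the access has no race window.
#include <cstdint>

inline uint32_t read_gs_u32(uintptr_t offset)
{
    uint32_t value;
    asm volatile("movl %%gs:%a[off], %[val]"
                 : [val] "=r"(value)
                 : [off] "ir"(offset)
                 : "memory");
    return value;
}

inline void write_gs_u32(uintptr_t offset, uint32_t value)
{
    asm volatile("movl %[val], %%gs:%a[off]"
                 :
                 : [off] "ir"(offset), [val] "r"(value)
                 : "memory");
}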
@@ -22,12 +22,6 @@
 
 namespace Kernel {
 
-struct SchedulerData {
-    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
-
-    bool in_scheduler { true };
-};
-
 RecursiveSpinlock g_scheduler_lock;
 
 static u32 time_slice_for(const Thread& thread)
@@ -195,7 +189,6 @@ UNMAP_AFTER_INIT void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
-    ProcessorSpecific<SchedulerData>::initialize();
     VERIFY(processor.is_initialized());
     auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
@@ -217,14 +210,13 @@ bool Scheduler::pick_next()
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
     ScopedCritical critical;
-    ProcessorSpecific<SchedulerData>::get().in_scheduler = true;
+    Processor::set_current_in_scheduler(true);
     ScopeGuard guard(
         []() {
             // We may be on a different processor after we got switched
             // back to this thread!
-            auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-            VERIFY(scheduler_data.in_scheduler);
-            scheduler_data.in_scheduler = false;
+            VERIFY(Processor::current_in_scheduler());
+            Processor::set_current_in_scheduler(false);
         });
 
     SpinlockLocker lock(g_scheduler_lock);
@@ -361,9 +353,9 @@ void Scheduler::leave_on_first_switch(u32 flags)
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
     g_scheduler_lock.unlock(flags);
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = false;
+
+    VERIFY(Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(false);
 }
 
 void Scheduler::prepare_after_exec()
@@ -371,9 +363,9 @@ void Scheduler::prepare_after_exec()
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
     VERIFY(g_scheduler_lock.own_lock());
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 void Scheduler::prepare_for_idle_loop()
@@ -382,9 +374,9 @@ void Scheduler::prepare_for_idle_loop()
     // and is about to run it. We need to acquire he scheduler lock
     VERIFY(!g_scheduler_lock.own_lock());
     g_scheduler_lock.lock();
-    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
-    VERIFY(!scheduler_data.in_scheduler);
-    scheduler_data.in_scheduler = true;
+
+    VERIFY(!Processor::current_in_scheduler());
+    Processor::set_current_in_scheduler(true);
 }
 
 Process* Scheduler::colonel()
@@ -517,7 +509,7 @@ void Scheduler::invoke_async()
     // Since this function is called when leaving critical sections (such
     // as a Spinlock), we need to check if we're not already doing this
     // to prevent recursion
-    if (!ProcessorSpecific<SchedulerData>::get().in_scheduler)
+    if (!Processor::current_in_scheduler())
        pick_next();
 }
 