Kernel: Introduce ProcessorSpecific<T> for per-CPU data structures
To add a new per-CPU data structure, add an ID for it to the ProcessorSpecificDataID enum, then call ProcessorSpecific<T>::initialize() when you are ready to construct the per-CPU data structure on the current CPU. It can then be accessed via ProcessorSpecific<T>::get().

This patch replaces the existing hard-coded mechanisms for the Scheduler and MemoryManager per-CPU data structures.
This commit is contained in:
parent 559ab00249
commit 1e43292c3b

6 changed files with 46 additions and 38 deletions
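As a quick illustration of the pattern described above, here is a minimal, self-contained C++ sketch of the mechanism (compilable on its own, outside the kernel). The single static Processor instance and the printf-based main() are stand-ins for illustration only; the ProcessorSpecificDataID enum, set_specific()/get_specific(), and ProcessorSpecific<T> mirror the code in the diff below:

    #include <cstddef>
    #include <cstdio>

    // IDs name one slot per per-CPU data structure (mirrors the diff).
    enum class ProcessorSpecificDataID {
        MemoryManager,
        Scheduler,
        __Count,
    };

    class Processor {
    public:
        static Processor& current()
        {
            // The kernel looks up the current CPU's Processor object;
            // a single static instance stands in for that here.
            static Processor s_processor;
            return s_processor;
        }

        template<typename T>
        T* get_specific()
        {
            // Recover the concrete type from the type-erased slot.
            return static_cast<T*>(m_processor_specific_data[static_cast<std::size_t>(T::processor_specific_data_id())]);
        }

        void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
        {
            m_processor_specific_data[static_cast<std::size_t>(specific_id)] = ptr;
        }

    private:
        // One type-erased pointer per ProcessorSpecificDataID.
        void* m_processor_specific_data[static_cast<std::size_t>(ProcessorSpecificDataID::__Count)] {};
    };

    template<typename T>
    class ProcessorSpecific {
    public:
        static void initialize()
        {
            Processor::current().set_specific(T::processor_specific_data_id(), new T);
        }
        static T& get()
        {
            return *Processor::current().get_specific<T>();
        }
    };

    // A participating structure exposes the ID of its slot.
    struct SchedulerData {
        static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }

        bool m_in_scheduler { true };
    };

    int main()
    {
        ProcessorSpecific<SchedulerData>::initialize(); // construct on "this CPU"
        ProcessorSpecific<SchedulerData>::get().m_in_scheduler = false;
        std::printf("m_in_scheduler = %d\n", ProcessorSpecific<SchedulerData>::get().m_in_scheduler);
    }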
@@ -21,10 +21,14 @@
 namespace Kernel {
 
 class ProcessorInfo;
-class SchedulerPerProcessorData;
-struct MemoryManagerData;
 struct ProcessorMessageEntry;
 
+enum class ProcessorSpecificDataID {
+    MemoryManager,
+    Scheduler,
+    __Count,
+};
+
 #if ARCH(X86_64)
 #    define MSR_FS_BASE 0xc0000100
 #    define MSR_GS_BASE 0xc0000101
@@ -127,8 +131,6 @@ class Processor {
     u8 m_physical_address_bit_width;
 
     ProcessorInfo* m_info;
-    MemoryManagerData* m_mm_data;
-    SchedulerPerProcessorData* m_scheduler_data;
     Thread* m_current_thread;
     Thread* m_idle_thread;
 
@@ -142,6 +144,8 @@ class Processor {
     DeferredCallEntry* m_free_deferred_call_pool_entry;
     DeferredCallEntry m_deferred_call_pool[5];
 
+    void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];
+
     void gdt_init();
     void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
     void write_gdt_entry(u16 selector, Descriptor& descriptor);
@@ -259,24 +263,15 @@ public:
         read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
     }
 
-    ALWAYS_INLINE void set_scheduler_data(SchedulerPerProcessorData& scheduler_data)
+    template<typename T>
+    T* get_specific()
     {
-        m_scheduler_data = &scheduler_data;
+        return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
     }
 
-    ALWAYS_INLINE SchedulerPerProcessorData& get_scheduler_data() const
+    void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
     {
-        return *m_scheduler_data;
-    }
-
-    ALWAYS_INLINE void set_mm_data(MemoryManagerData& mm_data)
-    {
-        m_mm_data = &mm_data;
-    }
-
-    ALWAYS_INLINE MemoryManagerData& get_mm_data() const
-    {
-        return *m_mm_data;
+        m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
     }
 
     ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
@@ -449,4 +444,17 @@ public:
    String platform_string() const;
 };
 
+template<typename T>
+class ProcessorSpecific {
+public:
+    static void initialize()
+    {
+        Processor::current().set_specific(T::processor_specific_data_id(), new T);
+    }
+    static T& get()
+    {
+        return *Processor::current().get_specific<T>();
+    }
+};
+
 }
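A note on the design above: Processor stores only a fixed array of void*, sized by ProcessorSpecificDataID::__Count, so it needs no knowledge of the concrete per-CPU types. get_specific() recovers the static type via the slot ID that T itself advertises through processor_specific_data_id(), and the static_cast is sound because, within this patch, ProcessorSpecific<T>::initialize() is the only caller of set_specific() and always stores a freshly allocated T in T's own slot. Calling ProcessorSpecific<T>::get() on a CPU where initialize() has not yet run would hand back whatever is in the uninitialized slot, so initialization order still matters, just as it did with the old set_scheduler_data()/set_mm_data() calls.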
@@ -304,8 +304,6 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
     m_message_queue = nullptr;
     m_idle_thread = nullptr;
     m_current_thread = nullptr;
-    m_scheduler_data = nullptr;
-    m_mm_data = nullptr;
     m_info = nullptr;
 
     m_halt_requested = false;
@@ -55,7 +55,7 @@ class RangeAllocator;
 class RecursiveSpinLock;
 class Region;
 class Scheduler;
-class SchedulerPerProcessorData;
+class SchedulerData;
 class SharedInodeVMObject;
 class Socket;
 class Space;
@@ -21,12 +21,14 @@
 
 namespace Kernel {
 
-class SchedulerPerProcessorData {
-    AK_MAKE_NONCOPYABLE(SchedulerPerProcessorData);
-    AK_MAKE_NONMOVABLE(SchedulerPerProcessorData);
+class SchedulerData {
+    AK_MAKE_NONCOPYABLE(SchedulerData);
+    AK_MAKE_NONMOVABLE(SchedulerData);
 
 public:
-    SchedulerPerProcessorData() = default;
+    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
+
+    SchedulerData() = default;
 
     bool m_in_scheduler { true };
 };
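Note the convention at work in the rename above: any type managed through ProcessorSpecific<T> exposes a static processor_specific_data_id() naming its slot. SchedulerData returns ProcessorSpecificDataID::Scheduler here; MemoryManagerData returns ProcessorSpecificDataID::MemoryManager in the MemoryManagerData hunk further down.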
@@ -190,7 +192,7 @@ UNMAP_AFTER_INIT void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
-    processor.set_scheduler_data(*new SchedulerPerProcessorData());
+    ProcessorSpecific<SchedulerData>::initialize();
     VERIFY(processor.is_initialized());
     auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
@@ -212,13 +214,12 @@ bool Scheduler::pick_next()
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
     ScopedCritical critical;
-    auto& scheduler_data = Processor::current().get_scheduler_data();
-    scheduler_data.m_in_scheduler = true;
+    ProcessorSpecific<SchedulerData>::get().m_in_scheduler = true;
     ScopeGuard guard(
         []() {
             // We may be on a different processor after we got switched
             // back to this thread!
-            auto& scheduler_data = Processor::current().get_scheduler_data();
+            auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
             VERIFY(scheduler_data.m_in_scheduler);
             scheduler_data.m_in_scheduler = false;
         });
@@ -358,7 +359,7 @@ void Scheduler::leave_on_first_switch(u32 flags)
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
     g_scheduler_lock.unlock(flags);
-    auto& scheduler_data = Processor::current().get_scheduler_data();
+    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
     VERIFY(scheduler_data.m_in_scheduler);
     scheduler_data.m_in_scheduler = false;
 }
@@ -368,7 +369,7 @@ void Scheduler::prepare_after_exec()
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
     VERIFY(g_scheduler_lock.own_lock());
-    auto& scheduler_data = Processor::current().get_scheduler_data();
+    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
     VERIFY(!scheduler_data.m_in_scheduler);
     scheduler_data.m_in_scheduler = true;
 }
@@ -379,7 +380,7 @@ void Scheduler::prepare_for_idle_loop()
     // and is about to run it. We need to acquire he scheduler lock
     VERIFY(!g_scheduler_lock.own_lock());
     g_scheduler_lock.lock();
-    auto& scheduler_data = Processor::current().get_scheduler_data();
+    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
     VERIFY(!scheduler_data.m_in_scheduler);
     scheduler_data.m_in_scheduler = true;
 }
@@ -509,13 +510,13 @@ void Scheduler::timer_tick(const RegisterState& regs)
 void Scheduler::invoke_async()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    auto& proc = Processor::current();
-    VERIFY(!proc.in_irq());
+    auto& processor = Processor::current();
+    VERIFY(!processor.in_irq());
 
     // Since this function is called when leaving critical sections (such
     // as a SpinLock), we need to check if we're not already doing this
     // to prevent recursion
-    if (!proc.get_scheduler_data().m_in_scheduler)
+    if (!ProcessorSpecific<SchedulerData>::get().m_in_scheduler)
         pick_next();
 }
 
@@ -598,8 +598,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
 
 UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
 {
-    auto mm_data = new MemoryManagerData;
-    Processor::current().set_mm_data(*mm_data);
+    ProcessorSpecific<MemoryManagerData>::initialize();
 
     if (cpu == 0) {
         new MemoryManager;
@@ -91,6 +91,8 @@ struct PhysicalMemoryRange {
 #define MM Kernel::MemoryManager::the()
 
 struct MemoryManagerData {
+    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
+
     SpinLock<u8> m_quickmap_in_use;
     u32 m_quickmap_prev_flags;
 
@@ -115,7 +117,7 @@ public:
 
     static inline MemoryManagerData& get_data()
     {
-        return Processor::current().get_mm_data();
+        return ProcessorSpecific<MemoryManagerData>::get();
     }
 
     PageFaultResponse handle_page_fault(PageFault const&);
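With both structures converted, a call site reaches the current CPU's data in a single expression. A sketch of the two access paths seen in this diff (the first is the convenience wrapper kept above, the second is the direct form the scheduler hunks use):

    auto& mm_data = MemoryManager::get_data();                      // wraps ProcessorSpecific<MemoryManagerData>::get()
    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get(); // direct per-CPU access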