diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index 478ab0c2f7..1a27e071c7 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -21,10 +21,14 @@
 namespace Kernel {
 
 class ProcessorInfo;
-class SchedulerPerProcessorData;
-struct MemoryManagerData;
 struct ProcessorMessageEntry;
 
+enum class ProcessorSpecificDataID {
+    MemoryManager,
+    Scheduler,
+    __Count,
+};
+
 #if ARCH(X86_64)
 #    define MSR_FS_BASE 0xc0000100
 #    define MSR_GS_BASE 0xc0000101
@@ -127,8 +131,6 @@ class Processor {
     u8 m_physical_address_bit_width;
 
     ProcessorInfo* m_info;
-    MemoryManagerData* m_mm_data;
-    SchedulerPerProcessorData* m_scheduler_data;
     Thread* m_current_thread;
     Thread* m_idle_thread;
 
@@ -142,6 +144,8 @@ class Processor {
     DeferredCallEntry* m_free_deferred_call_pool_entry;
     DeferredCallEntry m_deferred_call_pool[5];
 
+    void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];
+
     void gdt_init();
     void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
     void write_gdt_entry(u16 selector, Descriptor& descriptor);
@@ -259,24 +263,15 @@ public:
            read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
     }
 
-    ALWAYS_INLINE void set_scheduler_data(SchedulerPerProcessorData& scheduler_data)
+    template<typename T>
+    T* get_specific()
     {
-        m_scheduler_data = &scheduler_data;
+        return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
     }
 
-    ALWAYS_INLINE SchedulerPerProcessorData& get_scheduler_data() const
+    void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
     {
-        return *m_scheduler_data;
-    }
-
-    ALWAYS_INLINE void set_mm_data(MemoryManagerData& mm_data)
-    {
-        m_mm_data = &mm_data;
-    }
-
-    ALWAYS_INLINE MemoryManagerData& get_mm_data() const
-    {
-        return *m_mm_data;
+        m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
     }
 
     ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
@@ -449,4 +444,17 @@ public:
     String platform_string() const;
 };
 
+template<typename T>
+class ProcessorSpecific {
+public:
+    static void initialize()
+    {
+        Processor::current().set_specific(T::processor_specific_data_id(), new T);
+    }
+    static T& get()
+    {
+        return *Processor::current().get_specific<T>();
+    }
+};
+
 }
diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 2a3ba5e907..29eed55ae4 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -304,8 +304,6 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
     m_message_queue = nullptr;
     m_idle_thread = nullptr;
     m_current_thread = nullptr;
-    m_scheduler_data = nullptr;
-    m_mm_data = nullptr;
     m_info = nullptr;
 
     m_halt_requested = false;
diff --git a/Kernel/Forward.h b/Kernel/Forward.h
index a7600a18b6..494dda897d 100644
--- a/Kernel/Forward.h
+++ b/Kernel/Forward.h
@@ -55,7 +55,7 @@ class RangeAllocator;
 class RecursiveSpinLock;
 class Region;
 class Scheduler;
-class SchedulerPerProcessorData;
+class SchedulerData;
 class SharedInodeVMObject;
 class Socket;
 class Space;
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 8e9f918e9b..2da03253b2 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -21,12 +21,14 @@
 
 namespace Kernel {
 
-class SchedulerPerProcessorData {
-    AK_MAKE_NONCOPYABLE(SchedulerPerProcessorData);
-    AK_MAKE_NONMOVABLE(SchedulerPerProcessorData);
+class SchedulerData {
+    AK_MAKE_NONCOPYABLE(SchedulerData);
+    AK_MAKE_NONMOVABLE(SchedulerData);
 
 public:
-    SchedulerPerProcessorData() = default;
+    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
+
+    SchedulerData() = default;
     bool m_in_scheduler { true };
 };
 
@@ -190,7 +192,7 @@ UNMAP_AFTER_INIT void Scheduler::start()
     g_scheduler_lock.lock();
 
     auto& processor = Processor::current();
-    processor.set_scheduler_data(*new SchedulerPerProcessorData());
+    ProcessorSpecific<SchedulerData>::initialize();
     VERIFY(processor.is_initialized());
     auto& idle_thread = *Processor::idle_thread();
     VERIFY(processor.current_thread() == &idle_thread);
@@ -212,13 +214,12 @@ bool Scheduler::pick_next()
     // prevents a recursive call into Scheduler::invoke_async upon
     // leaving the scheduler lock.
     ScopedCritical critical;
-    auto& scheduler_data = Processor::current().get_scheduler_data();
-    scheduler_data.m_in_scheduler = true;
+    ProcessorSpecific<SchedulerData>::get().m_in_scheduler = true;
     ScopeGuard guard(
         []() {
             // We may be on a different processor after we got switched
             // back to this thread!
-            auto& scheduler_data = Processor::current().get_scheduler_data();
+            auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
             VERIFY(scheduler_data.m_in_scheduler);
             scheduler_data.m_in_scheduler = false;
         });
@@ -358,7 +359,7 @@ void Scheduler::leave_on_first_switch(u32 flags)
     // Scheduler::context_switch is not in the call stack we need to
     // clean up and release locks manually here
     g_scheduler_lock.unlock(flags);
-    auto& scheduler_data = Processor::current().get_scheduler_data();
+    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
     VERIFY(scheduler_data.m_in_scheduler);
     scheduler_data.m_in_scheduler = false;
 }
@@ -368,7 +369,7 @@ void Scheduler::prepare_after_exec()
     // This is called after exec() when doing a context "switch" into
     // the new process. This is called from Processor::assume_context
     VERIFY(g_scheduler_lock.own_lock());
-    auto& scheduler_data = Processor::current().get_scheduler_data();
+    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
     VERIFY(!scheduler_data.m_in_scheduler);
     scheduler_data.m_in_scheduler = true;
 }
@@ -379,7 +380,7 @@ void Scheduler::prepare_for_idle_loop()
     // and is about to run it. We need to acquire the scheduler lock
     VERIFY(!g_scheduler_lock.own_lock());
     g_scheduler_lock.lock();
-    auto& scheduler_data = Processor::current().get_scheduler_data();
+    auto& scheduler_data = ProcessorSpecific<SchedulerData>::get();
     VERIFY(!scheduler_data.m_in_scheduler);
     scheduler_data.m_in_scheduler = true;
 }
@@ -509,13 +510,13 @@ void Scheduler::timer_tick(const RegisterState& regs)
 void Scheduler::invoke_async()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    auto& proc = Processor::current();
-    VERIFY(!proc.in_irq());
+    auto& processor = Processor::current();
+    VERIFY(!processor.in_irq());
 
     // Since this function is called when leaving critical sections (such
     // as a SpinLock), we need to check if we're not already doing this
     // to prevent recursion
-    if (!proc.get_scheduler_data().m_in_scheduler)
+    if (!ProcessorSpecific<SchedulerData>::get().m_in_scheduler)
         pick_next();
 }
 
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index ac6f7b9228..9452c07d6b 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -598,8 +598,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
 
 UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
 {
-    auto mm_data = new MemoryManagerData;
-    Processor::current().set_mm_data(*mm_data);
+    ProcessorSpecific<MemoryManagerData>::initialize();
 
     if (cpu == 0) {
         new MemoryManager;
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 2803b43cbe..20bd573d99 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -91,6 +91,8 @@ struct PhysicalMemoryRange {
 #define MM Kernel::MemoryManager::the()
 
 struct MemoryManagerData {
+    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
+
     SpinLock<u8> m_quickmap_in_use;
     u32 m_quickmap_prev_flags;
 
@@ -115,7 +117,7 @@ public:
 
     static inline MemoryManagerData& get_data()
     {
-        return Processor::current().get_mm_data();
+        return ProcessorSpecific<MemoryManagerData>::get();
     }
 
     PageFaultResponse handle_page_fault(PageFault const&);
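
Usage sketch (not part of the patch): the change reduces per-CPU storage to a fixed slot table on Processor, where each per-CPU structure advertises its slot through a static processor_specific_data_id() and ProcessorSpecific<T> resolves the slot via Processor::current(). The standalone C++ sketch below models that mechanism outside the kernel so the wiring is easy to follow; FakeProcessor and main() are illustrative stand-ins, not code from this change.

    #include <cassert>
    #include <stddef.h>

    // Mirrors the ProcessorSpecificDataID enum added to Processor.h: one slot per per-CPU structure.
    enum class ProcessorSpecificDataID {
        MemoryManager,
        Scheduler,
        __Count,
    };

    // Stand-in for Kernel::Processor, reduced to the slot table this patch introduces.
    class FakeProcessor {
    public:
        static FakeProcessor& current()
        {
            static FakeProcessor s_instance; // a single "CPU" is enough for the sketch
            return s_instance;
        }

        template<typename T>
        T* get_specific()
        {
            return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
        }

        void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
        {
            m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
        }

    private:
        void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count] {};
    };

    // Same shape as the ProcessorSpecific<T> helper added to Processor.h.
    template<typename T>
    class ProcessorSpecific {
    public:
        static void initialize()
        {
            FakeProcessor::current().set_specific(T::processor_specific_data_id(), new T);
        }
        static T& get()
        {
            return *FakeProcessor::current().get_specific<T>();
        }
    };

    // A per-CPU structure opts in by exposing its slot ID, as SchedulerData does in Scheduler.cpp.
    struct SchedulerData {
        static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::Scheduler; }
        bool m_in_scheduler { true };
    };

    int main()
    {
        ProcessorSpecific<SchedulerData>::initialize(); // once per CPU, e.g. from Scheduler::start()
        ProcessorSpecific<SchedulerData>::get().m_in_scheduler = false;
        assert(!ProcessorSpecific<SchedulerData>::get().m_in_scheduler);
    }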