
Kernel: Share Processor class (and others) across architectures

About half of the Processor code is common across architectures, so
let's share it through a templated base class. Other code that can be
shared to some extent, such as the FPUState definition and the TrapFrame
functions, is adjusted here as well. Functions that cannot be shared
trivially (i.e. without internal refactoring) are left alone for now.
Authored by kleines Filmröllchen on 2023-09-18 21:45:14 +02:00; committed by Andrew Kaster
parent 0b824ab7a6
commit 398d271a46
26 changed files with 943 additions and 860 deletions
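For context, the sharing mechanism introduced by this commit is essentially CRTP: a templated ProcessorBase<T> carries the architecture-independent state and member functions, and each architecture defines a final `Processor : ProcessorBase<Processor>` on top of it, with the shared out-of-line definitions explicitly instantiated per architecture via Kernel/Arch/ProcessorFunctions.include (visible at the end of the aarch64 Processor.cpp diff below). The following is only a minimal sketch of that shape; apart from ProcessorBase, Processor, and the include file, the member and helper names are illustrative and not the real kernel API.

#include <AK/Types.h>

// Shared, architecture-independent half (sketch of the role of Kernel/Arch/Processor.h).
template<typename ProcessorT>
class ProcessorBase {
public:
    // Helpers whose bodies are identical everywhere can live entirely in the base.
    static ProcessorT& current() { return *s_current_processor; }
    u32 in_critical() const { return m_in_critical; }

    // Functions whose bodies differ per architecture keep a shared declaration here,
    // are defined in each Arch/<arch>/Processor.cpp, and are explicitly instantiated
    // there via: #include <Kernel/Arch/ProcessorFunctions.include>
    void early_initialize(u32 cpu);

protected:
    // Illustrative stand-in for g_current_processor / per-CPU storage.
    static inline ProcessorT* s_current_processor { nullptr };
    u32 m_cpu { 0 };
    u32 m_in_critical { 0 };
};

// Architecture-specific half (sketch of the role of Kernel/Arch/aarch64/Processor.h).
class Processor final : public ProcessorBase<Processor> {
public:
    // Only members that genuinely differ between architectures remain here.
};

Because ProcessorBase is parameterized on the concrete Processor type, shared code such as current() can return the architecture's own type without any virtual dispatch.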

View file

@@ -9,7 +9,7 @@
#pragma once
#include <Kernel/Arch/aarch64/Processor.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/aarch64/Registers.h>
namespace Kernel::Aarch64::Asm {
@@ -145,12 +145,3 @@ inline void flush_data_cache(FlatPtr start, size_t size)
}
}
namespace Kernel {
inline bool are_interrupts_enabled()
{
return Processor::are_interrupts_enabled();
}
}

View file

@@ -4,9 +4,9 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/aarch64/ASM_wrapper.h>
#include <Kernel/Arch/aarch64/CPU.h>
#include <Kernel/Arch/aarch64/Processor.h>
#include <Kernel/Arch/aarch64/Registers.h>
#include <Kernel/Library/Panic.h>

View file

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
VALIDATE_IS_AARCH64()
namespace Kernel {
struct [[gnu::aligned(16)]] FPUState {
u8 buffer[512];
};
}

View file

@@ -22,15 +22,10 @@
namespace Kernel {
extern "C" void thread_context_first_enter(void);
extern "C" void exit_kernel_thread(void);
extern "C" void do_assume_context(Thread* thread, u32 new_interrupts_state);
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" FlatPtr do_init_context(Thread* thread, u32 new_interrupts_state) __attribute__((used));
Processor* g_current_processor;
READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;
static void store_fpu_state(FPUState* fpu_state)
{
@@ -78,7 +73,8 @@ static void load_fpu_state(FPUState* fpu_state)
"\n" ::[fpu_state] "r"(fpu_state));
}
void Processor::early_initialize(u32 cpu)
template<typename T>
void ProcessorBase<T>::early_initialize(u32 cpu)
{
VERIFY(g_current_processor == nullptr);
m_cpu = cpu;
@@ -86,10 +82,11 @@ void Processor::early_initialize(u32 cpu)
m_physical_address_bit_width = detect_physical_address_bit_width();
m_virtual_address_bit_width = detect_virtual_address_bit_width();
g_current_processor = this;
g_current_processor = static_cast<Processor*>(this);
}
void Processor::initialize(u32)
template<typename T>
void ProcessorBase<T>::initialize(u32)
{
m_deferred_call_pool.init();
@@ -102,14 +99,16 @@ void Processor::initialize(u32)
store_fpu_state(&s_clean_fpu_state);
}
[[noreturn]] void Processor::halt()
template<typename T>
[[noreturn]] void ProcessorBase<T>::halt()
{
disable_interrupts();
for (;;)
asm volatile("wfi");
}
void Processor::flush_tlb_local(VirtualAddress, size_t)
template<typename T>
void ProcessorBase<T>::flush_tlb_local(VirtualAddress, size_t)
{
// FIXME: Figure out how to flush a single page
asm volatile("dsb ishst");
@@ -118,7 +117,8 @@ void Processor::flush_tlb_local(VirtualAddress, size_t)
asm volatile("isb");
}
void Processor::flush_entire_tlb_local()
template<typename T>
void ProcessorBase<T>::flush_entire_tlb_local()
{
asm volatile("dsb ishst");
asm volatile("tlbi vmalle1");
@@ -126,34 +126,14 @@ void Processor::flush_entire_tlb_local()
asm volatile("isb");
}
void Processor::flush_tlb(Memory::PageDirectory const*, VirtualAddress vaddr, size_t page_count)
template<typename T>
void ProcessorBase<T>::flush_tlb(Memory::PageDirectory const*, VirtualAddress vaddr, size_t page_count)
{
flush_tlb_local(vaddr, page_count);
}
void Processor::leave_critical()
{
InterruptDisabler disabler;
current().do_leave_critical();
}
void Processor::do_leave_critical()
{
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (m_in_irq == 0) {
m_deferred_call_pool.execute_pending();
VERIFY(m_in_critical == 1);
}
m_in_critical = 0;
if (m_in_irq == 0)
check_invoke_scheduler();
} else {
m_in_critical = m_in_critical - 1;
}
}
u32 Processor::clear_critical()
template<typename T>
u32 ProcessorBase<T>::clear_critical()
{
InterruptDisabler disabler;
auto prev_critical = in_critical();
@@ -164,19 +144,16 @@ u32 Processor::clear_critical()
return prev_critical;
}
u64 Processor::time_spent_idle() const
{
return m_idle_thread->time_in_user() + m_idle_thread->time_in_kernel();
}
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
template<typename T>
u32 ProcessorBase<T>::smp_wake_n_idle_processors(u32 wake_count)
{
(void)wake_count;
// FIXME: Actually wake up other cores when SMP is supported for aarch64.
return 0;
}
void Processor::initialize_context_switching(Thread& initial_thread)
template<typename T>
void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
{
VERIFY(initial_thread.process().is_kernel_process());
@@ -203,7 +180,8 @@ void Processor::initialize_context_switching(Thread& initial_thread)
VERIFY_NOT_REACHED();
}
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
template<typename T>
void ProcessorBase<T>::switch_context(Thread*& from_thread, Thread*& to_thread)
{
VERIFY(!m_in_irq);
VERIFY(m_in_critical == 1);
@@ -307,7 +285,9 @@ extern "C" FlatPtr do_init_context(Thread* thread, u32 new_interrupts_state)
return Processor::current().init_context(*thread, true);
}
void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_state)
// FIXME: Share this code with other architectures.
template<typename T>
void ProcessorBase<T>::assume_context(Thread& thread, InterruptsState new_interrupts_state)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
@@ -322,7 +302,8 @@ void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_st
VERIFY_NOT_REACHED();
}
FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
template<typename T>
FlatPtr ProcessorBase<T>::init_context(Thread& thread, bool leave_crit)
{
VERIFY(g_scheduler_lock.is_locked());
if (leave_crit) {
@@ -381,28 +362,9 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
return stack_top;
}
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
// FIXME: Figure out if we need prev_irq_level, see duplicated code in Kernel/Arch/x86/common/Processor.cpp
if (raise_irq)
m_in_irq++;
auto* current_thread = Processor::current_thread();
if (current_thread) {
auto& current_trap = current_thread->current_trap();
trap.next_trap = current_trap;
current_trap = &trap;
auto new_previous_mode = trap.regs->previous_mode();
if (current_thread->set_previous_mode(new_previous_mode)) {
current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), new_previous_mode == ExecutionMode::Kernel, false);
}
} else {
trap.next_trap = nullptr;
}
}
void Processor::exit_trap(TrapFrame& trap)
// FIXME: Figure out if we can fully share this code with x86.
template<typename T>
void ProcessorBase<T>::exit_trap(TrapFrame& trap)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
@@ -449,7 +411,8 @@ void Processor::exit_trap(TrapFrame& trap)
check_invoke_scheduler();
}
ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
template<typename T>
ErrorOr<Vector<FlatPtr, 32>> ProcessorBase<T>::capture_stack_trace(Thread& thread, size_t max_frames)
{
(void)thread;
(void)max_frames;
@@ -457,18 +420,6 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
return Vector<FlatPtr, 32> {};
}
void Processor::check_invoke_scheduler()
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
VERIFY(&Processor::current() == this);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
}
}
NAKED void thread_context_first_enter(void)
{
asm(
@@ -498,32 +449,9 @@ NAKED void do_assume_context(Thread*, u32)
// clang-format on
}
void exit_kernel_thread(void)
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread)
{
Thread::current()->exit();
}
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread)
{
VERIFY(!are_interrupts_enabled());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
VERIFY(to_thread == Thread::current());
Scheduler::enter_current(*from_thread);
auto in_critical = to_thread->saved_critical();
VERIFY(in_critical > 0);
Processor::restore_critical(in_critical);
// Since we got here and don't have Scheduler::context_switch in the
// call stack (because this is the first time we switched into this
// context), we need to notify the scheduler so that it can release
// the scheduler lock. We don't want to enable interrupts at this point
// as we're still in the middle of a context switch. Doing so could
// trigger a context switch within a context switch, leading to a crash.
Scheduler::leave_on_first_switch(InterruptsState::Disabled);
do_context_first_init(from_thread, to_thread);
}
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@@ -553,27 +481,31 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
load_fpu_state(&to_thread->fpu_state());
}
StringView Processor::platform_string()
template<typename T>
StringView ProcessorBase<T>::platform_string()
{
return "aarch64"sv;
}
void Processor::set_thread_specific_data(VirtualAddress thread_specific_data)
template<typename T>
void ProcessorBase<T>::set_thread_specific_data(VirtualAddress thread_specific_data)
{
Aarch64::Asm::set_tpidr_el0(thread_specific_data.get());
}
void Processor::deferred_call_queue(Function<void()> callback)
template<typename T>
void ProcessorBase<T>::wait_for_interrupt() const
{
// NOTE: If we are called outside of a critical section and outside
// of an irq handler, the function will be executed before we return!
ScopedCritical critical;
auto& cur_proc = Processor::current();
asm("wfi");
}
auto* entry = cur_proc.m_deferred_call_pool.get_free();
entry->handler_value() = move(callback);
cur_proc.m_deferred_call_pool.queue_entry(entry);
template<typename T>
Processor& ProcessorBase<T>::by_id(u32 id)
{
(void)id;
TODO_AARCH64();
}
}
#include <Kernel/Arch/ProcessorFunctions.include>

View file

@@ -18,6 +18,9 @@
#include <Kernel/Arch/aarch64/Registers.h>
#include <Kernel/Memory/VirtualAddress.h>
#include <AK/Platform.h>
VALIDATE_IS_AARCH64()
namespace Kernel {
namespace Memory {
@@ -29,205 +32,16 @@ class Processor;
struct TrapFrame;
enum class InterruptsState;
// FIXME This needs to go behind some sort of platform abstraction
// it is used between Thread and Processor.
struct [[gnu::aligned(16)]] FPUState {
u8 buffer[512];
};
template<typename ProcessorT>
class ProcessorBase;
// FIXME: Remove this once we support SMP in aarch64
extern Processor* g_current_processor;
constexpr size_t MAX_CPU_COUNT = 1;
class Processor {
void* m_processor_specific_data[static_cast<size_t>(ProcessorSpecificDataID::__Count)];
class Processor final : public ProcessorBase<Processor> {
public:
Processor() = default;
void early_initialize(u32 cpu);
void initialize(u32 cpu);
template<typename T>
T* get_specific()
{
return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
}
void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
{
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
}
void idle_begin() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
void idle_end() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
void wait_for_interrupt() const
{
asm("wfi");
}
ALWAYS_INLINE static void pause()
{
asm volatile("isb sy");
}
ALWAYS_INLINE static void wait_check()
{
asm volatile("yield");
// FIXME: Process SMP messages once we support SMP on aarch64; cf. x86_64
}
ALWAYS_INLINE u8 physical_address_bit_width() const
{
return m_physical_address_bit_width;
}
ALWAYS_INLINE u8 virtual_address_bit_width() const
{
return m_virtual_address_bit_width;
}
ALWAYS_INLINE static bool is_initialized()
{
return g_current_processor != nullptr;
}
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
ALWAYS_INLINE u32 id() const
{
// NOTE: This variant should only be used when iterating over all
// Processor instances, or when it's guaranteed that the thread
// cannot move to another processor in between calling Processor::current
// and Processor::get_id, or if this fact is not important.
// All other cases should use Processor::id instead!
return 0;
}
// FIXME: When aarch64 supports multiple cores, return the correct core id here.
ALWAYS_INLINE static u32 current_id()
{
return 0;
}
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
m_idle_thread = &idle_thread;
}
ALWAYS_INLINE static Thread* current_thread()
{
return current().m_current_thread;
}
ALWAYS_INLINE bool has_nx() const
{
return true;
}
ALWAYS_INLINE bool has_pat() const
{
return false;
}
ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
{
return m_features.has_flag(feature);
}
ALWAYS_INLINE static FlatPtr current_in_irq()
{
return current().m_in_irq;
}
ALWAYS_INLINE static u64 read_cpu_counter()
{
TODO_AARCH64();
return 0;
}
ALWAYS_INLINE static bool are_interrupts_enabled()
{
auto daif = Aarch64::DAIF::read();
return !daif.I;
}
ALWAYS_INLINE static void enable_interrupts()
{
Aarch64::DAIF::clear_I();
}
ALWAYS_INLINE static void disable_interrupts()
{
Aarch64::DAIF::set_I();
}
void check_invoke_scheduler();
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
ALWAYS_INLINE static bool current_in_scheduler()
{
return current().m_in_scheduler;
}
ALWAYS_INLINE static void set_current_in_scheduler(bool value)
{
current().m_in_scheduler = value;
}
// FIXME: Share the critical functions with x86/Processor.h
ALWAYS_INLINE static void enter_critical()
{
auto& current_processor = current();
current_processor.m_in_critical = current_processor.m_in_critical + 1;
}
static void leave_critical();
static u32 clear_critical();
ALWAYS_INLINE static void restore_critical(u32 prev_critical)
{
current().m_in_critical = prev_critical;
}
ALWAYS_INLINE static u32 in_critical()
{
return current().m_in_critical;
}
ALWAYS_INLINE static void verify_no_spinlocks_held()
{
VERIFY(!Processor::in_critical());
}
ALWAYS_INLINE static FPUState const& clean_fpu_state()
{
return s_clean_fpu_state;
}
ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
{
current().m_current_thread = &current_thread;
}
ALWAYS_INLINE static Thread* idle_thread()
{
return current().m_idle_thread;
}
ALWAYS_INLINE static Processor& current()
{
return *g_current_processor;
}
template<IteratorFunction<Processor&> Callback>
static inline IterationDecision for_each(Callback callback)
{
@@ -244,65 +58,162 @@ public:
callback(*g_current_processor);
return IterationDecision::Continue;
}
static u32 count()
{
TODO_AARCH64();
}
// FIXME: Move this into generic Processor class, when there is such a class.
ALWAYS_INLINE static bool is_bootstrap_processor()
{
return Processor::current_id() == 0;
}
static void deferred_call_queue(Function<void()>);
u64 time_spent_idle() const;
static u32 smp_wake_n_idle_processors(u32 wake_count);
[[noreturn]] static void halt();
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
void enter_trap(TrapFrame& trap, bool raise_irq);
void exit_trap(TrapFrame& trap);
static StringView platform_string();
static void set_thread_specific_data(VirtualAddress thread_specific_data);
static void flush_entire_tlb_local();
private:
Processor(Processor const&) = delete;
void do_leave_critical();
DeferredCallPool m_deferred_call_pool {};
u32 m_cpu;
CPUFeature::Type m_features;
u8 m_physical_address_bit_width;
u8 m_virtual_address_bit_width;
Thread* m_current_thread;
Thread* m_idle_thread;
u32 m_in_critical { 0 };
static FPUState s_clean_fpu_state;
// FIXME: Once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
// this member should be incremented. Also this member shouldn't be a FlatPtr.
FlatPtr m_in_irq { 0 };
bool m_in_scheduler { false };
bool m_invoke_scheduler_async { false };
bool m_scheduler_initialized { false };
};
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_initialized()
{
return g_current_processor != nullptr;
}
template<typename T>
ALWAYS_INLINE Thread* ProcessorBase<T>::idle_thread()
{
return current().m_idle_thread;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_thread(Thread& current_thread)
{
current().m_current_thread = &current_thread;
}
// FIXME: When aarch64 supports multiple cores, return the correct core id here.
template<typename T>
ALWAYS_INLINE u32 ProcessorBase<T>::current_id()
{
return 0;
}
template<typename T>
ALWAYS_INLINE u32 ProcessorBase<T>::in_critical()
{
return current().m_in_critical;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enter_critical()
{
auto& current_processor = current();
current_processor.m_in_critical = current_processor.m_in_critical + 1;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::restore_critical(u32 prev_critical)
{
current().m_in_critical = prev_critical;
}
template<typename T>
ALWAYS_INLINE T& ProcessorBase<T>::current()
{
return *g_current_processor;
}
template<typename T>
void ProcessorBase<T>::idle_begin() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
template<typename T>
void ProcessorBase<T>::idle_end() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
template<typename T>
void ProcessorBase<T>::smp_enable()
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
template<typename T>
bool ProcessorBase<T>::is_smp_enabled()
{
return false;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::are_interrupts_enabled()
{
auto daif = Aarch64::DAIF::read();
return !daif.I;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enable_interrupts()
{
Aarch64::DAIF::clear_I();
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::disable_interrupts()
{
Aarch64::DAIF::set_I();
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_kernel_mode()
{
// FIXME: Implement this correctly.
return true;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::current_in_scheduler()
{
return current().m_in_scheduler;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_in_scheduler(bool value)
{
current().m_in_scheduler = value;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_nx() const
{
return true;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_pat() const
{
return false;
}
template<typename T>
ALWAYS_INLINE FlatPtr ProcessorBase<T>::current_in_irq()
{
return current().m_in_irq;
}
template<typename T>
ALWAYS_INLINE Thread* ProcessorBase<T>::current_thread()
{
return current().m_current_thread;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::pause()
{
asm volatile("isb sy");
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::wait_check()
{
asm volatile("yield");
// FIXME: Process SMP messages once we support SMP on aarch64; cf. x86_64
}
template<typename T>
ALWAYS_INLINE u64 ProcessorBase<T>::read_cpu_counter()
{
TODO_AARCH64();
return 0;
}
}

View file

@@ -1,18 +0,0 @@
/*
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/aarch64/TrapFrame.h>
#include <Kernel/Interrupts/InterruptDisabler.h>
namespace Kernel {
extern "C" void exit_trap(TrapFrame* trap)
{
return Processor::current().exit_trap(*trap);
}
}

View file

@@ -27,6 +27,4 @@ struct TrapFrame {
#define TRAP_FRAME_SIZE (2 * 8)
static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
}