1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 18:37:34 +00:00

Kernel: Add even more AARCH64 stubs

This commit is contained in:
Gunnar Beutner 2022-10-16 22:43:43 +02:00 committed by Linus Groh
parent 63a91d6971
commit 056e406a12
9 changed files with 136 additions and 13 deletions

View file

@@ -51,6 +51,16 @@ public:
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
}
// Hook called when this processor enters its idle loop.
// Stub: aarch64 idle/power handling is not implemented yet, so this
// hits TODO_AARCH64() if ever reached.
void idle_begin() const
{
    TODO_AARCH64();
}
// Hook called when this processor leaves its idle loop.
// Stub: not implemented on aarch64 yet; hits TODO_AARCH64() if reached.
void idle_end() const
{
    TODO_AARCH64();
}
ALWAYS_INLINE static void pause()
{
TODO_AARCH64();
@@ -80,12 +90,27 @@ public:
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
ALWAYS_INLINE u32 id() const
{
    // NOTE: This variant should only be used when iterating over all
    // Processor instances, or when it's guaranteed that the thread
    // cannot move to another processor in between calling Processor::current
    // and Processor::id, or if this fact is not important.
    // All other cases should use Processor::current_id instead!
    // Only a single core is supported on aarch64 so far, so 0 is always correct.
    return 0;
}
// FIXME: When aarch64 supports multiple cores, return the correct core id here.
ALWAYS_INLINE static u32 current_id()
{
    // Only the boot processor exists for now, so its id (0) is always correct.
    return 0;
}
// Records the thread this processor should run when it has no other work.
// Stores a raw pointer; the caller presumably guarantees the idle thread
// outlives the processor -- TODO confirm ownership elsewhere.
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
    m_idle_thread = &idle_thread;
}
// FIXME: Actually return the current thread once aarch64 supports threading.
ALWAYS_INLINE static Thread* current_thread()
{
@@ -129,6 +154,20 @@ public:
Aarch64::DAIF::set_I();
}
void check_invoke_scheduler();
// Requests a deferred scheduler invocation by setting a flag that is
// presumably consumed by check_invoke_scheduler() -- defined elsewhere.
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
// Returns whether the scheduler is currently running on this processor.
ALWAYS_INLINE static bool current_in_scheduler()
{
    // NOTE(review): bind by reference -- plain `auto` deduces a value and
    // copies the entire per-CPU Processor object just to read one bool
    // (assuming current() returns Processor&, as the usage here implies).
    auto& current_processor = current();
    return current_processor.m_in_scheduler;
}
// Marks on the current processor whether execution is inside the scheduler;
// read back via current_in_scheduler().
ALWAYS_INLINE static void set_current_in_scheduler(bool value)
{
    current().m_in_scheduler = value;
}
// FIXME: Share the critical functions with x86/Processor.h
ALWAYS_INLINE static void enter_critical()
{
@@ -162,6 +201,12 @@ public:
// Stub: presumably should return a pristine FPUState for initializing new
// threads (as on x86) -- not implemented on aarch64 yet, hits TODO_AARCH64().
ALWAYS_INLINE static FPUState const& clean_fpu_state() { TODO_AARCH64(); }
// Stub: thread tracking is not implemented on aarch64 yet.
ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
{
    // The cast silences the unused-parameter warning until this is implemented.
    (void)current_thread;
    TODO_AARCH64();
}
// FIXME: Actually return the idle thread once aarch64 supports threading.
ALWAYS_INLINE static Thread* idle_thread()
{
@@ -188,14 +233,21 @@ public:
[[noreturn]] static void halt();
// Starts scheduling on this processor by entering the given initial thread;
// never returns (definition elsewhere).
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
FlatPtr init_context(Thread& thread, bool leave_crit);
// Captures return addresses from the given thread's stack; max_frames == 0
// presumably means "no limit" -- confirm against the definition.
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
private:
// Thread scheduled when this processor has nothing else to run (set via set_idle_thread()).
Thread* m_idle_thread;
// Presumably a nesting depth of critical sections (see enter_critical()) -- confirm.
u32 m_in_critical { 0 };
// FIXME: Once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
// this member should be incremented. Also this member shouldn't be a FlatPtr.
FlatPtr m_in_irq { 0 };
// True while the scheduler runs on this processor (see current_in_scheduler()).
bool m_in_scheduler { false };
// Set by invoke_scheduler_async() to request a deferred scheduler invocation.
bool m_invoke_scheduler_async { false };
};
}