mirror of
https://github.com/RGBCube/serenity
synced 2025-07-28 12:27:36 +00:00
Kernel: Share Processor class (and others) across architectures
About half of the Processor code is common across architectures, so let's share it with a templated base class. Also, other code that can be shared in some ways, like FPUState and TrapFrame functions, is adjusted here. Functions which cannot be shared trivially (without internal refactoring) are left alone for now.
This commit is contained in:
parent
0b824ab7a6
commit
398d271a46
26 changed files with 943 additions and 860 deletions
|
@ -105,15 +105,6 @@ void write_dr6(FlatPtr);
|
|||
FlatPtr read_dr7();
|
||||
void write_dr7(FlatPtr);
|
||||
|
||||
ALWAYS_INLINE static bool is_kernel_mode()
|
||||
{
|
||||
u16 cs;
|
||||
asm volatile(
|
||||
"mov %%cs, %[cs] \n"
|
||||
: [cs] "=g"(cs));
|
||||
return (cs & 3) == 0;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void read_tsc(u32& lsw, u32& msw)
|
||||
{
|
||||
asm volatile("rdtsc"
|
||||
|
|
|
@ -5,8 +5,8 @@
|
|||
*/
|
||||
|
||||
#include <Kernel/Arch/CurrentTime.h>
|
||||
#include <Kernel/Arch/Processor.h>
|
||||
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
|
||||
#include <Kernel/Arch/x86_64/Processor.h>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
|
|
|
@ -5,9 +5,9 @@
|
|||
*/
|
||||
|
||||
#include <Kernel/Arch/DebugOutput.h>
|
||||
#include <Kernel/Arch/Processor.h>
|
||||
#include <Kernel/Arch/x86_64/BochsDebugOutput.h>
|
||||
#include <Kernel/Arch/x86_64/IO.h>
|
||||
#include <Kernel/Arch/x86_64/Processor.h>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
|
|
27
Kernel/Arch/x86_64/FPUState.h
Normal file
27
Kernel/Arch/x86_64/FPUState.h
Normal file
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <AK/Platform.h>
|
||||
#include <AK/Types.h>
|
||||
#include <Kernel/Arch/x86_64/SIMDState.h>
|
||||
|
||||
VALIDATE_IS_X86()
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
struct [[gnu::aligned(64), gnu::packed]] FPUState {
|
||||
SIMD::LegacyRegion legacy_region;
|
||||
SIMD::Header xsave_header;
|
||||
|
||||
// FIXME: This should be dynamically allocated! For now, we only save the `YMM` registers here,
|
||||
// so this will do for now. The size of the area is queried via CPUID(EAX=0dh, ECX=2):EAX.
|
||||
// https://www.intel.com/content/dam/develop/external/us/en/documents/36945
|
||||
u8 ext_save_area[256];
|
||||
};
|
||||
|
||||
}
|
|
@ -275,7 +275,7 @@ extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
|
|||
// to this point
|
||||
|
||||
// The target flags will get restored upon leaving the trap
|
||||
Scheduler::leave_on_first_switch(processor_interrupts_state());
|
||||
Scheduler::leave_on_first_switch(Processor::interrupts_state());
|
||||
}
|
||||
|
||||
extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
|
||||
|
|
|
@ -35,23 +35,18 @@
|
|||
|
||||
namespace Kernel {
|
||||
|
||||
READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;
|
||||
|
||||
READONLY_AFTER_INIT static ProcessorContainer s_processors {};
|
||||
READONLY_AFTER_INIT Atomic<u32> Processor::g_total_processors;
|
||||
READONLY_AFTER_INIT static bool volatile s_smp_enabled;
|
||||
|
||||
static Atomic<ProcessorMessage*> s_message_pool;
|
||||
Atomic<u32> Processor::s_idle_cpu_mask { 0 };
|
||||
|
||||
// The compiler can't see the calls to these functions inside assembly.
|
||||
// Declare them, to avoid dead code warnings.
|
||||
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
|
||||
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
|
||||
extern "C" FlatPtr do_init_context(Thread* thread, u32 flags) __attribute__((used));
|
||||
extern "C" void syscall_entry();
|
||||
|
||||
bool Processor::is_smp_enabled()
|
||||
template<typename T>
|
||||
bool ProcessorBase<T>::is_smp_enabled()
|
||||
{
|
||||
return s_smp_enabled;
|
||||
}
|
||||
|
@ -62,11 +57,6 @@ UNMAP_AFTER_INIT static void sse_init()
|
|||
write_cr4(read_cr4() | 0x600);
|
||||
}
|
||||
|
||||
void exit_kernel_thread(void)
|
||||
{
|
||||
Thread::current()->exit();
|
||||
}
|
||||
|
||||
UNMAP_AFTER_INIT void Processor::cpu_detect()
|
||||
{
|
||||
// NOTE: This is called during Processor::early_initialize, we cannot
|
||||
|
@ -601,9 +591,11 @@ UNMAP_AFTER_INIT void Processor::cpu_setup()
|
|||
m_features |= CPUFeature::OSPKE;
|
||||
}
|
||||
|
||||
UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
|
||||
template<typename T>
|
||||
UNMAP_AFTER_INIT void ProcessorBase<T>::early_initialize(u32 cpu)
|
||||
{
|
||||
m_self = this;
|
||||
m_self = static_cast<Processor*>(this);
|
||||
auto self = static_cast<Processor*>(this);
|
||||
|
||||
m_cpu = cpu;
|
||||
m_in_irq = 0;
|
||||
|
@ -613,10 +605,10 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
|
|||
m_scheduler_initialized = false;
|
||||
m_in_scheduler = true;
|
||||
|
||||
m_message_queue = nullptr;
|
||||
self->m_message_queue = nullptr;
|
||||
m_idle_thread = nullptr;
|
||||
m_current_thread = nullptr;
|
||||
m_info = nullptr;
|
||||
self->m_info = nullptr;
|
||||
|
||||
m_halt_requested = false;
|
||||
if (cpu == 0) {
|
||||
|
@ -628,26 +620,29 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
|
|||
|
||||
m_deferred_call_pool.init();
|
||||
|
||||
cpu_setup();
|
||||
gdt_init();
|
||||
self->cpu_setup();
|
||||
self->gdt_init();
|
||||
|
||||
VERIFY(is_initialized()); // sanity check
|
||||
VERIFY(is_initialized());
|
||||
VERIFY(¤t() == this); // sanity check
|
||||
}
|
||||
|
||||
UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
|
||||
template<typename T>
|
||||
UNMAP_AFTER_INIT void ProcessorBase<T>::initialize(u32 cpu)
|
||||
{
|
||||
VERIFY(m_self == this);
|
||||
VERIFY(¤t() == this); // sanity check
|
||||
|
||||
m_info = new ProcessorInfo(*this);
|
||||
auto self = static_cast<Processor*>(this);
|
||||
|
||||
dmesgln("CPU[{}]: Supported features: {}", current_id(), m_info->features_string());
|
||||
self->m_info = new ProcessorInfo(*self);
|
||||
|
||||
dmesgln("CPU[{}]: Supported features: {}", current_id(), self->m_info->features_string());
|
||||
if (!has_feature(CPUFeature::RDRAND))
|
||||
dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
|
||||
dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
|
||||
dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);
|
||||
if (m_has_qemu_hvf_quirk)
|
||||
if (self->m_has_qemu_hvf_quirk)
|
||||
dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id());
|
||||
|
||||
if (cpu == 0)
|
||||
|
@ -672,13 +667,13 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
|
|||
}
|
||||
|
||||
if (has_feature(CPUFeature::HYPERVISOR))
|
||||
detect_hypervisor();
|
||||
self->detect_hypervisor();
|
||||
}
|
||||
|
||||
{
|
||||
// We need to prevent races between APs starting up at the same time
|
||||
VERIFY(cpu < s_processors.size());
|
||||
s_processors[cpu] = this;
|
||||
s_processors[cpu] = static_cast<Processor*>(this);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -765,7 +760,8 @@ DescriptorTablePointer const& Processor::get_gdtr()
|
|||
return m_gdtr;
|
||||
}
|
||||
|
||||
ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
|
||||
template<typename T>
|
||||
ErrorOr<Vector<FlatPtr, 32>> ProcessorBase<T>::capture_stack_trace(Thread& thread, size_t max_frames)
|
||||
{
|
||||
FlatPtr frame_ptr = 0, ip = 0;
|
||||
Vector<FlatPtr, 32> stack_trace;
|
||||
|
@ -836,7 +832,7 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
|
|||
// until it returns the data back to us
|
||||
auto& proc = Processor::current();
|
||||
ErrorOr<void> result;
|
||||
smp_unicast(
|
||||
Processor::smp_unicast(
|
||||
thread.cpu(),
|
||||
[&]() {
|
||||
dbgln("CPU[{}] getting stack for cpu #{}", Processor::current_id(), proc.id());
|
||||
|
@ -889,37 +885,19 @@ ProcessorContainer& Processor::processors()
|
|||
return s_processors;
|
||||
}
|
||||
|
||||
Processor& Processor::by_id(u32 id)
|
||||
template<typename T>
|
||||
Processor& ProcessorBase<T>::by_id(u32 id)
|
||||
{
|
||||
return *s_processors[id];
|
||||
}
|
||||
|
||||
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::exit_trap(TrapFrame& trap)
|
||||
{
|
||||
VERIFY_INTERRUPTS_DISABLED();
|
||||
VERIFY(&Processor::current() == this);
|
||||
trap.prev_irq_level = m_in_irq;
|
||||
if (raise_irq)
|
||||
m_in_irq++;
|
||||
auto* current_thread = Processor::current_thread();
|
||||
if (current_thread) {
|
||||
auto& current_trap = current_thread->current_trap();
|
||||
trap.next_trap = current_trap;
|
||||
current_trap = &trap;
|
||||
|
||||
auto new_previous_mode = trap.regs->previous_mode();
|
||||
if (current_thread->set_previous_mode(new_previous_mode) && trap.prev_irq_level == 0) {
|
||||
current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), new_previous_mode == ExecutionMode::Kernel, false);
|
||||
}
|
||||
} else {
|
||||
trap.next_trap = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void Processor::exit_trap(TrapFrame& trap)
|
||||
{
|
||||
VERIFY_INTERRUPTS_DISABLED();
|
||||
VERIFY(&Processor::current() == this);
|
||||
auto* self = static_cast<Processor*>(this);
|
||||
|
||||
// Temporarily enter a critical section. This is to prevent critical
|
||||
// sections entered and left within e.g. smp_process_pending_messages
|
||||
|
@ -932,7 +910,7 @@ void Processor::exit_trap(TrapFrame& trap)
|
|||
m_in_irq = trap.prev_irq_level;
|
||||
|
||||
if (s_smp_enabled)
|
||||
smp_process_pending_messages();
|
||||
self->smp_process_pending_messages();
|
||||
|
||||
// Process the deferred call queue. Among other things, this ensures
|
||||
// that any pending thread unblocks happen before we enter the scheduler.
|
||||
|
@ -968,19 +946,8 @@ void Processor::exit_trap(TrapFrame& trap)
|
|||
check_invoke_scheduler();
|
||||
}
|
||||
|
||||
void Processor::check_invoke_scheduler()
|
||||
{
|
||||
VERIFY_INTERRUPTS_DISABLED();
|
||||
VERIFY(!m_in_irq);
|
||||
VERIFY(!m_in_critical);
|
||||
VERIFY(&Processor::current() == this);
|
||||
if (m_invoke_scheduler_async && m_scheduler_initialized) {
|
||||
m_invoke_scheduler_async = false;
|
||||
Scheduler::invoke_async();
|
||||
}
|
||||
}
|
||||
|
||||
void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
|
||||
{
|
||||
auto ptr = vaddr.as_ptr();
|
||||
while (page_count > 0) {
|
||||
|
@ -995,10 +962,17 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
|
|||
}
|
||||
}
|
||||
|
||||
void Processor::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::flush_entire_tlb_local()
|
||||
{
|
||||
write_cr3(read_cr3());
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
|
||||
{
|
||||
if (s_smp_enabled && (!Memory::is_user_address(vaddr) || Process::current().thread_count() > 1))
|
||||
smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
|
||||
Processor::smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
|
||||
else
|
||||
flush_tlb_local(vaddr, page_count);
|
||||
}
|
||||
|
@ -1042,7 +1016,8 @@ ProcessorMessage& Processor::smp_get_from_pool()
|
|||
return *msg;
|
||||
}
|
||||
|
||||
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
|
||||
template<typename T>
|
||||
u32 ProcessorBase<T>::smp_wake_n_idle_processors(u32 wake_count)
|
||||
{
|
||||
VERIFY_INTERRUPTS_DISABLED();
|
||||
VERIFY(wake_count > 0);
|
||||
|
@ -1061,7 +1036,7 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
|
|||
auto& apic = APIC::the();
|
||||
while (did_wake_count < wake_count) {
|
||||
// Try to get a set of idle CPUs and flip them to busy
|
||||
u32 idle_mask = s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
|
||||
u32 idle_mask = Processor::s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
|
||||
u32 idle_count = popcount(idle_mask);
|
||||
if (idle_count == 0)
|
||||
break; // No (more) idle processor available
|
||||
|
@ -1073,7 +1048,7 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
|
|||
found_mask |= 1u << cpu;
|
||||
}
|
||||
|
||||
idle_mask = s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
|
||||
idle_mask = Processor::s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
|
||||
if (idle_mask == 0)
|
||||
continue; // All of them were flipped to busy, try again
|
||||
idle_count = popcount(idle_mask);
|
||||
|
@ -1091,7 +1066,8 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
|
|||
return did_wake_count;
|
||||
}
|
||||
|
||||
UNMAP_AFTER_INIT void Processor::smp_enable()
|
||||
template<typename T>
|
||||
UNMAP_AFTER_INIT void ProcessorBase<T>::smp_enable()
|
||||
{
|
||||
size_t msg_pool_size = Processor::count() * 100u;
|
||||
size_t msg_entries_cnt = Processor::count();
|
||||
|
@ -1325,27 +1301,15 @@ void Processor::smp_broadcast_halt()
|
|||
APIC::the().broadcast_ipi();
|
||||
}
|
||||
|
||||
void Processor::Processor::halt()
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::halt()
|
||||
{
|
||||
if (s_smp_enabled)
|
||||
smp_broadcast_halt();
|
||||
Processor::smp_broadcast_halt();
|
||||
|
||||
halt_this();
|
||||
}
|
||||
|
||||
void Processor::deferred_call_queue(Function<void()> callback)
|
||||
{
|
||||
// NOTE: If we are called outside of a critical section and outside
|
||||
// of an irq handler, the function will be executed before we return!
|
||||
ScopedCritical critical;
|
||||
auto& cur_proc = Processor::current();
|
||||
|
||||
auto* entry = cur_proc.m_deferred_call_pool.get_free();
|
||||
entry->handler_value() = move(callback);
|
||||
|
||||
cur_proc.m_deferred_call_pool.queue_entry(entry);
|
||||
}
|
||||
|
||||
UNMAP_AFTER_INIT void Processor::gdt_init()
|
||||
{
|
||||
m_gdt_length = 0;
|
||||
|
@ -1381,28 +1345,9 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
|
|||
gs_base.set((u64)this);
|
||||
}
|
||||
|
||||
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
|
||||
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
|
||||
{
|
||||
VERIFY(!are_interrupts_enabled());
|
||||
VERIFY(is_kernel_mode());
|
||||
|
||||
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
|
||||
|
||||
VERIFY(to_thread == Thread::current());
|
||||
|
||||
Scheduler::enter_current(*from_thread);
|
||||
|
||||
auto in_critical = to_thread->saved_critical();
|
||||
VERIFY(in_critical > 0);
|
||||
Processor::restore_critical(in_critical);
|
||||
|
||||
// Since we got here and don't have Scheduler::context_switch in the
|
||||
// call stack (because this is the first time we switched into this
|
||||
// context), we need to notify the scheduler so that it can release
|
||||
// the scheduler lock. We don't want to enable interrupts at this point
|
||||
// as we're still in the middle of a context switch. Doing so could
|
||||
// trigger a context switch within a context switch, leading to a crash.
|
||||
Scheduler::leave_on_first_switch(InterruptsState::Disabled);
|
||||
do_context_first_init(from_thread, to_thread);
|
||||
}
|
||||
|
||||
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
|
||||
|
@ -1471,7 +1416,9 @@ extern "C" FlatPtr do_init_context(Thread* thread, u32 flags)
|
|||
return Processor::current().init_context(*thread, true);
|
||||
}
|
||||
|
||||
void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_state)
|
||||
// FIXME: Share this code with other architectures.
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::assume_context(Thread& thread, InterruptsState new_interrupts_state)
|
||||
{
|
||||
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
|
||||
|
||||
|
@ -1487,34 +1434,8 @@ void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_st
|
|||
VERIFY_NOT_REACHED();
|
||||
}
|
||||
|
||||
u64 Processor::time_spent_idle() const
|
||||
{
|
||||
return m_idle_thread->time_in_user() + m_idle_thread->time_in_kernel();
|
||||
}
|
||||
|
||||
void Processor::leave_critical()
|
||||
{
|
||||
InterruptDisabler disabler;
|
||||
current().do_leave_critical();
|
||||
}
|
||||
|
||||
void Processor::do_leave_critical()
|
||||
{
|
||||
VERIFY(m_in_critical > 0);
|
||||
if (m_in_critical == 1) {
|
||||
if (m_in_irq == 0) {
|
||||
m_deferred_call_pool.execute_pending();
|
||||
VERIFY(m_in_critical == 1);
|
||||
}
|
||||
m_in_critical = 0;
|
||||
if (m_in_irq == 0)
|
||||
check_invoke_scheduler();
|
||||
} else {
|
||||
m_in_critical = m_in_critical - 1;
|
||||
}
|
||||
}
|
||||
|
||||
u32 Processor::clear_critical()
|
||||
template<typename T>
|
||||
u32 ProcessorBase<T>::clear_critical()
|
||||
{
|
||||
InterruptDisabler disabler;
|
||||
auto prev_critical = in_critical();
|
||||
|
@ -1563,13 +1484,14 @@ NAKED void do_assume_context(Thread*, u32)
|
|||
// clang-format on
|
||||
}
|
||||
|
||||
StringView Processor::platform_string()
|
||||
template<typename T>
|
||||
StringView ProcessorBase<T>::platform_string()
|
||||
{
|
||||
return "x86_64"sv;
|
||||
}
|
||||
|
||||
// FIXME: For the most part this is a copy of the i386-specific function, get rid of the code duplication
|
||||
FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
|
||||
template<typename T>
|
||||
FlatPtr ProcessorBase<T>::init_context(Thread& thread, bool leave_crit)
|
||||
{
|
||||
VERIFY(is_kernel_mode());
|
||||
VERIFY(g_scheduler_lock.is_locked());
|
||||
|
@ -1673,11 +1595,13 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
|
|||
return stack_top;
|
||||
}
|
||||
|
||||
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::switch_context(Thread*& from_thread, Thread*& to_thread)
|
||||
{
|
||||
VERIFY(!m_in_irq);
|
||||
VERIFY(m_in_critical == 1);
|
||||
VERIFY(is_kernel_mode());
|
||||
auto* self = static_cast<Processor*>(this);
|
||||
|
||||
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
|
||||
|
||||
|
@ -1739,8 +1663,8 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
|||
: [from_rsp] "=m" (from_thread->regs().rsp),
|
||||
[from_rbp] "=m" (from_thread->regs().rbp),
|
||||
[from_rip] "=m" (from_thread->regs().rip),
|
||||
[tss_rsp0l] "=m" (m_tss.rsp0l),
|
||||
[tss_rsp0h] "=m" (m_tss.rsp0h),
|
||||
[tss_rsp0l] "=m" (self->m_tss.rsp0l),
|
||||
[tss_rsp0h] "=m" (self->m_tss.rsp0h),
|
||||
"=d" (from_thread), // needed so that from_thread retains the correct value
|
||||
"=a" (to_thread) // needed so that to_thread retains the correct value
|
||||
: [to_rsp] "g" (to_thread->regs().rsp),
|
||||
|
@ -1755,14 +1679,16 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
|
|||
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
|
||||
}
|
||||
|
||||
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
|
||||
template<typename T>
|
||||
UNMAP_AFTER_INIT void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
|
||||
{
|
||||
VERIFY(initial_thread.process().is_kernel_process());
|
||||
auto* self = static_cast<Processor*>(this);
|
||||
|
||||
auto& regs = initial_thread.regs();
|
||||
m_tss.iomapbase = sizeof(m_tss);
|
||||
m_tss.rsp0l = regs.rsp0 & 0xffffffff;
|
||||
m_tss.rsp0h = regs.rsp0 >> 32;
|
||||
self->m_tss.iomapbase = sizeof(self->m_tss);
|
||||
self->m_tss.rsp0l = regs.rsp0 & 0xffffffff;
|
||||
self->m_tss.rsp0h = regs.rsp0 >> 32;
|
||||
|
||||
m_scheduler_initialized = true;
|
||||
|
||||
|
@ -1791,10 +1717,31 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
|
|||
VERIFY_NOT_REACHED();
|
||||
}
|
||||
|
||||
void Processor::set_thread_specific_data(VirtualAddress thread_specific_data)
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::set_thread_specific_data(VirtualAddress thread_specific_data)
|
||||
{
|
||||
MSR fs_base_msr(MSR_FS_BASE);
|
||||
fs_base_msr.set(thread_specific_data.get());
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::idle_begin() const
|
||||
{
|
||||
Processor::s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::idle_end() const
|
||||
{
|
||||
Processor::s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void ProcessorBase<T>::wait_for_interrupt() const
|
||||
{
|
||||
asm("hlt");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#include <Kernel/Arch/ProcessorFunctions.include>
|
||||
|
|
|
@ -40,37 +40,28 @@ struct ProcessorMessageEntry;
|
|||
#define MSR_IA32_EFER 0xc0000080
|
||||
#define MSR_IA32_PAT 0x277
|
||||
|
||||
// FIXME: Find a better place for these
|
||||
extern "C" void thread_context_first_enter(void);
|
||||
extern "C" void exit_kernel_thread(void);
|
||||
extern "C" void do_assume_context(Thread* thread, u32 flags);
|
||||
|
||||
struct [[gnu::aligned(64), gnu::packed]] FPUState {
|
||||
SIMD::LegacyRegion legacy_region;
|
||||
SIMD::Header xsave_header;
|
||||
|
||||
// FIXME: This should be dynamically allocated! For now, we only save the `YMM` registers here,
|
||||
// so this will do for now. The size of the area is queried via CPUID(EAX=0dh, ECX=2):EAX.
|
||||
// https://www.intel.com/content/dam/develop/external/us/en/documents/36945
|
||||
u8 ext_save_area[256];
|
||||
};
|
||||
|
||||
enum class InterruptsState;
|
||||
class Processor;
|
||||
|
||||
template<typename ProcessorT>
|
||||
class ProcessorBase;
|
||||
|
||||
// Note: We only support 64 processors at most at the moment,
|
||||
// so allocate 64 slots of inline capacity in the container.
|
||||
|
||||
constexpr size_t MAX_CPU_COUNT = 64;
|
||||
using ProcessorContainer = Array<Processor*, MAX_CPU_COUNT>;
|
||||
|
||||
class Processor {
|
||||
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, [[maybe_unused]] TrapFrame* trap);
|
||||
|
||||
// If this fails to compile because ProcessorBase was not found, you are including this header directly.
|
||||
// Include Arch/Processor.h instead.
|
||||
class Processor final : public ProcessorBase<Processor> {
|
||||
friend class ProcessorInfo;
|
||||
// Allow some implementations to access the idle CPU mask and various x86 implementation details.
|
||||
friend class ProcessorBase<Processor>;
|
||||
|
||||
AK_MAKE_NONCOPYABLE(Processor);
|
||||
AK_MAKE_NONMOVABLE(Processor);
|
||||
|
||||
Processor* m_self;
|
||||
|
||||
private:
|
||||
// Saved user stack for the syscall instruction.
|
||||
void* m_user_stack;
|
||||
|
||||
|
@ -78,34 +69,15 @@ class Processor {
|
|||
alignas(Descriptor) Descriptor m_gdt[256];
|
||||
u32 m_gdt_length;
|
||||
|
||||
u32 m_cpu;
|
||||
FlatPtr m_in_irq;
|
||||
volatile u32 m_in_critical;
|
||||
static Atomic<u32> s_idle_cpu_mask;
|
||||
|
||||
TSS m_tss;
|
||||
static FPUState s_clean_fpu_state;
|
||||
CPUFeature::Type m_features;
|
||||
static Atomic<u32> g_total_processors;
|
||||
u8 m_physical_address_bit_width;
|
||||
u8 m_virtual_address_bit_width;
|
||||
bool m_has_qemu_hvf_quirk;
|
||||
|
||||
ProcessorInfo* m_info;
|
||||
Thread* m_current_thread;
|
||||
Thread* m_idle_thread;
|
||||
|
||||
Atomic<ProcessorMessageEntry*> m_message_queue;
|
||||
|
||||
bool m_invoke_scheduler_async;
|
||||
bool m_scheduler_initialized;
|
||||
bool m_in_scheduler;
|
||||
Atomic<bool> m_halt_requested;
|
||||
|
||||
DeferredCallPool m_deferred_call_pool {};
|
||||
|
||||
void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];
|
||||
|
||||
void gdt_init();
|
||||
void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
|
||||
void write_gdt_entry(u16 selector, Descriptor& descriptor);
|
||||
|
@ -123,66 +95,10 @@ class Processor {
|
|||
void cpu_detect();
|
||||
void cpu_setup();
|
||||
|
||||
public:
|
||||
Processor() = default;
|
||||
|
||||
void early_initialize(u32 cpu);
|
||||
void initialize(u32 cpu);
|
||||
|
||||
void detect_hypervisor();
|
||||
void detect_hypervisor_hyperv(CPUID const& hypervisor_leaf_range);
|
||||
|
||||
void idle_begin() const
|
||||
{
|
||||
s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void idle_end() const
|
||||
{
|
||||
s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void wait_for_interrupt() const
|
||||
{
|
||||
asm("hlt");
|
||||
}
|
||||
|
||||
static Processor& by_id(u32);
|
||||
|
||||
static u32 count()
|
||||
{
|
||||
// NOTE: because this value never changes once all APs are booted,
|
||||
// we can safely bypass loading it atomically.
|
||||
return *g_total_processors.ptr();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static u64 read_cpu_counter()
|
||||
{
|
||||
return read_tsc();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static void pause()
|
||||
{
|
||||
asm volatile("pause");
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static void wait_check()
|
||||
{
|
||||
Processor::pause();
|
||||
if (Processor::is_smp_enabled())
|
||||
Processor::current().smp_process_pending_messages();
|
||||
}
|
||||
|
||||
[[noreturn]] static void halt();
|
||||
|
||||
static void flush_entire_tlb_local()
|
||||
{
|
||||
write_cr3(read_cr3());
|
||||
}
|
||||
|
||||
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
|
||||
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
|
||||
|
||||
public:
|
||||
Descriptor& get_gdt_entry(u16 selector);
|
||||
void flush_gdt();
|
||||
DescriptorTablePointer const& get_gdtr();
|
||||
|
@ -222,15 +138,8 @@ public:
|
|||
return {};
|
||||
}
|
||||
|
||||
ALWAYS_INLINE u8 physical_address_bit_width() const { return m_physical_address_bit_width; }
|
||||
ALWAYS_INLINE u8 virtual_address_bit_width() const { return m_virtual_address_bit_width; }
|
||||
|
||||
ALWAYS_INLINE ProcessorInfo& info() { return *m_info; }
|
||||
|
||||
u64 time_spent_idle() const;
|
||||
|
||||
static bool is_smp_enabled();
|
||||
|
||||
static constexpr u64 user_stack_offset()
|
||||
{
|
||||
return __builtin_offsetof(Processor, m_user_stack);
|
||||
|
@ -240,180 +149,157 @@ public:
|
|||
return __builtin_offsetof(Processor, m_tss) + __builtin_offsetof(TSS, rsp0l);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static Processor& current()
|
||||
{
|
||||
return *(Processor*)read_gs_ptr(__builtin_offsetof(Processor, m_self));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static bool is_initialized()
|
||||
{
|
||||
return read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* get_specific()
|
||||
{
|
||||
return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
|
||||
}
|
||||
|
||||
void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
|
||||
{
|
||||
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
|
||||
{
|
||||
m_idle_thread = &idle_thread;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static Thread* current_thread()
|
||||
{
|
||||
// If we were to use Processor::current here, we'd have to
|
||||
// disable interrupts to prevent a race where we may get pre-empted
|
||||
// right after getting the Processor structure and then get moved
|
||||
// to another processor, which would lead us to get the wrong thread.
|
||||
// To avoid having to disable interrupts, we can just read the field
|
||||
// directly in an atomic fashion, similar to Processor::current.
|
||||
return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_current_thread));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
|
||||
{
|
||||
// See comment in Processor::current_thread
|
||||
write_gs_ptr(__builtin_offsetof(Processor, m_current_thread), FlatPtr(¤t_thread));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static Thread* idle_thread()
|
||||
{
|
||||
// See comment in Processor::current_thread
|
||||
return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_idle_thread));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE u32 id() const
|
||||
{
|
||||
// NOTE: This variant should only be used when iterating over all
|
||||
// Processor instances, or when it's guaranteed that the thread
|
||||
// cannot move to another processor in between calling Processor::current
|
||||
// and Processor::get_id, or if this fact is not important.
|
||||
// All other cases should use Processor::id instead!
|
||||
return m_cpu;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static u32 current_id()
|
||||
{
|
||||
// See comment in Processor::current_thread
|
||||
return read_gs_ptr(__builtin_offsetof(Processor, m_cpu));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static bool is_bootstrap_processor()
|
||||
{
|
||||
return Processor::current_id() == 0;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static FlatPtr current_in_irq()
|
||||
{
|
||||
return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static void enter_critical()
|
||||
{
|
||||
write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static bool current_in_scheduler()
|
||||
{
|
||||
return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static void set_current_in_scheduler(bool value)
|
||||
{
|
||||
write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
|
||||
}
|
||||
|
||||
private:
|
||||
void do_leave_critical();
|
||||
|
||||
public:
|
||||
static void leave_critical();
|
||||
static u32 clear_critical();
|
||||
|
||||
ALWAYS_INLINE static void restore_critical(u32 prev_critical)
|
||||
{
|
||||
// NOTE: This doesn't have to be atomic, and it's also fine if we
|
||||
// get preempted in between these steps. If we move to another
|
||||
// processors m_in_critical will move along with us. And if we
|
||||
// are preempted, we would resume with the same flags.
|
||||
write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static u32 in_critical()
|
||||
{
|
||||
// See comment in Processor::current_thread
|
||||
return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static void verify_no_spinlocks_held()
|
||||
{
|
||||
VERIFY(!Processor::in_critical());
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }
|
||||
|
||||
static void smp_enable();
|
||||
bool smp_process_pending_messages();
|
||||
|
||||
static void smp_unicast(u32 cpu, Function<void()>, bool async);
|
||||
static void smp_broadcast_flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
|
||||
static u32 smp_wake_n_idle_processors(u32 wake_count);
|
||||
|
||||
static void deferred_call_queue(Function<void()> callback);
|
||||
|
||||
    // Whether this CPU supports the NX (no-execute) page protection bit.
    ALWAYS_INLINE bool has_nx() const
    {
        return has_feature(CPUFeature::NX);
    }

    // Whether this CPU supports Page Attribute Tables.
    ALWAYS_INLINE bool has_pat() const
    {
        return has_feature(CPUFeature::PAT);
    }

    // Whether the given CPU feature flag was detected on this CPU.
    ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
    {
        return m_features.has_flag(feature);
    }
|
||||
|
||||
    // Whether interrupts are currently enabled on this CPU.
    ALWAYS_INLINE static bool are_interrupts_enabled()
    {
        return Kernel::are_interrupts_enabled();
    }

    // Unmasks interrupts on this CPU (x86 STI).
    ALWAYS_INLINE static void enable_interrupts()
    {
        sti();
    }

    // Masks interrupts on this CPU (x86 CLI).
    ALWAYS_INLINE static void disable_interrupts()
    {
        cli();
    }
|
||||
|
||||
void check_invoke_scheduler();
|
||||
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
|
||||
|
||||
void enter_trap(TrapFrame& trap, bool raise_irq);
|
||||
|
||||
void exit_trap(TrapFrame& trap);
|
||||
|
||||
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
|
||||
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
|
||||
[[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
|
||||
FlatPtr init_context(Thread& thread, bool leave_crit);
|
||||
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
|
||||
|
||||
static StringView platform_string();
|
||||
|
||||
static void set_thread_specific_data(VirtualAddress thread_specific_data);
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE Thread* ProcessorBase<T>::current_thread()
|
||||
{
|
||||
// If we were to use ProcessorBase::current here, we'd have to
|
||||
// disable interrupts to prevent a race where we may get pre-empted
|
||||
// right after getting the Processor structure and then get moved
|
||||
// to another processor, which would lead us to get the wrong thread.
|
||||
// To avoid having to disable interrupts, we can just read the field
|
||||
// directly in an atomic fashion, similar to Processor::current.
|
||||
return (Thread*)read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_current_thread));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE u32 ProcessorBase<T>::current_id()
|
||||
{
|
||||
// See comment in ProcessorBase::current_thread
|
||||
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_cpu));
|
||||
}
|
||||
|
||||
// Restores a previously saved critical-section depth on this CPU.
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::restore_critical(u32 prev_critical)
{
    // NOTE: This doesn't have to be atomic, and it's also fine if we
    // get preempted in between these steps. If we move to another
    // processors m_in_critical will move along with us. And if we
    // are preempted, we would resume with the same flags.
    write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_critical), prev_critical);
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE u32 ProcessorBase<T>::in_critical()
|
||||
{
|
||||
// See comment in ProcessorBase::current_thread
|
||||
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_critical));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE void ProcessorBase<T>::set_current_thread(Thread& current_thread)
|
||||
{
|
||||
// See comment in ProcessorBase::current_thread
|
||||
write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_current_thread), FlatPtr(¤t_thread));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE Thread* ProcessorBase<T>::idle_thread()
|
||||
{
|
||||
// See comment in ProcessorBase::current_thread
|
||||
return (Thread*)read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_idle_thread));
|
||||
}
|
||||
|
||||
// Returns the Processor structure of the CPU we are currently running on.
// m_self holds a pointer back to this per-CPU structure, so a gs-relative
// read of it always yields the structure for the executing CPU.
// NOTE(review): the cast names the concrete Processor type rather than T;
// presumably T is always Processor here -- confirm.
template<typename T>
T& ProcessorBase<T>::current()
{
    return *(Processor*)(read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_self)));
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE bool ProcessorBase<T>::is_initialized()
|
||||
{
|
||||
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_self)) != 0;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE void ProcessorBase<T>::enter_critical()
|
||||
{
|
||||
write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_critical), in_critical() + 1);
|
||||
}
|
||||
|
||||
// Whether interrupts are currently enabled on this CPU.
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::are_interrupts_enabled()
{
    return Kernel::are_interrupts_enabled();
}

// Whether we are executing in kernel mode (ring 0).
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_kernel_mode()
{
    // The low two bits of the CS selector hold the current privilege
    // level (CPL); zero means ring 0, i.e. kernel mode.
    u16 cs;
    asm volatile(
        "mov %%cs, %[cs] \n"
        : [cs] "=g"(cs));
    return (cs & 3) == 0;
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE bool ProcessorBase<T>::current_in_scheduler()
|
||||
{
|
||||
auto value = read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_scheduler));
|
||||
return value;
|
||||
}
|
||||
|
||||
// Marks this CPU as (not) being inside the scheduler.
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_in_scheduler(bool value)
{
    write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_scheduler), value);
}

// Unmasks interrupts on this CPU (x86 STI).
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enable_interrupts()
{
    sti();
}

// Masks interrupts on this CPU (x86 CLI).
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::disable_interrupts()
{
    cli();
}

// Whether this CPU supports the NX (no-execute) page protection bit.
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_nx() const
{
    return has_feature(CPUFeature::NX);
}

// Whether this CPU supports Page Attribute Tables.
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_pat() const
{
    return has_feature(CPUFeature::PAT);
}
|
||||
|
||||
// Reads this CPU's cycle counter (x86 TSC).
template<typename T>
ALWAYS_INLINE u64 ProcessorBase<T>::read_cpu_counter()
{
    return read_tsc();
}

// Spin-wait hint; tells the CPU we are in a busy-wait loop.
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::pause()
{
    asm volatile("pause");
}

// One iteration of a polite busy-wait: pause, and if SMP is enabled,
// service any pending SMP messages so other CPUs are not blocked on us.
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::wait_check()
{
    Processor::pause();
    if (Processor::is_smp_enabled())
        Processor::current().smp_process_pending_messages();
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE FlatPtr ProcessorBase<T>::current_in_irq()
|
||||
{
|
||||
return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -5,9 +5,9 @@
|
|||
*/
|
||||
|
||||
#include <Kernel/API/Syscall.h>
|
||||
#include <Kernel/Arch/Processor.h>
|
||||
#include <Kernel/Arch/TrapFrame.h>
|
||||
#include <Kernel/Arch/x86_64/DescriptorTable.h>
|
||||
#include <Kernel/Arch/x86_64/Processor.h>
|
||||
#include <Kernel/Library/Assertions.h>
|
||||
#include <Kernel/Library/Panic.h>
|
||||
#include <Kernel/Tasks/Process.h>
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*/
|
||||
|
||||
#include <Kernel/Arch/Processor.h>
|
||||
#include <Kernel/Arch/x86_64/TrapFrame.h>
|
||||
#include <Kernel/Interrupts/InterruptDisabler.h>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
extern "C" void enter_trap_no_irq(TrapFrame* trap)
|
||||
{
|
||||
InterruptDisabler disable;
|
||||
Processor::current().enter_trap(*trap, false);
|
||||
}
|
||||
|
||||
extern "C" void enter_trap(TrapFrame* trap)
|
||||
{
|
||||
InterruptDisabler disable;
|
||||
Processor::current().enter_trap(*trap, true);
|
||||
}
|
||||
|
||||
extern "C" void exit_trap(TrapFrame* trap)
|
||||
{
|
||||
InterruptDisabler disable;
|
||||
return Processor::current().exit_trap(*trap);
|
||||
}
|
||||
|
||||
}
|
|
@ -31,8 +31,4 @@ struct TrapFrame {
|
|||
|
||||
static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());
|
||||
|
||||
extern "C" void enter_trap_no_irq(TrapFrame* trap) __attribute__((used));
|
||||
extern "C" void enter_trap(TrapFrame*) __attribute__((used));
|
||||
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
|
||||
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue