1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-28 19:27:36 +00:00

Kernel: Reorganize Arch/x86 directory to Arch/x86_64 after i686 removal

No functional change.
This commit is contained in:
Liav A 2022-10-04 13:46:11 +03:00 committed by Andreas Kling
parent 5ff318cf3a
commit 91db482ad3
129 changed files with 482 additions and 1116 deletions

View file

@ -0,0 +1,117 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Types.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
#include <Kernel/Sections.h>
namespace Kernel {

// XCR index 0 selects XCR0, the XFEATURE_ENABLED_MASK register.
#define XCR_XFEATURE_ENABLED_MASK 0

// Reads the 64-bit XCR0 register; xgetbv returns it split across edx:eax.
UNMAP_AFTER_INIT u64 read_xcr0()
{
    u32 eax, edx;
    asm volatile("xgetbv"
                 : "=a"(eax), "=d"(edx)
                 : "c"(XCR_XFEATURE_ENABLED_MASK));
    // Combine the halves with '|' (not '+'): the bit ranges are disjoint, and
    // this matches how read_tsc() composes edx:eax in ASM_wrapper.h.
    return eax | ((u64)edx << 32);
}

// Writes the 64-bit XCR0 register; xsetbv takes it split across edx:eax.
UNMAP_AFTER_INIT void write_xcr0(u64 value)
{
    u32 eax = value;
    u32 edx = value >> 32;
    asm volatile("xsetbv" ::"a"(eax), "d"(edx), "c"(XCR_XFEATURE_ENABLED_MASK));
}

// Temporarily permits supervisor-mode access to user pages.
// No-op on CPUs without SMAP.
void stac()
{
    if (!Processor::current().has_feature(CPUFeature::SMAP))
        return;
    asm volatile("stac" ::
                 : "cc");
}

// Re-arms SMAP protection after a stac() window.
// No-op on CPUs without SMAP.
void clac()
{
    if (!Processor::current().has_feature(CPUFeature::SMAP))
        return;
    asm volatile("clac" ::
                 : "cc");
}

UNMAP_AFTER_INIT void write_cr0(FlatPtr value)
{
    asm volatile("mov %%rax, %%cr0" ::"a"(value));
}

UNMAP_AFTER_INIT void write_cr4(FlatPtr value)
{
    asm volatile("mov %%rax, %%cr4" ::"a"(value));
}

FlatPtr read_cr0()
{
    FlatPtr cr0;
    asm("mov %%cr0, %%rax"
        : "=a"(cr0));
    return cr0;
}

// CR2 holds the faulting linear address after a page fault.
FlatPtr read_cr2()
{
    FlatPtr cr2;
    asm("mov %%cr2, %%rax"
        : "=a"(cr2));
    return cr2;
}

// CR3 holds the physical address of the top-level page table.
FlatPtr read_cr3()
{
    FlatPtr cr3;
    asm("mov %%cr3, %%rax"
        : "=a"(cr3));
    return cr3;
}

// Installs a new top-level page table ("memory" clobber: this changes
// what every subsequent memory access means).
void write_cr3(FlatPtr cr3)
{
    // NOTE: If you're here from a GPF crash, it's very likely that a PDPT entry is incorrect, not this!
    asm volatile("mov %%rax, %%cr3" ::"a"(cr3)
                 : "memory");
}

FlatPtr read_cr4()
{
    FlatPtr cr4;
    asm("mov %%cr4, %%rax"
        : "=a"(cr4));
    return cr4;
}

// Generates read_dr<index>() / write_dr<index>() accessors for one of the
// x86 debug registers.
#define DEFINE_DEBUG_REGISTER(index)                         \
    FlatPtr read_dr##index()                                 \
    {                                                        \
        FlatPtr value;                                       \
        asm("mov %%dr" #index ", %%rax"                      \
            : "=a"(value));                                  \
        return value;                                        \
    }                                                        \
    void write_dr##index(FlatPtr value)                      \
    {                                                        \
        asm volatile("mov %%rax, %%dr" #index ::"a"(value)); \
    }

DEFINE_DEBUG_REGISTER(0);
DEFINE_DEBUG_REGISTER(1);
DEFINE_DEBUG_REGISTER(2);
DEFINE_DEBUG_REGISTER(3);
DEFINE_DEBUG_REGISTER(6);
DEFINE_DEBUG_REGISTER(7);

}

View file

@ -0,0 +1,163 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {

// Disables maskable interrupts on the current CPU.
ALWAYS_INLINE void cli()
{
    asm volatile("cli" ::
                 : "memory");
}

// Enables maskable interrupts on the current CPU.
ALWAYS_INLINE void sti()
{
    asm volatile("sti" ::
                 : "memory");
}

// Returns the current RFLAGS value (pushed/popped via the stack).
ALWAYS_INLINE FlatPtr cpu_flags()
{
    FlatPtr flags;
    asm volatile(
        "pushf\n"
        "pop %0\n"
        : "=rm"(flags)::"memory");
    return flags;
}

// Reads a value of type T located `offset` bytes past the GS segment base.
template<typename T>
ALWAYS_INLINE T read_gs_value(FlatPtr offset)
{
    T val;
    asm volatile(
        "mov %%gs:%a[off], %[val]"
        : [val] "=r"(val)
        : [off] "ir"(offset));
    return val;
}

// Writes a value of type T `offset` bytes past the GS segment base.
template<typename T>
ALWAYS_INLINE void write_gs_value(FlatPtr offset, T val)
{
    asm volatile(
        "mov %[val], %%gs:%a[off]" ::[off] "ir"(offset), [val] "r"(val)
        : "memory");
}

// Reads a pointer-sized value `offset` bytes past the GS segment base.
ALWAYS_INLINE FlatPtr read_gs_ptr(FlatPtr offset)
{
    FlatPtr val;
    asm volatile(
        "mov %%gs:%a[off], %[val]"
        : [val] "=r"(val)
        : [off] "ir"(offset));
    return val;
}

// Writes a pointer-sized value `offset` bytes past the GS segment base.
// NOTE(review): the offset parameter is u32 here but FlatPtr in
// read_gs_ptr() above — presumably fine for the offsets in use; confirm.
ALWAYS_INLINE void write_gs_ptr(u32 offset, FlatPtr val)
{
    asm volatile(
        "mov %[val], %%gs:%a[off]" ::[off] "ir"(offset), [val] "r"(val)
        : "memory");
}

// Returns true if the interrupt flag (bit 9, 0x200, of RFLAGS) is set.
ALWAYS_INLINE bool are_interrupts_enabled()
{
    return (cpu_flags() & 0x200) != 0;
}

// Control-register accessors (defined in ASM_wrapper.cpp).
FlatPtr read_cr0();
FlatPtr read_cr2();
FlatPtr read_cr3();
FlatPtr read_cr4();
u64 read_xcr0();

void write_cr0(FlatPtr);
void write_cr3(FlatPtr);
void write_cr4(FlatPtr);
void write_xcr0(u64);

void flush_idt();

// Loads the task register with the given TSS selector.
ALWAYS_INLINE void load_task_register(u16 selector)
{
    asm("ltr %0" ::"r"(selector));
}

// Debug-register accessors (defined in ASM_wrapper.cpp).
FlatPtr read_dr0();
void write_dr0(FlatPtr);
FlatPtr read_dr1();
void write_dr1(FlatPtr);
FlatPtr read_dr2();
void write_dr2(FlatPtr);
FlatPtr read_dr3();
void write_dr3(FlatPtr);
FlatPtr read_dr6();
void write_dr6(FlatPtr);
FlatPtr read_dr7();
void write_dr7(FlatPtr);

// Returns true when running at CPL 0; the low two bits of CS hold the
// current privilege level.
ALWAYS_INLINE static bool is_kernel_mode()
{
    u16 cs;
    asm volatile(
        "mov %%cs, %[cs] \n"
        : [cs] "=g"(cs));
    return (cs & 3) == 0;
}

// Reads the time-stamp counter, split into its 32-bit halves (edx:eax).
ALWAYS_INLINE void read_tsc(u32& lsw, u32& msw)
{
    asm volatile("rdtsc"
                 : "=d"(msw), "=a"(lsw));
}

// Reads the time-stamp counter as one 64-bit value.
ALWAYS_INLINE u64 read_tsc()
{
    u32 lsw;
    u32 msw;
    read_tsc(lsw, msw);
    return ((u64)msw << 32) | lsw;
}

// Returns a hardware random number; loops until the carry flag signals
// that rdrand produced a valid value.
ALWAYS_INLINE u32 rdrand()
{
    u32 value;
    asm volatile(
        "1:\n"
        "rdrand %0\n"
        "jnc 1b\n"
        : "=r"(value)::"cc");
    return value;
}

// Returns a hardware entropy sample; loops until the carry flag signals
// that rdseed produced a valid value.
ALWAYS_INLINE u32 rdseed()
{
    u32 value;
    asm volatile(
        "1:\n"
        "rdseed %0\n"
        "jnc 1b\n"
        : "=r"(value)::"cc");
    return value;
}

// SMAP toggles (defined in ASM_wrapper.cpp).
void stac();
void clac();

// Halts this CPU forever with interrupts disabled.
[[noreturn]] ALWAYS_INLINE void halt_this()
{
    for (;;) {
        asm volatile("cli; hlt");
    }
}

}

View file

@ -0,0 +1,13 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
namespace Kernel {

// Emits a single character to the Bochs-style debug output
// (implementation provided elsewhere in the kernel).
void bochs_debug_output(char ch);

}

View file

@ -0,0 +1,230 @@
/*
 * AP (application processor) boot trampoline, GAS/AT&T syntax.
 * The BSP copies everything from apic_ap_start to end_apic_ap_start to
 * physical 0x8000, patches the ap_cpu_* variables below, then sends
 * INIT/SIPI to start each AP here in 16-bit real mode. Each AP climbs
 * from real mode through protected mode into long mode and finally calls
 * the kernel entry function. All code/data here is addressed relative to
 * apic_ap_start to avoid relocation entries.
 */
#include <Kernel/Sections.h>

.section .text

.global gdt64ptr
gdt64ptr:
    .quad 0

.global code64_sel
code64_sel:
    .short 0

/*
    The apic_ap_start function will be loaded to P0x00008000 where the APIC
    will boot the AP from in real mode. This code also contains space for
    special variables that *must* remain here. When initializing the APIC,
    the code here gets copied to P0x00008000, the variables in here get
    populated and then the boot of the APs will be triggered.

    Having the variables here allows us to access them from real mode. Also, the
    code here avoids the need for relocation entries.

    Basically, the variables between apic_ap_start and end_apic_ap_start
    *MUST* remain here and cannot be moved into a .bss or any other location.
*/
.global apic_ap_start
.type apic_ap_start, @function
.align 8
apic_ap_start:
.code16
    /* 16-bit real-mode entry point: APs land here after the SIPI. */
    cli
    jmp $0x800, $(1f - apic_ap_start) /* avoid relocation entries */
1:
    mov %cs, %ax
    mov %ax, %ds

    xor %ax, %ax
    mov %ax, %sp

    /* load the first temporary gdt */
    lgdt (ap_cpu_gdtr_initial - apic_ap_start)

    /* enable PM */
    movl %cr0, %eax
    orl $1, %eax
    movl %eax, %cr0

    ljmpl $8, $(apic_ap_start32 - apic_ap_start + 0x8000)
apic_ap_start32:
.code32
    /* 32-bit protected-mode segment setup; selector 0x10 = data segment. */
    mov $0x10, %ax
    mov %ax, %ss
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %fs
    mov %ax, %gs

    movl $0x8000, %ebp

    /* generate a unique ap cpu id (0 means 1st ap, not bsp!) */
    xorl %eax, %eax
    incl %eax
    lock; xaddl %eax, (ap_cpu_id - apic_ap_start)(%ebp) /* avoid relocation entries */
    movl %eax, %esi

    /* check if we support NX and enable it if we do */
    movl $0x80000001, %eax
    cpuid
    testl $0x100000, %edx
    je (1f - apic_ap_start + 0x8000)

    /* turn on IA32_EFER.NXE */
    movl $0xc0000080, %ecx
    rdmsr
    orl $0x800, %eax
    wrmsr
1:

    /* load the bsp's cr3 value */
    movl (ap_cpu_init_cr3 - apic_ap_start)(%ebp), %eax
    movl %eax, %cr3

    /* Enter Long-mode! ref(https://wiki.osdev.org/Setting_Up_Long_Mode)*/
    mov $0xC0000080, %ecx /* Set the C-register to 0xC0000080, which is the EFER MSR.*/
    rdmsr                 /* Read from the model-specific register.*/
    or $(1 << 8), %eax    /* Set the LM-bit which is the 9th bit (bit 8).*/
    wrmsr                 /* Write to the model-specific register.*/

    /* enable PAE + PSE */
    movl %cr4, %eax
    orl $0x60, %eax
    movl %eax, %cr4

    /* enable PG */
    movl %cr0, %eax
    orl $0x80000000, %eax
    movl %eax, %cr0

    /* load the temporary 64-bit gdt from boot that points above 3GB */
    lgdt (ap_cpu_gdt64ptr - apic_ap_start + 0x8000)

    /* Jump into our identity mapped area, stay in low memory for now.
       We need to fully enable 64 bit mode before we can adjust rsp and rip
       to values higher than 4GB */
    ljmpl $(ap_cpu_gdt64code - ap_cpu_gdt64), $(apic_ap_start64 - apic_ap_start + 0x8000)

.code64
apic_ap_start64:
    /* rbp currently holds 0x8000; replace it with the kernel map base + 0x8000
       so the ap_cpu_* variables can be reached via their high-memory mapping. */
    movq (ap_cpu_kernel_map_base - apic_ap_start)(%rbp), %rbp
    addq $0x8000, %rbp

    /* find our allocated stack based on the generated id */
    movq (ap_cpu_init_stacks - apic_ap_start)(%rbp, %rsi, 8), %rsp

    /* Now jump from our identity mapped area into high memory */
    movq $(1f - apic_ap_start), %rax
    addq %rbp, %rax
    jmp *%rax
1:
    /* Null out the data segment registers; 64-bit mode ignores their bases. */
    mov $0, %ax
    mov %ax, %ss
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %fs
    mov %ax, %gs

    /* flush the TLB */
    movq %cr3, %rax
    movq %rax, %cr3

    /* now load the final gdt and idt from the identity mapped area */
    movq (ap_cpu_gdtr - apic_ap_start)(%rbp), %rax
    lgdt (%rax)
    movq (ap_cpu_idtr - apic_ap_start)(%rbp), %rax
    lidt (%rax)

    /* set same cr0 and cr4 values as the BSP */
    movq (ap_cpu_init_cr0 - apic_ap_start)(%rbp), %rax
    movq %rax, %cr0
    movq (ap_cpu_init_cr4 - apic_ap_start)(%rbp), %rax
    movq %rax, %cr4

    /* Save the cpu id into rdi (first argument), 0 representing the bsp */
    movq %rsi, %rdi
    incq %rdi

    /* Save the Processor pointer this CPU is going to use into rsi (second argument) */
    movq (ap_cpu_init_processor_info_array - apic_ap_start)(%rbp), %rax
    movq 0(%rax, %rsi, 8), %rsi

    /* Get the entry function */
    movq (ap_cpu_kernel_entry_function - apic_ap_start)(%rbp), %r10

    movq %rsp, %rbp
    cld

    /* We are in identity mapped P0x8000 and the BSP will unload this code
       once all APs are initialized, so call the entry function and return to our
       infinite loop if it ever were to return. */
    call *%r10

loop:
    hlt
    jmp loop

/* ---- Data patched by the BSP before the APs are started. Everything below
   must stay inside [apic_ap_start, end_apic_ap_start) -- see the comment at
   the top of apic_ap_start. ---- */
.align 4
.global apic_ap_start_size
apic_ap_start_size:
    .2byte end_apic_ap_start - apic_ap_start
.align 4
ap_cpu_id:
    .4byte 0x0
ap_cpu_gdt:
    /* null */
    .8byte 0x0
    /* code */
    .4byte 0x0000FFFF
    .4byte 0x00cf9a00
    /* data */
    .4byte 0x0000FFFF
    .4byte 0x00cf9200
ap_cpu_gdt_end:
ap_cpu_gdtr_initial:
    .2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
    .4byte (ap_cpu_gdt - apic_ap_start) + 0x8000
.align 8
.global ap_cpu_gdtr
ap_cpu_gdtr:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_idtr
ap_cpu_idtr:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_init_cr0
ap_cpu_init_cr0:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_init_cr3
ap_cpu_init_cr3:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_init_cr4
ap_cpu_init_cr4:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_gdt64
ap_cpu_gdt64:
    .8byte 0x0
.global ap_cpu_gdt64code
ap_cpu_gdt64code:
    .4byte 0xffff
    .4byte 0xaf9a00
.global ap_cpu_gdt64data
ap_cpu_gdt64data:
    .4byte 0xffff
    .4byte 0xaf9200
.global ap_cpu_gdt64ptr
ap_cpu_gdt64ptr:
    .2byte ap_cpu_gdt64ptr - ap_cpu_gdt64 - 1
    .8byte (ap_cpu_gdt64 - apic_ap_start) + 0x8000
.align 8
.global ap_cpu_kernel_entry_function
ap_cpu_kernel_entry_function:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_kernel_map_base
ap_cpu_kernel_map_base:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_init_processor_info_array
ap_cpu_init_processor_info_array:
    .8byte 0x0 /* will be set at runtime */
.global ap_cpu_init_stacks
ap_cpu_init_stacks:
    /* array of allocated stack pointers */
    /* NOTE: ap_cpu_init_stacks must be the last variable before
             end_apic_ap_start! */
.set end_apic_ap_start, .

View file

@ -0,0 +1,24 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/CMOS.h>
#include <Kernel/Arch/x86_64/IO.h>
namespace Kernel::CMOS {

// The CMOS/RTC chip is driven through a two-port protocol: write the
// register index to the select port, then move data through the data port.
static constexpr u8 select_port = 0x70;
static constexpr u8 data_port = 0x71;

u8 read(u8 index)
{
    IO::out8(select_port, index);
    return IO::in8(data_port);
}

void write(u8 index, u8 data)
{
    IO::out8(select_port, index);
    IO::out8(data_port, data);
}

}

16
Kernel/Arch/x86_64/CMOS.h Normal file
View file

@ -0,0 +1,16 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
namespace Kernel::CMOS {

// Reads the CMOS register at `index`.
u8 read(u8 index);
// Writes `data` to the CMOS register at `index`.
void write(u8 index, u8 data);

}

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
using namespace Kernel;
// Assertion-failure hook: logs the failing expression and its location,
// then aborts. Never returns (abort() below is [[noreturn]]).
void __assertion_failed(char const* msg, char const* file, unsigned line, char const* func)
{
    // Disable interrupts first so the diagnostic output cannot be interleaved
    // with other activity on this CPU.
    asm volatile("cli");
    critical_dmesgln("ASSERTION FAILED: {}", msg);
    critical_dmesgln("{}:{} in {}", file, line, func);
    abort();
}
// Kernel abort path: marks the current thread as crashing, restores the
// crashing process's address space so stack traces resolve, then panics.
[[noreturn]] void abort()
{
    // Avoid lock ranking checks on crashing paths, just try to get some debugging messages out.
    auto thread = Thread::current();
    if (thread)
        thread->set_crashing();

    // Switch back to the current process's page tables if there are any.
    // Otherwise stack walking will be a disaster.
    if (Process::has_current())
        Memory::MemoryManager::enter_process_address_space(Process::current());

    PANIC("Aborted");
}
// Lowest-level abort: executes an undefined instruction (ud2) to trap
// immediately, with no logging or cleanup.
[[noreturn]] void _abort()
{
    asm volatile("ud2");
    __builtin_unreachable();
}

58
Kernel/Arch/x86_64/CPU.h Normal file
View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Atomic.h>
#include <AK/Concepts.h>
#include <AK/Vector.h>
#include <Kernel/Arch/x86_64/DescriptorTable.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
/* Map IRQ0-15 @ ISR 0x50-0x5F */
#define IRQ_VECTOR_BASE 0x50
#define GENERIC_INTERRUPT_HANDLERS_COUNT (256 - IRQ_VECTOR_BASE)
namespace Kernel {

struct RegisterState;
class GenericInterruptHandler;

// EFLAGS bits userspace is allowed to control, and the two-bit I/O
// privilege-level field (bits 12-13).
static constexpr u32 safe_eflags_mask = 0xdff;
static constexpr u32 iopl_mask = 3u << 12;

// Extracts the IOPL field (bits 12-13) from an eflags value.
inline u32 get_iopl_from_eflags(u32 eflags)
{
    return (eflags >> 12) & 3u;
}

DescriptorTablePointer const& get_gdtr();
DescriptorTablePointer const& get_idtr();

// Rounds an address down to the start of its page.
constexpr FlatPtr page_base_of(FlatPtr address)
{
    return address & PAGE_MASK;
}

inline FlatPtr page_base_of(void const* address)
{
    return page_base_of(reinterpret_cast<FlatPtr>(address));
}

// Returns the byte offset of an address within its page.
constexpr FlatPtr offset_in_page(FlatPtr address)
{
    return address - page_base_of(address);
}

inline FlatPtr offset_in_page(void const* address)
{
    return offset_in_page(reinterpret_cast<FlatPtr>(address));
}

}

View file

@ -0,0 +1,376 @@
/*
* Copyright (c) 2022, Linus Groh <linusg@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/CPUID.h>
namespace Kernel {

// Returns the lowercase display name for a single CPU feature flag.
// The names mostly match what Linux prints in /proc/cpuinfo; deliberate
// deviations are called out in NOTE comments below.
// Calling this with an unknown (or multi-bit) value is a programming error.
StringView cpu_feature_to_string_view(CPUFeature::Type const& feature)
{
    // One entry per feature, in enumeration order; the repetitive
    // comparison boilerplate lives in this local macro.
#define __ENUMERATE_CPU_FEATURE(name, description) \
    if (feature == CPUFeature::name)               \
        return description;
    __ENUMERATE_CPU_FEATURE(SSE3, "sse3"sv)
    __ENUMERATE_CPU_FEATURE(PCLMULQDQ, "pclmulqdq"sv)
    __ENUMERATE_CPU_FEATURE(DTES64, "dtes64"sv)
    __ENUMERATE_CPU_FEATURE(MONITOR, "monitor"sv)
    __ENUMERATE_CPU_FEATURE(DS_CPL, "ds_cpl"sv)
    __ENUMERATE_CPU_FEATURE(VMX, "vmx"sv)
    __ENUMERATE_CPU_FEATURE(SMX, "smx"sv)
    __ENUMERATE_CPU_FEATURE(EST, "est"sv)
    __ENUMERATE_CPU_FEATURE(TM2, "tm2"sv)
    __ENUMERATE_CPU_FEATURE(SSSE3, "ssse3"sv)
    // NOTE: This is called cid on Linux, but CNXT_ID in the Intel manual.
    __ENUMERATE_CPU_FEATURE(CNXT_ID, "cnxt_id"sv)
    __ENUMERATE_CPU_FEATURE(SDBG, "sdbg"sv)
    __ENUMERATE_CPU_FEATURE(FMA, "fma"sv)
    __ENUMERATE_CPU_FEATURE(CX16, "cx16"sv)
    __ENUMERATE_CPU_FEATURE(XTPR, "xtpr"sv)
    __ENUMERATE_CPU_FEATURE(PDCM, "pdcm"sv)
    __ENUMERATE_CPU_FEATURE(PCID, "pcid"sv)
    __ENUMERATE_CPU_FEATURE(DCA, "dca"sv)
    __ENUMERATE_CPU_FEATURE(SSE4_1, "sse4_1"sv)
    __ENUMERATE_CPU_FEATURE(SSE4_2, "sse4_2"sv)
    __ENUMERATE_CPU_FEATURE(X2APIC, "x2apic"sv)
    __ENUMERATE_CPU_FEATURE(MOVBE, "movbe"sv)
    __ENUMERATE_CPU_FEATURE(POPCNT, "popcnt"sv)
    // NOTE: This is called tsc_deadline_timer on Linux, but TSC_DEADLINE in the Intel manual.
    __ENUMERATE_CPU_FEATURE(TSC_DEADLINE, "tsc_deadline"sv)
    __ENUMERATE_CPU_FEATURE(AES, "aes"sv)
    __ENUMERATE_CPU_FEATURE(XSAVE, "xsave"sv)
    __ENUMERATE_CPU_FEATURE(OSXSAVE, "osxsave"sv)
    __ENUMERATE_CPU_FEATURE(AVX, "avx"sv)
    __ENUMERATE_CPU_FEATURE(F16C, "f16c"sv)
    __ENUMERATE_CPU_FEATURE(RDRAND, "rdrand"sv)
    __ENUMERATE_CPU_FEATURE(HYPERVISOR, "hypervisor"sv)
    __ENUMERATE_CPU_FEATURE(FPU, "fpu"sv)
    __ENUMERATE_CPU_FEATURE(VME, "vme"sv)
    __ENUMERATE_CPU_FEATURE(DE, "de"sv)
    __ENUMERATE_CPU_FEATURE(PSE, "pse"sv)
    __ENUMERATE_CPU_FEATURE(TSC, "tsc"sv)
    __ENUMERATE_CPU_FEATURE(MSR, "msr"sv)
    __ENUMERATE_CPU_FEATURE(PAE, "pae"sv)
    __ENUMERATE_CPU_FEATURE(MCE, "mce"sv)
    __ENUMERATE_CPU_FEATURE(CX8, "cx8"sv)
    __ENUMERATE_CPU_FEATURE(APIC, "apic"sv)
    __ENUMERATE_CPU_FEATURE(SEP, "sep"sv)
    __ENUMERATE_CPU_FEATURE(MTRR, "mtrr"sv)
    __ENUMERATE_CPU_FEATURE(PGE, "pge"sv)
    __ENUMERATE_CPU_FEATURE(MCA, "mca"sv)
    __ENUMERATE_CPU_FEATURE(CMOV, "cmov"sv)
    __ENUMERATE_CPU_FEATURE(PAT, "pat"sv)
    __ENUMERATE_CPU_FEATURE(PSE36, "pse36"sv)
    __ENUMERATE_CPU_FEATURE(PSN, "psn"sv)
    __ENUMERATE_CPU_FEATURE(CLFLUSH, "clflush"sv)
    __ENUMERATE_CPU_FEATURE(DS, "ds"sv)
    __ENUMERATE_CPU_FEATURE(ACPI, "acpi"sv)
    __ENUMERATE_CPU_FEATURE(MMX, "mmx"sv)
    __ENUMERATE_CPU_FEATURE(FXSR, "fxsr"sv)
    __ENUMERATE_CPU_FEATURE(SSE, "sse"sv)
    __ENUMERATE_CPU_FEATURE(SSE2, "sse2"sv)
    __ENUMERATE_CPU_FEATURE(SS, "ss"sv)
    __ENUMERATE_CPU_FEATURE(HTT, "htt"sv)
    __ENUMERATE_CPU_FEATURE(TM, "tm"sv)
    __ENUMERATE_CPU_FEATURE(IA64, "ia64"sv)
    __ENUMERATE_CPU_FEATURE(PBE, "pbe"sv)
    __ENUMERATE_CPU_FEATURE(FSGSBASE, "fsgsbase"sv)
    __ENUMERATE_CPU_FEATURE(TSC_ADJUST, "tsc_adjust"sv)
    __ENUMERATE_CPU_FEATURE(SGX, "sgx"sv)
    __ENUMERATE_CPU_FEATURE(BMI1, "bmi1"sv)
    __ENUMERATE_CPU_FEATURE(HLE, "hle"sv)
    __ENUMERATE_CPU_FEATURE(AVX2, "avx2"sv)
    __ENUMERATE_CPU_FEATURE(FDP_EXCPTN_ONLY, "fdp_excptn_only"sv)
    __ENUMERATE_CPU_FEATURE(SMEP, "smep"sv)
    __ENUMERATE_CPU_FEATURE(BMI2, "bmi2"sv)
    __ENUMERATE_CPU_FEATURE(ERMS, "erms"sv)
    __ENUMERATE_CPU_FEATURE(INVPCID, "invpcid"sv)
    __ENUMERATE_CPU_FEATURE(RTM, "rtm"sv)
    __ENUMERATE_CPU_FEATURE(PQM, "pqm"sv)
    __ENUMERATE_CPU_FEATURE(ZERO_FCS_FDS, "zero_fcs_fds"sv)
    __ENUMERATE_CPU_FEATURE(MPX, "mpx"sv)
    __ENUMERATE_CPU_FEATURE(PQE, "pqe"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_F, "avx512_f"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_DQ, "avx512_dq"sv)
    __ENUMERATE_CPU_FEATURE(RDSEED, "rdseed"sv)
    __ENUMERATE_CPU_FEATURE(ADX, "adx"sv)
    __ENUMERATE_CPU_FEATURE(SMAP, "smap"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_IFMA, "avx512_ifma"sv)
    __ENUMERATE_CPU_FEATURE(PCOMMIT, "pcommit"sv)
    __ENUMERATE_CPU_FEATURE(CLFLUSHOPT, "clflushopt"sv)
    __ENUMERATE_CPU_FEATURE(CLWB, "clwb"sv)
    __ENUMERATE_CPU_FEATURE(INTEL_PT, "intel_pt"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_PF, "avx512_pf"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_ER, "avx512_er"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_CD, "avx512_cd"sv)
    __ENUMERATE_CPU_FEATURE(SHA, "sha"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_BW, "avx512_bw"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_VL, "avx512_vl"sv)
    __ENUMERATE_CPU_FEATURE(PREFETCHWT1, "prefetchwt1"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_VBMI, "avx512_vbmi"sv)
    __ENUMERATE_CPU_FEATURE(UMIP, "umip"sv)
    __ENUMERATE_CPU_FEATURE(PKU, "pku"sv)
    __ENUMERATE_CPU_FEATURE(OSPKE, "ospke"sv)
    __ENUMERATE_CPU_FEATURE(WAITPKG, "waitpkg"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_VBMI2, "avx512_vbmi2"sv)
    __ENUMERATE_CPU_FEATURE(CET_SS, "cet_ss"sv)
    __ENUMERATE_CPU_FEATURE(GFNI, "gfni"sv)
    __ENUMERATE_CPU_FEATURE(VAES, "vaes"sv)
    __ENUMERATE_CPU_FEATURE(VPCLMULQDQ, "vpclmulqdq"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_VNNI, "avx512_vnni"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_BITALG, "avx512_bitalg"sv)
    __ENUMERATE_CPU_FEATURE(TME_EN, "tme_en"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_VPOPCNTDQ, "avx512_vpopcntdq"sv)
    __ENUMERATE_CPU_FEATURE(INTEL_5_LEVEL_PAGING, "intel_5_level_paging"sv)
    __ENUMERATE_CPU_FEATURE(RDPID, "rdpid"sv)
    __ENUMERATE_CPU_FEATURE(KL, "kl"sv)
    __ENUMERATE_CPU_FEATURE(CLDEMOTE, "cldemote"sv)
    __ENUMERATE_CPU_FEATURE(MOVDIRI, "movdiri"sv)
    __ENUMERATE_CPU_FEATURE(MOVDIR64B, "movdir64b"sv)
    __ENUMERATE_CPU_FEATURE(ENQCMD, "enqcmd"sv)
    __ENUMERATE_CPU_FEATURE(SGX_LC, "sgx_lc"sv)
    __ENUMERATE_CPU_FEATURE(PKS, "pks"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_4VNNIW, "avx512_4vnniw"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_4FMAPS, "avx512_4fmaps"sv)
    __ENUMERATE_CPU_FEATURE(FSRM, "fsrm"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_VP2INTERSECT, "avx512_vp2intersect"sv)
    __ENUMERATE_CPU_FEATURE(SRBDS_CTRL, "srbds_ctrl"sv)
    __ENUMERATE_CPU_FEATURE(MD_CLEAR, "md_clear"sv)
    __ENUMERATE_CPU_FEATURE(RTM_ALWAYS_ABORT, "rtm_always_abort"sv)
    __ENUMERATE_CPU_FEATURE(TSX_FORCE_ABORT, "tsx_force_abort"sv)
    __ENUMERATE_CPU_FEATURE(SERIALIZE, "serialize"sv)
    __ENUMERATE_CPU_FEATURE(HYBRID, "hybrid"sv)
    __ENUMERATE_CPU_FEATURE(TSXLDTRK, "tsxldtrk"sv)
    __ENUMERATE_CPU_FEATURE(PCONFIG, "pconfig"sv)
    __ENUMERATE_CPU_FEATURE(LBR, "lbr"sv)
    __ENUMERATE_CPU_FEATURE(CET_IBT, "cet_ibt"sv)
    __ENUMERATE_CPU_FEATURE(AMX_BF16, "amx_bf16"sv)
    __ENUMERATE_CPU_FEATURE(AVX512_FP16, "avx512_fp16"sv)
    __ENUMERATE_CPU_FEATURE(AMX_TILE, "amx_tile"sv)
    __ENUMERATE_CPU_FEATURE(AMX_INT8, "amx_int8"sv)
    __ENUMERATE_CPU_FEATURE(SPEC_CTRL, "spec_ctrl"sv)
    __ENUMERATE_CPU_FEATURE(STIBP, "stibp"sv)
    // NOTE: This is called flush_l1d on Linux, but L1D_FLUSH in the Intel manual.
    __ENUMERATE_CPU_FEATURE(L1D_FLUSH, "l1d_flush"sv)
    __ENUMERATE_CPU_FEATURE(IA32_ARCH_CAPABILITIES, "ia32_arch_capabilities"sv)
    // NOTE: Was previously misspelled "ia32_code_capabilities".
    __ENUMERATE_CPU_FEATURE(IA32_CORE_CAPABILITIES, "ia32_core_capabilities"sv)
    __ENUMERATE_CPU_FEATURE(SSBD, "ssbd"sv)
    __ENUMERATE_CPU_FEATURE(LAHF_LM, "lahf_lm"sv)
    __ENUMERATE_CPU_FEATURE(CMP_LEGACY, "cmp_legacy"sv)
    __ENUMERATE_CPU_FEATURE(SVM, "svm"sv)
    __ENUMERATE_CPU_FEATURE(EXTAPIC, "extapic"sv)
    __ENUMERATE_CPU_FEATURE(CR8_LEGACY, "cr8_legacy"sv)
    __ENUMERATE_CPU_FEATURE(ABM, "abm"sv)
    __ENUMERATE_CPU_FEATURE(SSE4A, "sse4a"sv)
    __ENUMERATE_CPU_FEATURE(MISALIGNSSE, "misalignsse"sv)
    __ENUMERATE_CPU_FEATURE(_3DNOWPREFETCH, "3dnowprefetch"sv)
    __ENUMERATE_CPU_FEATURE(OSVW, "osvw"sv)
    __ENUMERATE_CPU_FEATURE(IBS, "ibs"sv)
    __ENUMERATE_CPU_FEATURE(XOP, "xop"sv)
    __ENUMERATE_CPU_FEATURE(SKINIT, "skinit"sv)
    __ENUMERATE_CPU_FEATURE(WDT, "wdt"sv)
    __ENUMERATE_CPU_FEATURE(LWP, "lwp"sv)
    __ENUMERATE_CPU_FEATURE(FMA4, "fma4"sv)
    __ENUMERATE_CPU_FEATURE(TCE, "tce"sv)
    __ENUMERATE_CPU_FEATURE(NODEID_MSR, "nodeid_msr"sv)
    __ENUMERATE_CPU_FEATURE(TBM, "tbm"sv)
    __ENUMERATE_CPU_FEATURE(TOPOEXT, "topoext"sv)
    __ENUMERATE_CPU_FEATURE(PERFCTR_CORE, "perfctr_core"sv)
    __ENUMERATE_CPU_FEATURE(PERFCTR_NB, "perfctr_nb"sv)
    __ENUMERATE_CPU_FEATURE(DBX, "dbx"sv)
    __ENUMERATE_CPU_FEATURE(PERFTSC, "perftsc"sv)
    // NOTE: This is called perfctr_l2 on Linux, but PCX_L2I in the AMD manual & other references.
    __ENUMERATE_CPU_FEATURE(PCX_L2I, "pcx_l2i"sv)
    __ENUMERATE_CPU_FEATURE(SYSCALL, "syscall"sv)
    __ENUMERATE_CPU_FEATURE(MP, "mp"sv)
    __ENUMERATE_CPU_FEATURE(NX, "nx"sv)
    __ENUMERATE_CPU_FEATURE(MMXEXT, "mmxext"sv)
    __ENUMERATE_CPU_FEATURE(FXSR_OPT, "fxsr_opt"sv)
    __ENUMERATE_CPU_FEATURE(PDPE1GB, "pdpe1gb"sv)
    __ENUMERATE_CPU_FEATURE(RDTSCP, "rdtscp"sv)
    __ENUMERATE_CPU_FEATURE(LM, "lm"sv)
    __ENUMERATE_CPU_FEATURE(_3DNOWEXT, "3dnowext"sv)
    __ENUMERATE_CPU_FEATURE(_3DNOW, "3dnow"sv)
    __ENUMERATE_CPU_FEATURE(CONSTANT_TSC, "constant_tsc"sv)
    __ENUMERATE_CPU_FEATURE(NONSTOP_TSC, "nonstop_tsc"sv)
#undef __ENUMERATE_CPU_FEATURE
    VERIFY_NOT_REACHED();
}

}

242
Kernel/Arch/x86_64/CPUID.h Normal file
View file

@ -0,0 +1,242 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2022, Linus Groh <linusg@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/ArbitrarySizedEnum.h>
#include <AK/Types.h>
#include <AK/UFixedBigInt.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
class CPUID {
public:
    // Executes the CPUID instruction for the given leaf (`function`, loaded
    // into eax) and optional sub-leaf (`ecx`), capturing all four result
    // registers for later inspection through the accessors below.
    explicit CPUID(u32 function, u32 ecx = 0)
    {
        asm volatile("cpuid"
                     : "=a"(m_result_eax), "=b"(m_result_ebx), "=c"(m_result_ecx), "=d"(m_result_edx)
                     : "a"(function), "c"(ecx));
    }

    u32 eax() const { return m_result_eax; }
    u32 ebx() const { return m_result_ebx; }
    u32 ecx() const { return m_result_ecx; }
    u32 edx() const { return m_result_edx; }

private:
    // Sentinel initializers; always overwritten by the constructor's cpuid.
    u32 m_result_eax = 0xffffffff;
    u32 m_result_ebx = 0xffffffff;
    u32 m_result_ecx = 0xffffffff;
    u32 m_result_edx = 0xffffffff;
};
AK_MAKE_ARBITRARY_SIZED_ENUM(CPUFeature, u256,
/* EAX=1, ECX */ //
SSE3 = CPUFeature(1u) << 0u, // Streaming SIMD Extensions 3
PCLMULQDQ = CPUFeature(1u) << 1u, // PCLMULDQ Instruction
DTES64 = CPUFeature(1u) << 2u, // 64-Bit Debug Store
MONITOR = CPUFeature(1u) << 3u, // MONITOR/MWAIT Instructions
DS_CPL = CPUFeature(1u) << 4u, // CPL Qualified Debug Store
VMX = CPUFeature(1u) << 5u, // Virtual Machine Extensions
SMX = CPUFeature(1u) << 6u, // Safer Mode Extensions
EST = CPUFeature(1u) << 7u, // Enhanced Intel SpeedStep® Technology
TM2 = CPUFeature(1u) << 8u, // Thermal Monitor 2
SSSE3 = CPUFeature(1u) << 9u, // Supplemental Streaming SIMD Extensions 3
CNXT_ID = CPUFeature(1u) << 10u, // L1 Context ID
SDBG = CPUFeature(1u) << 11u, // Silicon Debug (IA32_DEBUG_INTERFACE MSR)
FMA = CPUFeature(1u) << 12u, // Fused Multiply Add
CX16 = CPUFeature(1u) << 13u, // CMPXCHG16B Instruction
XTPR = CPUFeature(1u) << 14u, // xTPR Update Control
PDCM = CPUFeature(1u) << 15u, // Perfmon and Debug Capability (IA32_PERF_CAPABILITIES MSR)
/* ECX Bit 16 */ // Reserved
PCID = CPUFeature(1u) << 17u, // Process Context Identifiers
DCA = CPUFeature(1u) << 18u, // Direct Cache Access
SSE4_1 = CPUFeature(1u) << 19u, // Streaming SIMD Extensions 4.1
SSE4_2 = CPUFeature(1u) << 20u, // Streaming SIMD Extensions 4.2
X2APIC = CPUFeature(1u) << 21u, // Extended xAPIC Support
MOVBE = CPUFeature(1u) << 22u, // MOVBE Instruction
POPCNT = CPUFeature(1u) << 23u, // POPCNT Instruction
TSC_DEADLINE = CPUFeature(1u) << 24u, // Time Stamp Counter Deadline
AES = CPUFeature(1u) << 25u, // AES Instruction Extensions
XSAVE = CPUFeature(1u) << 26u, // XSAVE/XSTOR States
OSXSAVE = CPUFeature(1u) << 27u, // OS-Enabled Extended State Management
AVX = CPUFeature(1u) << 28u, // Advanced Vector Extensions
F16C = CPUFeature(1u) << 29u, // 16-bit floating-point conversion instructions
RDRAND = CPUFeature(1u) << 30u, // RDRAND Instruction
HYPERVISOR = CPUFeature(1u) << 31u, // Hypervisor present (always zero on physical CPUs)
/* EAX=1, EDX */ //
FPU = CPUFeature(1u) << 32u, // Floating-point Unit On-Chip
VME = CPUFeature(1u) << 33u, // Virtual Mode Extension
DE = CPUFeature(1u) << 34u, // Debugging Extension
PSE = CPUFeature(1u) << 35u, // Page Size Extension
TSC = CPUFeature(1u) << 36u, // Time Stamp Counter
MSR = CPUFeature(1u) << 37u, // Model Specific Registers
PAE = CPUFeature(1u) << 38u, // Physical Address Extension
MCE = CPUFeature(1u) << 39u, // Machine-Check Exception
CX8 = CPUFeature(1u) << 40u, // CMPXCHG8 Instruction
APIC = CPUFeature(1u) << 41u, // On-chip APIC Hardware
/* EDX Bit 10 */ // Reserved
SEP = CPUFeature(1u) << 43u, // Fast System Call
MTRR = CPUFeature(1u) << 44u, // Memory Type Range Registers
PGE = CPUFeature(1u) << 45u, // Page Global Enable
MCA = CPUFeature(1u) << 46u, // Machine-Check Architecture
CMOV = CPUFeature(1u) << 47u, // Conditional Move Instruction
PAT = CPUFeature(1u) << 48u, // Page Attribute Table
PSE36 = CPUFeature(1u) << 49u, // 36-bit Page Size Extension
PSN = CPUFeature(1u) << 50u, // Processor serial number is present and enabled
CLFLUSH = CPUFeature(1u) << 51u, // CLFLUSH Instruction
/* EDX Bit 20 */ // Reserved
DS = CPUFeature(1u) << 53u, // CLFLUSH Instruction
ACPI = CPUFeature(1u) << 54u, // CLFLUSH Instruction
MMX = CPUFeature(1u) << 55u, // CLFLUSH Instruction
FXSR = CPUFeature(1u) << 56u, // CLFLUSH Instruction
SSE = CPUFeature(1u) << 57u, // Streaming SIMD Extensions
SSE2 = CPUFeature(1u) << 58u, // Streaming SIMD Extensions 2
SS = CPUFeature(1u) << 59u, // Self-Snoop
HTT = CPUFeature(1u) << 60u, // Multi-Threading
TM = CPUFeature(1u) << 61u, // Thermal Monitor
IA64 = CPUFeature(1u) << 62u, // IA64 processor emulating x86
PBE = CPUFeature(1u) << 63u, // Pending Break Enable
/* EAX=7, EBX */ //
FSGSBASE = CPUFeature(1u) << 64u, // Access to base of %fs and %gs
TSC_ADJUST = CPUFeature(1u) << 65u, // IA32_TSC_ADJUST MSR
SGX = CPUFeature(1u) << 66u, // Software Guard Extensions
BMI1 = CPUFeature(1u) << 67u, // Bit Manipulation Instruction Set 1
HLE = CPUFeature(1u) << 68u, // TSX Hardware Lock Elision
AVX2 = CPUFeature(1u) << 69u, // Advanced Vector Extensions 2
FDP_EXCPTN_ONLY = CPUFeature(1u) << 70u, // FDP_EXCPTN_ONLY
SMEP = CPUFeature(1u) << 71u, // Supervisor Mode Execution Protection
BMI2 = CPUFeature(1u) << 72u, // Bit Manipulation Instruction Set 2
ERMS = CPUFeature(1u) << 73u, // Enhanced REP MOVSB/STOSB
INVPCID = CPUFeature(1u) << 74u, // INVPCID Instruction
RTM = CPUFeature(1u) << 75u, // TSX Restricted Transactional Memory
PQM = CPUFeature(1u) << 76u, // Platform Quality of Service Monitoring
ZERO_FCS_FDS = CPUFeature(1u) << 77u, // FPU CS and FPU DS deprecated
MPX = CPUFeature(1u) << 78u, // Intel MPX (Memory Protection Extensions)
PQE = CPUFeature(1u) << 79u, // Platform Quality of Service Enforcement
AVX512_F = CPUFeature(1u) << 80u, // AVX-512 Foundation
AVX512_DQ = CPUFeature(1u) << 81u, // AVX-512 Doubleword and Quadword Instructions
RDSEED = CPUFeature(1u) << 82u, // RDSEED Instruction
ADX = CPUFeature(1u) << 83u, // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
SMAP = CPUFeature(1u) << 84u, // Supervisor Mode Access Prevention
AVX512_IFMA = CPUFeature(1u) << 85u, // AVX-512 Integer Fused Multiply-Add Instructions
PCOMMIT = CPUFeature(1u) << 86u, // PCOMMIT Instruction
CLFLUSHOPT = CPUFeature(1u) << 87u, // CLFLUSHOPT Instruction
CLWB = CPUFeature(1u) << 88u, // CLWB Instruction
INTEL_PT = CPUFeature(1u) << 89u, // Intel Processor Tracing
AVX512_PF = CPUFeature(1u) << 90u, // AVX-512 Prefetch Instructions
AVX512_ER = CPUFeature(1u) << 91u, // AVX-512 Exponential and Reciprocal Instructions
AVX512_CD = CPUFeature(1u) << 92u, // AVX-512 Conflict Detection Instructions
SHA = CPUFeature(1u) << 93u, // Intel SHA Extensions
AVX512_BW = CPUFeature(1u) << 94u, // AVX-512 Byte and Word Instructions
AVX512_VL = CPUFeature(1u) << 95u, // AVX-512 Vector Length Extensions
/* EAX=7, ECX */ //
PREFETCHWT1 = CPUFeature(1u) << 96u, // PREFETCHWT1 Instruction
AVX512_VBMI = CPUFeature(1u) << 97u, // AVX-512 Vector Bit Manipulation Instructions
UMIP = CPUFeature(1u) << 98u, // UMIP
PKU = CPUFeature(1u) << 99u, // Memory Protection Keys for User-mode pages
OSPKE = CPUFeature(1u) << 100u, // PKU enabled by OS
WAITPKG = CPUFeature(1u) << 101u, // Timed pause and user-level monitor/wait
AVX512_VBMI2 = CPUFeature(1u) << 102u, // AVX-512 Vector Bit Manipulation Instructions 2
CET_SS = CPUFeature(1u) << 103u, // Control Flow Enforcement (CET) Shadow Stack
GFNI = CPUFeature(1u) << 104u, // Galois Field Instructions
VAES = CPUFeature(1u) << 105u, // Vector AES instruction set (VEX-256/EVEX)
VPCLMULQDQ = CPUFeature(1u) << 106u, // CLMUL instruction set (VEX-256/EVEX)
AVX512_VNNI = CPUFeature(1u) << 107u, // AVX-512 Vector Neural Network Instructions
AVX512_BITALG = CPUFeature(1u) << 108u, // AVX-512 BITALG Instructions
TME_EN = CPUFeature(1u) << 109u, // IA32_TME related MSRs are supported
AVX512_VPOPCNTDQ = CPUFeature(1u) << 110u, // AVX-512 Vector Population Count Double and Quad-word
/* ECX Bit 15 */ // Reserved
INTEL_5_LEVEL_PAGING = CPUFeature(1u) << 112u, // Intel 5-Level Paging
RDPID = CPUFeature(1u) << 113u, // RDPID Instruction
KL = CPUFeature(1u) << 114u, // Key Locker
/* ECX Bit 24 */ // Reserved
CLDEMOTE = CPUFeature(1u) << 116u, // Cache Line Demote
/* ECX Bit 26 */ // Reserved
MOVDIRI = CPUFeature(1u) << 118u, // MOVDIRI Instruction
MOVDIR64B = CPUFeature(1u) << 119u, // MOVDIR64B Instruction
ENQCMD = CPUFeature(1u) << 120u, // ENQCMD Instruction
SGX_LC = CPUFeature(1u) << 121u, // SGX Launch Configuration
PKS = CPUFeature(1u) << 122u, // Protection Keys for Supervisor-Mode Pages
/* EAX=7, EDX */ //
/* ECX Bit 0-1 */ // Reserved
AVX512_4VNNIW = CPUFeature(1u) << 125u, // AVX-512 4-register Neural Network Instructions
AVX512_4FMAPS = CPUFeature(1u) << 126u, // AVX-512 4-register Multiply Accumulation Single precision
FSRM = CPUFeature(1u) << 127u, // Fast Short REP MOVSB
/* ECX Bit 5-7 */ // Reserved
AVX512_VP2INTERSECT = CPUFeature(1u) << 131u, // AVX-512 VP2INTERSECT Doubleword and Quadword Instructions
SRBDS_CTRL = CPUFeature(1u) << 132u, // Special Register Buffer Data Sampling Mitigations
MD_CLEAR = CPUFeature(1u) << 133u, // VERW instruction clears CPU buffers
RTM_ALWAYS_ABORT = CPUFeature(1u) << 134u, // All TSX transactions are aborted
/* ECX Bit 12 */ // Reserved
TSX_FORCE_ABORT = CPUFeature(1u) << 136u, // TSX_FORCE_ABORT MSR
SERIALIZE = CPUFeature(1u) << 137u, // Serialize instruction execution
HYBRID = CPUFeature(1u) << 138u, // Mixture of CPU types in processor topology
TSXLDTRK = CPUFeature(1u) << 139u, // TSX suspend load address tracking
/* ECX Bit 17 */ // Reserved
PCONFIG = CPUFeature(1u) << 141u, // Platform configuration (Memory Encryption Technologies Instructions)
LBR = CPUFeature(1u) << 142u, // Architectural Last Branch Records
CET_IBT = CPUFeature(1u) << 143u, // Control flow enforcement (CET) indirect branch tracking
/* ECX Bit 21 */ // Reserved
AMX_BF16 = CPUFeature(1u) << 145u, // Tile computation on bfloat16 numbers
AVX512_FP16 = CPUFeature(1u) << 146u, // AVX512-FP16 half-precision floating-point instructions
AMX_TILE = CPUFeature(1u) << 147u, // Tile architecture
AMX_INT8 = CPUFeature(1u) << 148u, // Tile computation on 8-bit integers
SPEC_CTRL = CPUFeature(1u) << 149u, // Speculation Control
STIBP = CPUFeature(1u) << 150u, // Single Thread Indirect Branch Predictor
L1D_FLUSH = CPUFeature(1u) << 151u, // IA32_FLUSH_CMD MSR
IA32_ARCH_CAPABILITIES = CPUFeature(1u) << 152u, // IA32_ARCH_CAPABILITIES MSR
IA32_CORE_CAPABILITIES = CPUFeature(1u) << 153u, // IA32_CORE_CAPABILITIES MSR
SSBD = CPUFeature(1u) << 154u, // Speculative Store Bypass Disable
/* EAX=80000001h, ECX */ //
LAHF_LM = CPUFeature(1u) << 155u, // LAHF/SAHF in long mode
CMP_LEGACY = CPUFeature(1u) << 156u, // Hyperthreading not valid
SVM = CPUFeature(1u) << 157u, // Secure Virtual Machine
EXTAPIC = CPUFeature(1u) << 158u, // Extended APIC Space
CR8_LEGACY = CPUFeature(1u) << 159u, // CR8 in 32-bit mode
ABM = CPUFeature(1u) << 160u, // Advanced Bit Manipulation
SSE4A = CPUFeature(1u) << 161u, // SSE4a
MISALIGNSSE = CPUFeature(1u) << 162u, // Misaligned SSE Mode
_3DNOWPREFETCH = CPUFeature(1u) << 163u, // PREFETCH and PREFETCHW Instructions
OSVW = CPUFeature(1u) << 164u, // OS Visible Workaround
IBS = CPUFeature(1u) << 165u, // Instruction Based Sampling
XOP = CPUFeature(1u) << 166u, // XOP instruction set
SKINIT = CPUFeature(1u) << 167u, // SKINIT/STGI Instructions
WDT = CPUFeature(1u) << 168u, // Watchdog timer
LWP = CPUFeature(1u) << 169u, // Light Weight Profiling
FMA4 = CPUFeature(1u) << 170u, // FMA4 instruction set
TCE = CPUFeature(1u) << 171u, // Translation Cache Extension
NODEID_MSR = CPUFeature(1u) << 172u, // NodeID MSR
TBM = CPUFeature(1u) << 173u, // Trailing Bit Manipulation
TOPOEXT = CPUFeature(1u) << 174u, // Topology Extensions
PERFCTR_CORE = CPUFeature(1u) << 175u, // Core Performance Counter Extensions
PERFCTR_NB = CPUFeature(1u) << 176u, // NB Performance Counter Extensions
DBX = CPUFeature(1u) << 177u, // Data Breakpoint Extensions
PERFTSC = CPUFeature(1u) << 178u, // Performance TSC
PCX_L2I = CPUFeature(1u) << 179u, // L2I Performance Counter Extensions
/* EAX=80000001h, EDX */ //
SYSCALL = CPUFeature(1u) << 180u, // SYSCALL/SYSRET Instructions
MP = CPUFeature(1u) << 181u, // Multiprocessor Capable
NX = CPUFeature(1u) << 182u, // NX bit
MMXEXT = CPUFeature(1u) << 183u, // Extended MMX
FXSR_OPT = CPUFeature(1u) << 184u, // FXSAVE/FXRSTOR Optimizations
PDPE1GB = CPUFeature(1u) << 185u, // Gigabyte Pages
RDTSCP = CPUFeature(1u) << 186u, // RDTSCP Instruction
LM = CPUFeature(1u) << 187u, // Long Mode
_3DNOWEXT = CPUFeature(1u) << 188u, // Extended 3DNow!
_3DNOW = CPUFeature(1u) << 189u, // 3DNow!
/* EAX=80000007h, EDX */ //
CONSTANT_TSC = CPUFeature(1u) << 190u, // Invariant TSC
NONSTOP_TSC = CPUFeature(1u) << 191u, // Invariant TSC
__End = CPUFeature(1u) << 255u);
StringView cpu_feature_to_string_view(CPUFeature::Type const&);
}

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
namespace Kernel {
// Central fatal-fault handler: either delivers a signal to a userspace
// process that can handle it, or panics the kernel with diagnostics.
// `regs` is the trapping register state, `description` a human-readable
// fault name, `signal` the POSIX signal to deliver, and `out_of_memory`
// marks crashes caused by OOM (forwarded to Process::crash()).
void handle_crash(Kernel::RegisterState const& regs, char const* description, int signal, bool out_of_memory)
{
    auto* current_thread = Thread::current();
    if (!current_thread)
        PANIC("{} with !Thread::current()", description);

    // Ring 0 iff the saved CS privilege level bits are zero.
    auto crashed_in_kernel = (regs.cs & 3) == 0;
    // A userspace fault with an installed, unblocked, un-ignored handler is
    // delivered as an urgent signal instead of killing the process outright.
    if (!crashed_in_kernel && current_thread->has_signal_handler(signal) && !current_thread->should_ignore_signal(signal) && !current_thread->is_signal_masked(signal)) {
        current_thread->send_urgent_signal_to_self(signal);
        return;
    }

    auto& process = current_thread->process();

    // If a process crashed while inspecting another process,
    // make sure we switch back to the right page tables.
    Memory::MemoryManager::enter_process_address_space(process);

    dmesgln("CRASH: CPU #{} {} in ring {}", Processor::current_id(), description, (regs.cs & 3));
    dump_registers(regs);

    if (crashed_in_kernel) {
        // Kernel-mode crashes are unrecoverable: dump regions and panic.
        process.address_space().with([&](auto& space) { space->dump_regions(); });
        PANIC("Crash in ring 0");
    }

    process.crash(signal, regs.ip(), out_of_memory);
}
}

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/CurrentTime.h>
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
#include <Kernel/Arch/x86_64/Processor.h>
namespace Kernel {
// Time-source callback backed by the CPU timestamp counter (RDTSC).
// Only selected when the TSC is invariant; see optional_current_time().
static u64 current_time_tsc()
{
    return read_tsc();
}
// Picks a fine-grained time source for the scheduler, if one is available.
// Returns a TSC-based reader when the CPU advertises both the TSC and the
// invariant-TSC (CONSTANT_TSC) features; otherwise returns nullptr so the
// caller falls back to another clock.
fptr optional_current_time()
{
    VERIFY(Processor::is_initialized()); // sanity check

    auto& processor = Processor::current();
    bool has_invariant_tsc = processor.has_feature(CPUFeature::TSC)
        && processor.has_feature(CPUFeature::CONSTANT_TSC);
    if (!has_invariant_tsc)
        return nullptr;
    return current_time_tsc;
}
}

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/DebugOutput.h>
#include <Kernel/Arch/x86_64/BochsDebugOutput.h>
#include <Kernel/Arch/x86_64/IO.h>
namespace Kernel {
static constexpr u16 serial_com1_io_port = 0x3F8;
// Writes one character to the Bochs/QEMU debug console port (0xE9).
void bochs_debug_output(char ch)
{
    IO::out8(IO::BOCHS_DEBUG_PORT, ch);
}
// Writes one character to the COM1 serial port, lazily initializing the
// UART on first use and normalizing bare '\n' to "\r\n".
// NOTE: Uses function-local statics; callers are expected to serialize
// access (no locking is done here).
void debug_output(char ch)
{
    static bool serial_ready = false;
    static bool was_cr = false;

    if (!serial_ready) {
        // Classic 16550 UART bring-up sequence (see 16550 datasheet):
        IO::out8(serial_com1_io_port + 1, 0x00); // disable UART interrupts
        IO::out8(serial_com1_io_port + 3, 0x80); // set DLAB to program the baud divisor
        IO::out8(serial_com1_io_port + 0, 0x02); // divisor low byte = 2 (57600 baud)
        IO::out8(serial_com1_io_port + 1, 0x00); // divisor high byte = 0
        IO::out8(serial_com1_io_port + 3, 0x03); // clear DLAB; 8 data bits, no parity, 1 stop
        IO::out8(serial_com1_io_port + 2, 0xC7); // enable + clear FIFOs, 14-byte threshold
        IO::out8(serial_com1_io_port + 4, 0x0B); // modem control: DTR | RTS | OUT2

        serial_ready = true;
    }

    // Busy-wait until the transmit holding register is empty (LSR bit 5).
    while ((IO::in8(serial_com1_io_port + 5) & 0x20) == 0)
        ;

    // Emit CRLF for a lone LF so terminals render line breaks correctly.
    if (ch == '\n' && !was_cr)
        IO::out8(serial_com1_io_port, '\r');
    IO::out8(serial_com1_io_port, ch);
    was_cr = ch == '\r';
}
}

View file

@ -0,0 +1,17 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/x86_64/IO.h>
namespace Kernel {
// Busy-wait for approximately `microseconds` microseconds using port I/O
// timing (see IO::delay()). Architecture-generic entry point for x86-64.
void microseconds_delay(u32 microseconds)
{
    IO::delay(microseconds);
}
}

View file

@ -0,0 +1,155 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/StdLibExtras.h>
#include <AK/Types.h>
#include <Kernel/VirtualAddress.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
// Note: These values are x86-64.
#define GDT_SELECTOR_CODE0 0x08
#define GDT_SELECTOR_DATA0 0x10
#define GDT_SELECTOR_DATA3 0x18
#define GDT_SELECTOR_CODE3 0x20
#define GDT_SELECTOR_TSS 0x28
#define GDT_SELECTOR_TSS_PART2 0x30
namespace Kernel {
// Operand format for LGDT/LIDT: 16-bit limit followed by the table's
// linear base address. Must be packed — the CPU reads this layout directly.
struct [[gnu::packed]] DescriptorTablePointer {
    u16 limit;     // size of the table in bytes, minus one
    void* address; // linear base address of the descriptor table
};
// One 8-byte GDT/LDT segment descriptor, viewable either as the
// architectural bitfields or as two raw 32-bit words.
union [[gnu::packed]] Descriptor {
    struct {
        u16 limit_lo;            // limit bits 0..15
        u16 base_lo;             // base bits 0..15
        u8 base_hi;              // base bits 16..23
        u8 type : 4;             // segment/gate type (see SystemType for system descriptors)
        u8 descriptor_type : 1;  // 0 = system, 1 = code/data
        u8 dpl : 2;              // descriptor privilege level
        u8 segment_present : 1;  // present bit
        u8 limit_hi : 4;         // limit bits 16..19
        u8 : 1;                  // AVL (available for OS use), unused here
        u8 operation_size64 : 1; // L bit: 64-bit code segment
        u8 operation_size32 : 1; // D/B bit: default operand size
        u8 granularity : 1;      // 0 = byte granularity, 1 = 4 KiB
        u8 base_hi2;             // base bits 24..31
    };
    struct {
        u32 low;  // raw low dword
        u32 high; // raw high dword
    };
    // Values of `type` when descriptor_type == 0 (system descriptors).
    enum SystemType {
        Invalid = 0,
        AvailableTSS_16bit = 0x1,
        LDT = 0x2,
        BusyTSS_16bit = 0x3,
        CallGate_16bit = 0x4,
        TaskGate = 0x5,
        InterruptGate_16bit = 0x6,
        TrapGate_16bit = 0x7,
        AvailableTSS = 0x9,
        BusyTSS = 0xb,
        CallGate = 0xc,
        InterruptGate = 0xe,
        TrapGate = 0xf,
    };

    // Reassembles the (32-bit) segment base from its three fields.
    VirtualAddress base() const
    {
        FlatPtr base = base_lo;
        base |= base_hi << 16u;
        base |= base_hi2 << 24u;
        return VirtualAddress { base };
    }

    // Splits a base address into the three descriptor fields.
    // NOTE(review): the VERIFY runs after the truncating writes, so an
    // out-of-range base is caught but only after partially storing it.
    void set_base(VirtualAddress base)
    {
        base_lo = base.get() & 0xffffu;
        base_hi = (base.get() >> 16u) & 0xffu;
        base_hi2 = (base.get() >> 24u) & 0xffu;
        VERIFY(base.get() <= 0xffffffff);
    }

    // Splits a 20-bit limit into limit_lo/limit_hi.
    void set_limit(u32 length)
    {
        limit_lo = length & 0xffff;
        limit_hi = (length >> 16) & 0xf;
    }
};
static_assert(AssertSize<Descriptor, 8>());
// Gate type nibble stored in an IDT entry's type_attr.gate_type field.
enum class IDTEntryType {
    TaskGate32 = 0b0101,
    InterruptGate16 = 0b110,
    TrapGate16 = 0b111,
    InterruptGate32 = 0b1110, // interrupts disabled on entry
    TrapGate32 = 0b1111,      // interrupts left as-is on entry
};
// Clang doesn't format this right due to the compiler magic
// clang-format off
// A single 16-byte x86-64 Interrupt Descriptor Table entry. The 64-bit
// handler address is split across offset_1 (0..15), offset_2 (16..31)
// and offset_3 (32..63), per the architectural gate layout.
struct [[gnu::packed]] IDTEntry
{
    u16 offset_1; // offset bits 0..15
    u16 selector; // a code segment selector in GDT or LDT

    struct {
        u8 interrupt_stack_table : 3; // IST index (0 = use current stack)
        u8 zero : 5; // unused, set to 0
    };
    struct {
        u8 gate_type : 4;
        u8 storage_segment : 1;
        u8 descriptor_privilege_level : 2;
        u8 present : 1;
    } type_attr; // type and attributes
    u16 offset_2; // offset bits 16..31
    u32 offset_3; // offset bits 32..63
    u32 zeros;    // reserved, must be zero

    IDTEntry() = default;
    IDTEntry(FlatPtr callback, u16 selector_, IDTEntryType type, u8 storage_segment, u8 privilege_level)
        : offset_1 { (u16)((FlatPtr)callback & 0xFFFF) }
        , selector { selector_ }
        , interrupt_stack_table { 0 }
        , zero { 0 }
        , type_attr {
            .gate_type = (u8)type,
            .storage_segment = storage_segment,
            .descriptor_privilege_level = (u8)(privilege_level & 0b11),
            .present = 1,
        }
        , offset_2 { (u16)((FlatPtr)callback >> 16) }
        , offset_3 { (u32)(((FlatPtr)callback) >> 32) }
        , zeros { 0 }
    {
    }

    // Reassembles the full 64-bit handler address from the three offset
    // fields. BUGFIX: the parts occupy disjoint bit ranges and must be
    // combined with bitwise OR; the previous '&' collapsed the result to
    // zero for any realistic handler address.
    FlatPtr off() const
    {
        return ((u64)offset_3 << 32) | ((u64)offset_2 << 16) | (u64)offset_1;
    }
    IDTEntryType type() const
    {
        return IDTEntryType(type_attr.gate_type);
    }
};
// clang-format on

static_assert(AssertSize<IDTEntry, 2 * sizeof(void*)>());
}

View file

@ -0,0 +1,179 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Platform.h>
#include <Kernel/Arch/x86_64/Hypervisor/BochsDisplayConnector.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Graphics/Bochs/Definitions.h>
#include <Kernel/Graphics/Console/ContiguousFramebufferConsole.h>
#include <Kernel/Graphics/GraphicsManagement.h>
namespace Kernel {
// Writes a Bochs DISPI register: select it via the index port, then write
// the value to the data port. The two-step order is required by the device.
static void set_register_with_io(u16 index, u16 data)
{
    IO::out16(VBE_DISPI_IOPORT_INDEX, index);
    IO::out16(VBE_DISPI_IOPORT_DATA, data);
}
// Reads a Bochs DISPI register: select it via the index port, then read
// the value from the data port.
static u16 get_register_with_io(u16 index)
{
    IO::out16(VBE_DISPI_IOPORT_INDEX, index);
    return IO::in16(VBE_DISPI_IOPORT_DATA);
}
// Probes for an ISA (non-PCI) Bochs/QEMU VGA adapter and, if found, creates
// a connector for it. Returns a null LockRefPtr when no matching device is
// detected. Only valid when PCI hardware access is disabled.
LockRefPtr<BochsDisplayConnector> BochsDisplayConnector::try_create_for_vga_isa_connector()
{
    VERIFY(PCI::Access::is_hardware_disabled());

    // DISPI register 0 holds the device ID; require the ID5 revision.
    BochsDisplayConnector::IndexID index_id = get_register_with_io(0);
    if (index_id != VBE_DISPI_ID5)
        return {};

    // VRAM size is reported in 64 KiB chunks; 0 or 0xffff means "unknown".
    auto video_ram_64k_chunks_count = get_register_with_io(to_underlying(BochsDISPIRegisters::VIDEO_RAM_64K_CHUNKS_COUNT));
    if (video_ram_64k_chunks_count == 0 || video_ram_64k_chunks_count == 0xffff) {
        dmesgln("Graphics: Bochs ISA VGA compatible adapter does not indicate amount of VRAM, default to 8 MiB");
        video_ram_64k_chunks_count = (8 * MiB) / (64 * KiB);
    } else {
        dmesgln("Graphics: Bochs ISA VGA compatible adapter indicates {} bytes of VRAM", video_ram_64k_chunks_count * (64 * KiB));
    }

    // Note: The default physical address for isa-vga framebuffer in QEMU is 0xE0000000.
    // Since this is probably hardcoded at other OSes in their guest drivers,
    // we can assume this is going to stay the same framebuffer physical address for
    // this device and will not be changed in the future.
    auto device_or_error = DeviceManagement::try_create_device<BochsDisplayConnector>(PhysicalAddress(0xE0000000), video_ram_64k_chunks_count * (64 * KiB));
    VERIFY(!device_or_error.is_error());
    auto connector = device_or_error.release_value();
    MUST(connector->create_attached_framebuffer_console());
    MUST(connector->initialize_edid_for_generic_monitor({}));
    return connector;
}
// Creates a connector for a PCI-discovered Bochs display device, aborting
// the kernel on any allocation failure. When `virtual_box_hardware` is set,
// a VirtualBox ("VBX") manufacturer ID is used for the synthetic EDID.
NonnullLockRefPtr<BochsDisplayConnector> BochsDisplayConnector::must_create(PhysicalAddress framebuffer_address, size_t framebuffer_resource_size, bool virtual_box_hardware)
{
    auto display_connector = MUST(DeviceManagement::try_create_device<BochsDisplayConnector>(framebuffer_address, framebuffer_resource_size));
    MUST(display_connector->create_attached_framebuffer_console());
    if (virtual_box_hardware)
        MUST(display_connector->initialize_edid_for_generic_monitor(Array<u8, 3> { 'V', 'B', 'X' }));
    else
        MUST(display_connector->initialize_edid_for_generic_monitor({}));
    return display_connector;
}
// Constructs the connector over the given framebuffer range; the final
// `false` tells DisplayConnector this device has no DMA flush requirement.
BochsDisplayConnector::BochsDisplayConnector(PhysicalAddress framebuffer_address, size_t framebuffer_resource_size)
    : DisplayConnector(framebuffer_address, framebuffer_resource_size, false)
{
}
// Creates a kernel text console over the framebuffer and registers it as
// the active graphics console.
ErrorOr<void> BochsDisplayConnector::create_attached_framebuffer_console()
{
    // We assume safe resolution is 1024x768x32
    m_framebuffer_console = Graphics::ContiguousFramebufferConsole::initialize(m_framebuffer_address.value(), 1024, 768, 1024 * sizeof(u32));
    GraphicsManagement::the().set_console(*m_framebuffer_console);
    return {};
}
// Reads the device ID from DISPI register 0.
BochsDisplayConnector::IndexID BochsDisplayConnector::index_id() const
{
    return get_register_with_io(0);
}
// Re-enables the attached framebuffer console. Caller must hold m_control_lock.
void BochsDisplayConnector::enable_console()
{
    VERIFY(m_control_lock.is_locked());
    VERIFY(m_framebuffer_console);
    m_framebuffer_console->enable();
}
// Disables the attached framebuffer console. Caller must hold m_control_lock.
void BochsDisplayConnector::disable_console()
{
    VERIFY(m_control_lock.is_locked());
    VERIFY(m_framebuffer_console);
    m_framebuffer_console->disable();
}
// No flush support on this device (flush_support() returns false).
ErrorOr<void> BochsDisplayConnector::flush_first_surface()
{
    return Error::from_errno(ENOTSUP);
}
// Programs the conservative fallback mode (1024x768, 32 bpp) via the
// regular modesetting path.
ErrorOr<void> BochsDisplayConnector::set_safe_mode_setting()
{
    DisplayConnector::ModeSetting safe_mode_set {
        .horizontal_stride = 1024 * sizeof(u32),
        .pixel_clock_in_khz = 0, // Note: There's no pixel clock in paravirtualized hardware
        .horizontal_active = 1024,
        .horizontal_front_porch_pixels = 0, // Note: There's no horizontal_front_porch_pixels in paravirtualized hardware
        .horizontal_sync_time_pixels = 0,   // Note: There's no horizontal_sync_time_pixels in paravirtualized hardware
        .horizontal_blank_pixels = 0,       // Note: There's no horizontal_blank_pixels in paravirtualized hardware
        .vertical_active = 768,
        .vertical_front_porch_lines = 0, // Note: There's no vertical_front_porch_lines in paravirtualized hardware
        .vertical_sync_time_lines = 0,   // Note: There's no vertical_sync_time_lines in paravirtualized hardware
        .vertical_blank_lines = 0,       // Note: There's no vertical_blank_lines in paravirtualized hardware
        .horizontal_offset = 0,
        .vertical_offset = 0,
    };
    return set_mode_setting(safe_mode_set);
}
// Programs a new display mode through the DISPI registers. The register
// write order matters: the device must be disabled (ENABLE=0) before
// resolution/BPP changes and re-enabled afterwards. Returns ENOTIMPL if the
// device did not accept the requested resolution.
ErrorOr<void> BochsDisplayConnector::set_mode_setting(ModeSetting const& mode_setting)
{
    SpinlockLocker locker(m_modeset_lock);
    size_t width = mode_setting.horizontal_active;
    size_t height = mode_setting.vertical_active;

    dbgln_if(BXVGA_DEBUG, "BochsDisplayConnector resolution registers set to - {}x{}", width, height);

    set_register_with_io(to_underlying(BochsDISPIRegisters::ENABLE), 0);
    set_register_with_io(to_underlying(BochsDISPIRegisters::XRES), (u16)width);
    set_register_with_io(to_underlying(BochsDISPIRegisters::YRES), (u16)height);
    set_register_with_io(to_underlying(BochsDISPIRegisters::VIRT_WIDTH), (u16)width);
    // Virtual height is doubled, allowing a second page for panning.
    set_register_with_io(to_underlying(BochsDISPIRegisters::VIRT_HEIGHT), (u16)height * 2);
    set_register_with_io(to_underlying(BochsDISPIRegisters::BPP), 32);
    set_register_with_io(to_underlying(BochsDISPIRegisters::ENABLE), to_underlying(BochsFramebufferSettings::Enabled) | to_underlying(BochsFramebufferSettings::LinearFramebuffer));
    set_register_with_io(to_underlying(BochsDISPIRegisters::BANK), 0);

    // Read back the resolution to verify the device accepted it.
    if ((u16)width != get_register_with_io(to_underlying(BochsDISPIRegisters::XRES)) || (u16)height != get_register_with_io(to_underlying(BochsDISPIRegisters::YRES))) {
        return Error::from_errno(ENOTIMPL);
    }
    auto current_horizontal_active = get_register_with_io(to_underlying(BochsDISPIRegisters::XRES));
    auto current_vertical_active = get_register_with_io(to_underlying(BochsDISPIRegisters::YRES));
    DisplayConnector::ModeSetting mode_set {
        .horizontal_stride = current_horizontal_active * sizeof(u32),
        .pixel_clock_in_khz = 0, // Note: There's no pixel clock in paravirtualized hardware
        .horizontal_active = current_horizontal_active,
        .horizontal_front_porch_pixels = 0, // Note: There's no horizontal_front_porch_pixels in paravirtualized hardware
        .horizontal_sync_time_pixels = 0,   // Note: There's no horizontal_sync_time_pixels in paravirtualized hardware
        .horizontal_blank_pixels = 0,       // Note: There's no horizontal_blank_pixels in paravirtualized hardware
        .vertical_active = current_vertical_active,
        .vertical_front_porch_lines = 0, // Note: There's no vertical_front_porch_lines in paravirtualized hardware
        .vertical_sync_time_lines = 0,   // Note: There's no vertical_sync_time_lines in paravirtualized hardware
        .vertical_blank_lines = 0,       // Note: There's no vertical_blank_lines in paravirtualized hardware
        .horizontal_offset = 0,
        .vertical_offset = 0,
    };
    m_current_mode_setting = mode_set;
    return {};
}
// Vertical panning is intentionally unsupported on the plain Bochs device.
ErrorOr<void> BochsDisplayConnector::set_y_offset(size_t)
{
    // Note: Although when using this device on QEMU we can actually set the horizontal and vertical offsets
    // with IO ports, this class is meant to be used for plain old Bochs graphics which might not support
    // this feature at all.
    return Error::from_errno(ENOTIMPL);
}
// Screen blanking control is not implemented for this device.
ErrorOr<void> BochsDisplayConnector::unblank()
{
    return Error::from_errno(ENOTIMPL);
}
}

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Try.h>
#include <Kernel/Graphics/Bochs/Definitions.h>
#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
#include <Kernel/Graphics/DisplayConnector.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
class BochsDisplayConnector
: public DisplayConnector {
friend class BochsGraphicsAdapter;
friend class DeviceManagement;
friend class GraphicsManagement;
public:
AK_TYPEDEF_DISTINCT_ORDERED_ID(u16, IndexID);
static LockRefPtr<BochsDisplayConnector> try_create_for_vga_isa_connector();
static NonnullLockRefPtr<BochsDisplayConnector> must_create(PhysicalAddress framebuffer_address, size_t framebuffer_resource_size, bool virtual_box_hardware);
private:
IndexID index_id() const;
ErrorOr<void> create_attached_framebuffer_console();
BochsDisplayConnector(PhysicalAddress framebuffer_address, size_t framebuffer_resource_size);
virtual bool mutable_mode_setting_capable() const override final { return true; }
virtual bool double_framebuffering_capable() const override { return false; }
virtual ErrorOr<void> set_mode_setting(ModeSetting const&) override;
virtual ErrorOr<void> set_safe_mode_setting() override final;
virtual ErrorOr<void> set_y_offset(size_t y) override;
virtual ErrorOr<void> unblank() override;
virtual bool partial_flush_support() const override final { return false; }
virtual bool flush_support() const override final { return false; }
// Note: Paravirtualized hardware doesn't require a defined refresh rate for modesetting.
virtual bool refresh_rate_support() const override final { return false; }
virtual ErrorOr<void> flush_first_surface() override final;
virtual void enable_console() override final;
virtual void disable_console() override final;
LockRefPtr<Graphics::GenericFramebufferConsole> m_framebuffer_console;
};
}

View file

@ -0,0 +1,247 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/OwnPtr.h>
#include <AK/Singleton.h>
#include <Kernel/API/MousePacket.h>
#include <Kernel/Arch/x86_64/Hypervisor/VMWareBackdoor.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Debug.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Sections.h>
namespace Kernel {
#define VMWARE_CMD_GETVERSION 0x0a
#define VMMOUSE_READ_ID 0x45414552
#define VMMOUSE_DISABLE 0x000000f5
#define VMMOUSE_REQUEST_RELATIVE 0x4c455252
#define VMMOUSE_REQUEST_ABSOLUTE 0x53424152
#define VMMOUSE_QEMU_VERSION 0x3442554a
#define VMMOUSE_LEFT_CLICK 0x20
#define VMMOUSE_RIGHT_CLICK 0x10
#define VMMOUSE_MIDDLE_CLICK 0x08
#define VMWARE_MAGIC 0x564D5868
#define VMWARE_PORT 0x5658
#define VMWARE_PORT_HIGHBANDWIDTH 0x5659
// Issues a low-bandwidth VMware backdoor command: loads the magic value and
// backdoor port into the command, then performs the `in` on port 0x5658
// that the hypervisor intercepts. All six registers are read-write; the
// hypervisor returns results in the same VMWareCommand fields.
inline void vmware_out(VMWareCommand& command)
{
    command.magic = VMWARE_MAGIC;
    command.port = VMWARE_PORT;
    command.si = 0;
    command.di = 0;
    asm volatile("in %%dx, %0"
                 : "+a"(command.ax), "+b"(command.bx), "+c"(command.cx), "+d"(command.dx), "+S"(command.si), "+D"(command.di));
}
// High-bandwidth backdoor transfer, guest -> hypervisor, via rep outsb on
// the high-bandwidth port (0x5659). si/di carry the buffer pointers.
inline void vmware_high_bandwidth_send(VMWareCommand& command)
{
    command.magic = VMWARE_MAGIC;
    command.port = VMWARE_PORT_HIGHBANDWIDTH;

    asm volatile("cld; rep; outsb"
                 : "+a"(command.ax), "+b"(command.bx), "+c"(command.cx), "+d"(command.dx), "+S"(command.si), "+D"(command.di));
}
// High-bandwidth backdoor transfer, hypervisor -> guest, via rep insb on
// the high-bandwidth port (0x5659).
inline void vmware_high_bandwidth_get(VMWareCommand& command)
{
    command.magic = VMWARE_MAGIC;
    command.port = VMWARE_PORT_HIGHBANDWIDTH;

    asm volatile("cld; rep; insb"
                 : "+a"(command.ax), "+b"(command.bx), "+c"(command.cx), "+d"(command.dx), "+S"(command.si), "+D"(command.di));
}
// Lazily probes for the VMware backdoor interface once, at singleton
// construction, and owns the VMWareBackdoor instance when present.
class VMWareBackdoorDetector {
public:
    VMWareBackdoorDetector()
    {
        if (detect_presence())
            m_backdoor = adopt_nonnull_own_or_enomem(new (nothrow) VMWareBackdoor()).release_value_but_fixme_should_propagate_errors();
    }

    // Returns nullptr when the backdoor was not detected.
    VMWareBackdoor* get_instance()
    {
        return m_backdoor.ptr();
    }

private:
    // Issues GETVERSION with a deliberately wrong bx; the hypervisor echoes
    // VMWARE_MAGIC back in bx when the backdoor exists, and ax != -1.
    static bool detect_presence()
    {
        VMWareCommand command;
        command.bx = ~VMWARE_MAGIC;
        command.command = VMWARE_CMD_GETVERSION;
        vmware_out(command);
        if (command.bx != VMWARE_MAGIC || command.ax == 0xFFFFFFFF)
            return false;
        return true;
    }

    OwnPtr<VMWareBackdoor> m_backdoor;
};
// Process-wide detector; constructed on first access to the().
static Singleton<VMWareBackdoorDetector> s_vmware_backdoor;

// Returns the backdoor interface, or nullptr when not running under VMware/QEMU
// with the backdoor available.
VMWareBackdoor* VMWareBackdoor::the()
{
    return s_vmware_backdoor->get_instance();
}
// Optionally switches the emulated mouse into absolute mode, controlled by
// the `vmmouse` kernel command-line flag.
UNMAP_AFTER_INIT VMWareBackdoor::VMWareBackdoor()
{
    if (kernel_command_line().is_vmmouse_enabled())
        enable_absolute_vmmouse();
}
// Probes for the vmmouse protocol: request the device ID, then read one
// data word back and compare against the QEMU/VMware version magic.
bool VMWareBackdoor::detect_vmmouse()
{
    VMWareCommand command;
    command.bx = VMMOUSE_READ_ID;
    command.command = VMMOUSE_COMMAND;
    send(command);
    command.bx = 1; // read a single status/data word
    command.command = VMMOUSE_DATA;
    send(command);
    if (command.ax != VMMOUSE_QEMU_VERSION)
        return false;
    return true;
}
// True once enable_absolute_vmmouse() has successfully switched modes.
bool VMWareBackdoor::vmmouse_is_absolute() const
{
    return m_vmmouse_absolute;
}
// Switches the vmmouse into absolute-coordinate mode: verify the device
// exists, check its status, then request absolute reporting.
void VMWareBackdoor::enable_absolute_vmmouse()
{
    InterruptDisabler disabler;
    if (!detect_vmmouse())
        return;
    dmesgln("VMWareBackdoor: Enabling absolute mouse mode");

    VMWareCommand command;

    command.bx = 0;
    command.command = VMMOUSE_STATUS;
    send(command);
    // High word 0xFFFF in ax signals an error state from the device.
    if (command.ax == 0xFFFF0000) {
        dmesgln("VMWareBackdoor: VMMOUSE_STATUS got bad status");
        return;
    }

    // Enable absolute vmmouse
    command.bx = VMMOUSE_REQUEST_ABSOLUTE;
    command.command = VMMOUSE_COMMAND;
    send(command);
    m_vmmouse_absolute = true;
}
// Returns the vmmouse to relative-coordinate mode.
void VMWareBackdoor::disable_absolute_vmmouse()
{
    InterruptDisabler disabler;
    VMWareCommand command;
    command.bx = VMMOUSE_REQUEST_RELATIVE;
    command.command = VMMOUSE_COMMAND;
    send(command);
    m_vmmouse_absolute = false;
}
// High-bandwidth send wrapper with optional debug tracing of the result
// registers.
void VMWareBackdoor::send_high_bandwidth(VMWareCommand& command)
{
    vmware_high_bandwidth_send(command);
    dbgln_if(VMWARE_BACKDOOR_DEBUG, "VMWareBackdoor Command High bandwidth Send Results: EAX {:#x} EBX {:#x} ECX {:#x} EDX {:#x}",
        command.ax,
        command.bx,
        command.cx,
        command.dx);
}
// High-bandwidth receive wrapper with optional debug tracing of the result
// registers.
void VMWareBackdoor::get_high_bandwidth(VMWareCommand& command)
{
    vmware_high_bandwidth_get(command);
    dbgln_if(VMWARE_BACKDOOR_DEBUG, "VMWareBackdoor Command High bandwidth Get Results: EAX {:#x} EBX {:#x} ECX {:#x} EDX {:#x}",
        command.ax,
        command.bx,
        command.cx,
        command.dx);
}
// Low-bandwidth command wrapper with optional debug tracing of the result
// registers.
void VMWareBackdoor::send(VMWareCommand& command)
{
    vmware_out(command);
    dbgln_if(VMWARE_BACKDOOR_DEBUG, "VMWareBackdoor Command Send Results: EAX {:#x} EBX {:#x} ECX {:#x} EDX {:#x}",
        command.ax,
        command.bx,
        command.cx,
        command.dx);
}
// Queries how many words are waiting in the vmmouse data queue. On the
// error status (0xFFFF in the high word of ax), resets the device by
// cycling absolute mode off and on, and reports an empty queue.
u16 VMWareBackdoor::read_mouse_status_queue_size()
{
    VMWareCommand command;
    command.bx = 0;
    command.command = VMMOUSE_STATUS;
    send(command);

    if (command.ax == 0xFFFF0000) {
        dbgln_if(PS2MOUSE_DEBUG, "PS2MouseDevice: Resetting VMWare mouse");
        disable_absolute_vmmouse();
        enable_absolute_vmmouse();
        return 0;
    }

    // Low 16 bits of ax hold the queue length.
    return command.ax & 0xFFFF;
}
// Reads one 4-word vmmouse data packet and converts it into a MousePacket:
// button mask in ax, absolute x in bx, absolute y in cx, wheel delta as a
// signed byte in dx.
MousePacket VMWareBackdoor::receive_mouse_packet()
{
    VMWareCommand command;
    command.size = 4;
    command.command = VMMOUSE_DATA;
    send(command);

    int button_mask = command.ax & 0xFFFF;
    int absolute_x = command.bx;
    int absolute_y = command.cx;
    int wheel = static_cast<i8>(command.dx); // signed 8 bit value only!
    int horizontal_wheel = 0;

    // horizontal scroll is reported as +-2 by qemu
    // FIXME: Scroll only functions correctly when the sign is flipped there
    switch (wheel) {
    case 2:
        horizontal_wheel = -1;
        wheel = 0;
        break;
    case -2:
        horizontal_wheel = 1;
        wheel = 0;
        break;
    default:
        break;
    }

    if constexpr (PS2MOUSE_DEBUG) {
        dbgln("Absolute Mouse: Buttons {:x}", button_mask);
        dbgln("Mouse: x={}, y={}, z={}, w={}", absolute_x, absolute_y, wheel, horizontal_wheel);
    }

    MousePacket packet;
    packet.x = absolute_x;
    packet.y = absolute_y;
    packet.z = wheel;
    packet.w = horizontal_wheel;
    packet.is_relative = false;
    if ((button_mask & VMMOUSE_LEFT_CLICK) != 0)
        packet.buttons |= MousePacket::LeftButton;
    if ((button_mask & VMMOUSE_RIGHT_CLICK) != 0)
        packet.buttons |= MousePacket::RightButton;
    if ((button_mask & VMMOUSE_MIDDLE_CLICK) != 0)
        packet.buttons |= MousePacket::MiddleButton;
    return packet;
}
}

View file

@ -0,0 +1,62 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <AK/kmalloc.h>
#include <Kernel/API/MousePacket.h>
namespace Kernel {
#define VMMOUSE_GETVERSION 10
#define VMMOUSE_DATA 39
#define VMMOUSE_STATUS 40
#define VMMOUSE_COMMAND 41
// Register block exchanged with the VMware backdoor. Each union aliases a
// raw CPU register name with its protocol meaning: ax carries the magic on
// input and the result on output, bx the size/argument, cx the command,
// dx the backdoor I/O port.
struct VMWareCommand {
    union {
        u32 ax;
        u32 magic;
    };
    union {
        u32 bx;
        u32 size;
    };
    union {
        u32 cx;
        u32 command;
    };
    union {
        u32 dx;
        u32 port;
    };
    u32 si; // source buffer pointer (high-bandwidth transfers)
    u32 di; // destination buffer pointer (high-bandwidth transfers)
};
// Interface to the VMware/QEMU backdoor I/O channel, primarily used here
// to drive the absolute-coordinate vmmouse protocol.
class VMWareBackdoor {
public:
    VMWareBackdoor();
    // Returns the singleton instance, or nullptr when no backdoor exists.
    static VMWareBackdoor* the();

    bool vmmouse_is_absolute() const;
    void enable_absolute_vmmouse();
    void disable_absolute_vmmouse();
    // Issues a low-bandwidth backdoor command; results come back in `command`.
    void send(VMWareCommand& command);

    u16 read_mouse_status_queue_size();
    MousePacket receive_mouse_packet();

private:
    void send_high_bandwidth(VMWareCommand& command);
    void get_high_bandwidth(VMWareCommand& command);
    bool detect_vmmouse();
    bool m_vmmouse_absolute { false };
};
}

View file

@ -0,0 +1,19 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <Kernel/Arch/x86_64/I8042Reboot.h>
#include <Kernel/Arch/x86_64/IO.h>
namespace Kernel {
// Requests a CPU reset by sending the "pulse reset line" command (0xFE)
// to the i8042 keyboard controller command port (0x64).
void i8042_reboot()
{
    dbgln("attempting reboot via KB Controller...");
    IO::out8(0x64, 0xFE);
}
}

View file

@ -0,0 +1,13 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
namespace Kernel {

// Reboots the machine via the i8042 keyboard controller reset line.
void i8042_reboot();

}

153
Kernel/Arch/x86_64/IO.h Normal file
View file

@ -0,0 +1,153 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Assertions.h>
#include <AK/Format.h>
#include <AK/Types.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
// Thin wrappers over the x86 port-mapped I/O instructions (in/out).
namespace IO {

// Every character written to this IO port is written to the Bochs console
// (e.g. the console where Qemu is running).
static constexpr u16 BOCHS_DEBUG_PORT = 0xE9;

// Reads one byte from an I/O port.
inline u8 in8(u16 port)
{
    u8 value;
    asm volatile("inb %1, %0"
                 : "=a"(value)
                 : "Nd"(port));
    return value;
}

// Reads one 16-bit word from an I/O port.
inline u16 in16(u16 port)
{
    u16 value;
    asm volatile("inw %1, %0"
                 : "=a"(value)
                 : "Nd"(port));
    return value;
}

// Reads one 32-bit dword from an I/O port.
inline u32 in32(u16 port)
{
    u32 value;
    asm volatile("inl %1, %0"
                 : "=a"(value)
                 : "Nd"(port));
    return value;
}

// Writes one byte to an I/O port.
inline void out8(u16 port, u8 value)
{
    asm volatile("outb %0, %1" ::"a"(value), "Nd"(port));
}

// Writes one 16-bit word to an I/O port.
inline void out16(u16 port, u16 value)
{
    asm volatile("outw %0, %1" ::"a"(value), "Nd"(port));
}

// Writes one 32-bit dword to an I/O port.
inline void out32(u16 port, u32 value)
{
    asm volatile("outl %0, %1" ::"a"(value), "Nd"(port));
}

// Coarse busy-wait: each read of port 0x80 takes roughly 1 microsecond.
inline void delay(size_t microseconds)
{
    for (size_t i = 0; i < microseconds; ++i)
        IO::in8(0x80);
}

}
// Value type wrapping a 16-bit I/O port address, with typed and
// width-parameterized read/write helpers.
class IOAddress {
public:
    IOAddress() = default;
    explicit IOAddress(u16 address)
        : m_address(address)
    {
    }

    // Returns a new address displaced by `o` ports.
    IOAddress offset(u16 o) const { return IOAddress(m_address + o); }
    u16 get() const { return m_address; }
    void set(u16 address) { m_address = address; }
    void mask(u16 m) { m_address &= m; }

    // Reads a value whose width is chosen by sizeof(T) (1, 2 or 4 bytes).
    template<typename T>
    ALWAYS_INLINE T in()
    {
        static_assert(sizeof(T) <= 4);
        if constexpr (sizeof(T) == 4)
            return IO::in32(get());
        if constexpr (sizeof(T) == 2)
            return IO::in16(get());
        if constexpr (sizeof(T) == 1)
            return IO::in8(get());
        VERIFY_NOT_REACHED();
    }

    // Writes a value whose width is chosen by sizeof(T) (1, 2 or 4 bytes).
    template<typename T>
    ALWAYS_INLINE void out(T value) const
    {
        static_assert(sizeof(T) <= 4);
        if constexpr (sizeof(T) == 4) {
            IO::out32(get(), value);
            return;
        }
        if constexpr (sizeof(T) == 2) {
            IO::out16(get(), value);
            return;
        }
        if constexpr (sizeof(T) == 1) {
            IO::out8(get(), value);
            return;
        }
        VERIFY_NOT_REACHED();
    }

    // Runtime-width write; `bit_width` must be 8, 16 or 32.
    inline void out(u32 value, u8 bit_width) const
    {
        if (bit_width == 32) {
            IO::out32(get(), value);
            return;
        }
        if (bit_width == 16) {
            IO::out16(get(), value);
            return;
        }
        if (bit_width == 8) {
            IO::out8(get(), value);
            return;
        }
        VERIFY_NOT_REACHED();
    }

    bool is_null() const { return m_address == 0; }

    bool operator==(IOAddress const& other) const { return m_address == other.m_address; }
    bool operator!=(IOAddress const& other) const { return m_address != other.m_address; }
    bool operator>(IOAddress const& other) const { return m_address > other.m_address; }
    bool operator>=(IOAddress const& other) const { return m_address >= other.m_address; }
    bool operator<(IOAddress const& other) const { return m_address < other.m_address; }
    bool operator<=(IOAddress const& other) const { return m_address <= other.m_address; }

private:
    u16 m_address { 0 };
};
// Formats an IOAddress as "IO <hex-port>" for dbgln/dmesgln.
template<>
struct AK::Formatter<IOAddress> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder& builder, IOAddress value)
    {
        return Formatter<FormatString>::format(builder, "IO {:x}"sv, value.get());
    }
};

View file

@ -0,0 +1,46 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/AtomicRefCounted.h>
#include <AK/Types.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
namespace Kernel {
class GenericInterruptHandler;
// Identifies which concrete interrupt controller an IRQController wraps.
enum class IRQControllerType {
    i8259 = 1,   /* Intel 8259 Dual PIC */
    i82093AA = 2 /* Intel 82093AA I/O ADVANCED PROGRAMMABLE INTERRUPT CONTROLLER (IOAPIC) */
};
// Abstract base for interrupt controllers (PIC, IOAPIC). Concrete drivers
// implement enable/disable/EOI per vector; "hard disabled" marks a
// controller that has been permanently taken out of service.
class IRQController : public AtomicRefCounted<IRQController> {
public:
    virtual ~IRQController() = default;

    virtual void enable(GenericInterruptHandler const&) = 0;
    virtual void disable(GenericInterruptHandler const&) = 0;
    // Permanently disables this controller; default just records the state.
    virtual void hard_disable() { m_hard_disabled = true; }
    virtual bool is_vector_enabled(u8 number) const = 0;
    virtual bool is_enabled() const = 0;
    bool is_hard_disabled() const { return m_hard_disabled; }
    // End-of-interrupt acknowledgements (regular and spurious).
    virtual void eoi(GenericInterruptHandler const&) const = 0;
    virtual void spurious_eoi(GenericInterruptHandler const&) const = 0;
    virtual size_t interrupt_vectors_count() const = 0;
    // First global system interrupt number this controller services.
    virtual u32 gsi_base() const = 0;
    // In-service and interrupt-request register snapshots.
    virtual u16 get_isr() const = 0;
    virtual u16 get_irr() const = 0;
    virtual StringView model() const = 0;
    virtual IRQControllerType type() const = 0;

protected:
    IRQController() = default;
    virtual void initialize() = 0;
    bool m_hard_disabled { false };
};
}

View file

@ -0,0 +1,116 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/ISABus/HID/PS2KeyboardDevice.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/HID/HIDManagement.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/TTY/ConsoleManagement.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
#define IRQ_KEYBOARD 1
// Consumes one raw byte from the i8042 first (keyboard) port. Bytes arrive
// as scan code set 1 (the controller enables first-port translation),
// optionally prefixed by 0xE0 for extended keys.
void PS2KeyboardDevice::irq_handle_byte_read(u8 byte)
{
    u8 ch = byte & 0x7f;           // Scan code without the break (release) bit.
    bool pressed = !(byte & 0x80); // Bit 7 set means key release.
    m_entropy_source.add_random_event(byte);
    if (byte == 0xe0) {
        // Extended scan code prefix; the next byte carries the actual code.
        m_has_e0_prefix = true;
        return;
    }
    if ((m_modifiers == (Mod_Alt | Mod_Shift) || m_modifiers == (Mod_Ctrl | Mod_Alt | Mod_Shift)) && byte == 0x58) {
        // Alt+Shift+F12 pressed, dump some kernel state to the debug console.
        ConsoleManagement::the().switch_to_debug();
        Scheduler::dump_scheduler_state(m_modifiers == (Mod_Ctrl | Mod_Alt | Mod_Shift));
    }
    dbgln_if(KEYBOARD_DEBUG, "Keyboard::irq_handle_byte_read: {:#02x} {}", ch, (pressed ? "down" : "up"));
    // First, track modifier key state (Alt/AltGr, Ctrl, Super, Shift).
    switch (ch) {
    case 0x38:
        // With the 0xE0 prefix this is right Alt (AltGr); plain 0x38 is left Alt.
        if (m_has_e0_prefix)
            update_modifier(Mod_AltGr, pressed);
        else
            update_modifier(Mod_Alt, pressed);
        break;
    case 0x1d:
        update_modifier(Mod_Ctrl, pressed);
        break;
    case 0x5b:
        m_left_super_pressed = pressed;
        update_modifier(Mod_Super, m_left_super_pressed || m_right_super_pressed);
        break;
    case 0x5c:
        m_right_super_pressed = pressed;
        update_modifier(Mod_Super, m_left_super_pressed || m_right_super_pressed);
        break;
    case 0x2a:
        m_left_shift_pressed = pressed;
        update_modifier(Mod_Shift, m_left_shift_pressed || m_right_shift_pressed);
        break;
    case 0x36:
        m_right_shift_pressed = pressed;
        update_modifier(Mod_Shift, m_left_shift_pressed || m_right_shift_pressed);
        break;
    }
    switch (ch) {
    // NOTE(review): ch is masked to 7 bits above while I8042Response::Acknowledge
    // is 0xFA, so this case appears unreachable; possibly `byte` was intended
    // here. Confirm upstream before changing.
    case I8042Response::Acknowledge:
        break;
    default:
        // Alt + digit-row key switches virtual consoles (scan code 0x02 is the
        // '1' key in set 1, hence the `ch - 0x02` console index below).
        if ((m_modifiers & Mod_Alt) != 0 && ch >= 2 && ch <= ConsoleManagement::s_max_virtual_consoles + 1) {
            // FIXME: Do something sanely here if we can't allocate a work queue?
            MUST(g_io_work->try_queue([ch]() {
                ConsoleManagement::the().switch_to(ch - 0x02);
            }));
        }
        key_state_changed(ch, pressed);
    }
}
// ^IRQHandler: invoked on the keyboard IRQ. The shared i8042 controller
// drains the output buffer and dispatches the byte back to us via
// irq_handle_byte_read().
bool PS2KeyboardDevice::handle_irq(RegisterState const&)
{
    // The controller will read the data and call irq_handle_byte_read
    // for the appropriate device
    return m_i8042_controller->irq_process_input_buffer(HIDDevice::Type::Keyboard);
}

// Creates the device, registers it with DeviceManagement, and resets the
// keyboard; fails if the PS/2 reset handshake fails.
UNMAP_AFTER_INIT ErrorOr<NonnullLockRefPtr<PS2KeyboardDevice>> PS2KeyboardDevice::try_to_initialize(I8042Controller const& ps2_controller)
{
    auto keyboard_device = TRY(DeviceManagement::try_create_device<PS2KeyboardDevice>(ps2_controller));
    TRY(keyboard_device->initialize());
    return keyboard_device;
}

// Issues the i8042 "reset device" sequence to the keyboard port.
UNMAP_AFTER_INIT ErrorOr<void> PS2KeyboardDevice::initialize()
{
    return m_i8042_controller->reset_device(HIDDevice::Type::Keyboard);
}
// FIXME: UNMAP_AFTER_INIT might not be correct, because in practice PS/2 devices
// are hot pluggable.
UNMAP_AFTER_INIT PS2KeyboardDevice::PS2KeyboardDevice(I8042Controller const& ps2_controller)
    : IRQHandler(IRQ_KEYBOARD) // The PS/2 keyboard is wired to IRQ 1.
    , KeyboardDevice()
    , I8042Device(ps2_controller)
{
}

// FIXME: UNMAP_AFTER_INIT might not be correct, because in practice PS/2 devices
// are hot pluggable.
UNMAP_AFTER_INIT PS2KeyboardDevice::~PS2KeyboardDevice() = default;
}

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/CircularQueue.h>
#include <AK/Types.h>
#include <Kernel/API/KeyCode.h>
#include <Kernel/Arch/x86_64/ISABus/I8042Controller.h>
#include <Kernel/Devices/HID/KeyboardDevice.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Random.h>
namespace Kernel {
// PS/2 keyboard attached to the first i8042 port (IRQ 1, per the .cpp).
// Raw scan-code bytes arrive via irq_handle_byte_read() from the controller.
class PS2KeyboardDevice final : public IRQHandler
    , public KeyboardDevice
    , public I8042Device {
    friend class DeviceManagement;

public:
    static ErrorOr<NonnullLockRefPtr<PS2KeyboardDevice>> try_to_initialize(I8042Controller const&);
    virtual ~PS2KeyboardDevice() override;
    ErrorOr<void> initialize();

    virtual StringView purpose() const override { return class_name(); }

    // ^I8042Device
    virtual void irq_handle_byte_read(u8 byte) override;
    virtual void enable_interrupts() override
    {
        enable_irq();
    }

private:
    explicit PS2KeyboardDevice(I8042Controller const&);

    // ^IRQHandler
    virtual bool handle_irq(RegisterState const&) override;

    // ^CharacterDevice
    // NOTE(review): reports the generic "KeyboardDevice" name rather than
    // "PS2KeyboardDevice"; left as-is since the string may be user-visible.
    virtual StringView class_name() const override { return "KeyboardDevice"sv; }
};
}

View file

@ -0,0 +1,223 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Memory.h>
#include <Kernel/Arch/x86_64/Hypervisor/VMWareBackdoor.h>
#include <Kernel/Arch/x86_64/ISABus/HID/PS2MouseDevice.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Sections.h>
namespace Kernel {
#define IRQ_MOUSE 12
#define PS2MOUSE_INTELLIMOUSE_ID 0x03
#define PS2MOUSE_INTELLIMOUSE_EXPLORER_ID 0x04
UNMAP_AFTER_INIT PS2MouseDevice::PS2MouseDevice(I8042Controller const& ps2_controller)
    : IRQHandler(IRQ_MOUSE) // The PS/2 mouse is wired to IRQ 12.
    , MouseDevice()
    , I8042Device(ps2_controller)
{
}

UNMAP_AFTER_INIT PS2MouseDevice::~PS2MouseDevice() = default;

// ^IRQHandler: the shared i8042 controller reads the pending byte and routes
// it back to this device's irq_handle_byte_read().
bool PS2MouseDevice::handle_irq(RegisterState const&)
{
    // The controller will read the data and call irq_handle_byte_read
    // for the appropriate device
    return m_i8042_controller->irq_process_input_buffer(instrument_type());
}
// Feeds one raw byte into the 3/4-byte PS/2 mouse packet state machine.
// m_data_state indexes which packet byte is being filled; once a packet is
// complete it is parsed, queued, and the state resets to 0.
void PS2MouseDevice::irq_handle_byte_read(u8 byte)
{
    // Parse the assembled packet, hand it to the input queue, and wake readers.
    auto commit_packet = [&] {
        m_data_state = 0;
        dbgln_if(PS2MOUSE_DEBUG, "PS2Mouse: {}, {} {} {}",
            m_data.bytes[1],
            m_data.bytes[2],
            (m_data.bytes[0] & 1) ? "Left" : "",
            (m_data.bytes[0] & 2) ? "Right" : "");
        m_entropy_source.add_random_event(m_data.dword);
        {
            SpinlockLocker lock(m_queue_lock);
            m_queue.enqueue(parse_data_packet(m_data));
        }
        evaluate_block_conditions();
    };

    VERIFY(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
    m_data.bytes[m_data_state] = byte;

    switch (m_data_state) {
    case 0:
        // Bit 3 of the first packet byte is always set; if it isn't, we have
        // lost sync with the stream, so drop this byte and stay at state 0.
        if (!(byte & 0x08)) {
            dbgln("PS2Mouse: Stream out of sync.");
            break;
        }
        ++m_data_state;
        break;
    case 1:
        // X movement byte.
        ++m_data_state;
        break;
    case 2:
        // Y movement byte; wheel mice send a fourth byte.
        if (m_has_wheel) {
            ++m_data_state;
            break;
        }
        commit_packet();
        break;
    case 3:
        // Wheel / extra-buttons byte (Intellimouse extension).
        VERIFY(m_has_wheel);
        commit_packet();
        break;
    }
}
// Decodes a raw 3/4-byte PS/2 mouse packet into a MousePacket.
// Byte 0 carries button bits (0-2), X/Y sign bits (4/5) and overflow bits
// (6/7); bytes 1/2 are the X/Y deltas; byte 3 (wheel mice only) carries the
// wheel delta and, on 5-button mice, the extra button bits.
MousePacket PS2MouseDevice::parse_data_packet(RawPacket const& raw_packet)
{
    u8 const flags = raw_packet.bytes[0];
    int delta_x = raw_packet.bytes[1];
    int delta_y = raw_packet.bytes[2];
    int wheel_z = 0;
    int wheel_w = 0;

    if (m_has_wheel) {
        // FIXME: For non-Intellimouse, this is a full byte.
        // However, for now, m_has_wheel is only set for Intellimouse.
        wheel_z = raw_packet.bytes[3] & 0x0f;
        if (wheel_z == 15)
            wheel_z = -1; // -1 encoded in 4 bits
        bool is_horizontal_scroll = (raw_packet.bytes[3] & 0xc0) == 0x40;
        if (is_horizontal_scroll) {
            // FIXME: Scroll only functions correctly when the sign is flipped there
            wheel_w = -wheel_z;
            wheel_z = 0;
        }
    }

    // The deltas are 9-bit two's complement: apply the sign bits from byte 0.
    if ((flags & 0x10) && delta_x != 0)
        delta_x -= 0x100;
    if ((flags & 0x20) && delta_y != 0)
        delta_y -= 0x100;

    // On overflow, discard the movement entirely.
    bool overflowed = (flags & 0x40) || (flags & 0x80);
    if (overflowed) {
        delta_x = 0;
        delta_y = 0;
    }

    MousePacket packet;
    packet.x = delta_x;
    packet.y = delta_y;
    packet.z = wheel_z;
    packet.w = wheel_w;
    packet.buttons = flags & 0x07;

    if (m_has_five_buttons) {
        if (raw_packet.bytes[3] & 0x10)
            packet.buttons |= MousePacket::BackwardButton;
        if (raw_packet.bytes[3] & 0x20)
            packet.buttons |= MousePacket::ForwardButton;
    }

    packet.is_relative = true;
    dbgln_if(PS2MOUSE_DEBUG, "PS2 Relative Mouse: Buttons {:x}", packet.buttons);
    dbgln_if(PS2MOUSE_DEBUG, "Mouse: X {}, Y {}, Z {}, W {}", packet.x, packet.y, packet.z, packet.w);
    return packet;
}
// Queries the PS/2 device ID byte (0x03 = Intellimouse, 0x04 = Intellimouse
// Explorer, per the defines above).
ErrorOr<u8> PS2MouseDevice::get_device_id()
{
    TRY(send_command(I8042Command::GetDeviceID));
    return read_from_device();
}

ErrorOr<u8> PS2MouseDevice::read_from_device()
{
    return m_i8042_controller->read_from_device(instrument_type());
}

// Sends one command byte; any response other than ACK (0xFA) becomes EIO.
ErrorOr<u8> PS2MouseDevice::send_command(u8 command)
{
    u8 response = TRY(m_i8042_controller->send_command(instrument_type(), command));
    if (response != I8042Response::Acknowledge) {
        dbgln("PS2MouseDevice: Command {} got {} but expected ack: {}", command, response, static_cast<u8>(I8042Response::Acknowledge));
        return Error::from_errno(EIO);
    }
    return response;
}

// Same as above, for commands that carry one payload data byte.
ErrorOr<u8> PS2MouseDevice::send_command(u8 command, u8 data)
{
    u8 response = TRY(m_i8042_controller->send_command(instrument_type(), command, data));
    if (response != I8042Response::Acknowledge) {
        dbgln("PS2MouseDevice: Command {} got {} but expected ack: {}", command, response, static_cast<u8>(I8042Response::Acknowledge));
        return Error::from_errno(EIO);
    }
    return response;
}

ErrorOr<void> PS2MouseDevice::set_sample_rate(u8 rate)
{
    TRY(send_command(I8042Command::SetSampleRate, rate));
    return {};
}
// Creates the device, registers it with DeviceManagement, and runs the
// reset/feature-detection sequence below.
UNMAP_AFTER_INIT ErrorOr<NonnullLockRefPtr<PS2MouseDevice>> PS2MouseDevice::try_to_initialize(I8042Controller const& ps2_controller)
{
    auto mouse_device = TRY(DeviceManagement::try_create_device<PS2MouseDevice>(ps2_controller));
    TRY(mouse_device->initialize());
    return mouse_device;
}

// Resets the mouse, then tries to unlock the Intellimouse extensions: the
// 200/100/80 sample-rate sequence enables the wheel (device ID becomes 0x03),
// and 200/200/80 enables 5-button Explorer mode (ID 0x04).
UNMAP_AFTER_INIT ErrorOr<void> PS2MouseDevice::initialize()
{
    TRY(m_i8042_controller->reset_device(instrument_type()));

    u8 device_id = TRY(read_from_device());
    TRY(send_command(I8042Command::SetDefaults));
    TRY(send_command(I8042Command::EnablePacketStreaming));
    if (device_id != PS2MOUSE_INTELLIMOUSE_ID) {
        // Send magical wheel initiation sequence.
        TRY(set_sample_rate(200));
        TRY(set_sample_rate(100));
        TRY(set_sample_rate(80));
        device_id = TRY(get_device_id());
    }
    if (device_id == PS2MOUSE_INTELLIMOUSE_ID) {
        m_has_wheel = true;
        dmesgln("PS2MouseDevice: Mouse wheel enabled!");
    } else {
        dmesgln("PS2MouseDevice: No mouse wheel detected!");
    }

    if (device_id == PS2MOUSE_INTELLIMOUSE_ID) {
        // Try to enable 5 buttons as well!
        TRY(set_sample_rate(200));
        TRY(set_sample_rate(200));
        TRY(set_sample_rate(80));
        device_id = TRY(get_device_id());
    }

    if (device_id == PS2MOUSE_INTELLIMOUSE_EXPLORER_ID) {
        m_has_five_buttons = true;
        dmesgln("PS2MouseDevice: 5 buttons enabled!");
    }
    return {};
}
}

View file

@ -0,0 +1,63 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/CircularQueue.h>
#include <Kernel/API/MousePacket.h>
#include <Kernel/Arch/x86_64/ISABus/I8042Controller.h>
#include <Kernel/Devices/HID/MouseDevice.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Random.h>
namespace Kernel {
// PS/2 mouse driver attached to the second i8042 port (IRQ 12, per the .cpp).
// Subclassed by VMWareMouseDevice for absolute-pointer operation.
class PS2MouseDevice : public IRQHandler
    , public MouseDevice
    , public I8042Device {
    friend class DeviceManagement;

public:
    static ErrorOr<NonnullLockRefPtr<PS2MouseDevice>> try_to_initialize(I8042Controller const&);
    ErrorOr<void> initialize();

    virtual ~PS2MouseDevice() override;

    virtual StringView purpose() const override { return class_name(); }

    // ^I8042Device
    virtual void irq_handle_byte_read(u8 byte) override;
    virtual void enable_interrupts() override
    {
        enable_irq();
    }

protected:
    explicit PS2MouseDevice(I8042Controller const&);

    // ^IRQHandler
    virtual bool handle_irq(RegisterState const&) override;

    // One raw 3/4-byte mouse packet, viewable as a u32 or as bytes.
    struct RawPacket {
        union {
            u32 dword;
            u8 bytes[4];
        };
    };

    ErrorOr<u8> read_from_device();
    ErrorOr<u8> send_command(u8 command);
    ErrorOr<u8> send_command(u8 command, u8 data);
    MousePacket parse_data_packet(RawPacket const&);
    ErrorOr<void> set_sample_rate(u8);
    ErrorOr<u8> get_device_id();

    u8 m_data_state { 0 };             // Index of the next packet byte to fill.
    RawPacket m_data;                  // Packet currently being assembled.
    bool m_has_wheel { false };        // Intellimouse wheel detected (4-byte packets).
    bool m_has_five_buttons { false }; // Intellimouse Explorer (5 buttons) detected.
};
}

View file

@ -0,0 +1,60 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/Hypervisor/VMWareBackdoor.h>
#include <Kernel/Arch/x86_64/ISABus/HID/VMWareMouseDevice.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Sections.h>
namespace Kernel {
// Only succeeds when the VMWare backdoor is present and the vmmouse is in
// absolute mode; otherwise the caller falls back to a regular PS2MouseDevice.
UNMAP_AFTER_INIT ErrorOr<NonnullLockRefPtr<VMWareMouseDevice>> VMWareMouseDevice::try_to_initialize(I8042Controller const& ps2_controller)
{
    // FIXME: return the correct error
    if (!VMWareBackdoor::the())
        return Error::from_errno(EIO);
    if (!VMWareBackdoor::the()->vmmouse_is_absolute())
        return Error::from_errno(EIO);
    auto mouse_device = TRY(DeviceManagement::try_create_device<VMWareMouseDevice>(ps2_controller));
    TRY(mouse_device->initialize());
    return mouse_device;
}

// The raw PS/2 byte is discarded; complete packets are fetched out-of-band
// from the VMWare backdoor status queue instead.
void VMWareMouseDevice::irq_handle_byte_read(u8)
{
    auto backdoor = VMWareBackdoor::the();
    VERIFY(backdoor);
    VERIFY(backdoor->vmmouse_is_absolute());
    // We will receive 4 bytes from the I8042 controller that we are going to
    // ignore. Instead, we will check with VMWareBackdoor to see how many bytes
    // of mouse event data are waiting for us. For each multiple of 4, we
    // produce a mouse packet.
    // NOTE(review): the pre-increment condition runs this loop at most
    // max_iterations - 1 (127) times; confirm whether 128 was intended.
    constexpr u8 max_iterations = 128;
    u8 current_iteration = 0;
    while (++current_iteration < max_iterations) {
        auto number_of_mouse_event_bytes = backdoor->read_mouse_status_queue_size();
        if (number_of_mouse_event_bytes == 0)
            break;
        VERIFY(number_of_mouse_event_bytes % 4 == 0);

        auto mouse_packet = backdoor->receive_mouse_packet();
        m_entropy_source.add_random_event(mouse_packet);
        {
            SpinlockLocker lock(m_queue_lock);
            m_queue.enqueue(mouse_packet);
        }
    }
    evaluate_block_conditions();
}

VMWareMouseDevice::VMWareMouseDevice(I8042Controller const& ps2_controller)
    : PS2MouseDevice(ps2_controller)
{
}

VMWareMouseDevice::~VMWareMouseDevice() = default;
}

View file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/CircularQueue.h>
#include <Kernel/API/MousePacket.h>
#include <Kernel/Arch/x86_64/ISABus/HID/PS2MouseDevice.h>
#include <Kernel/Arch/x86_64/ISABus/I8042Controller.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Random.h>
namespace Kernel {
// Absolute-coordinate mouse backed by the VMWare backdoor. Reuses the PS/2
// mouse IRQ plumbing but ignores the raw PS/2 bytes (see the .cpp).
class VMWareMouseDevice final : public PS2MouseDevice {
public:
    friend class DeviceManagement;
    static ErrorOr<NonnullLockRefPtr<VMWareMouseDevice>> try_to_initialize(I8042Controller const&);
    virtual ~VMWareMouseDevice() override;

    // ^I8042Device
    virtual void irq_handle_byte_read(u8 byte) override;

private:
    explicit VMWareMouseDevice(I8042Controller const&);
};
}

View file

@ -0,0 +1,387 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/ISABus/HID/PS2KeyboardDevice.h>
#include <Kernel/Arch/x86_64/ISABus/HID/PS2MouseDevice.h>
#include <Kernel/Arch/x86_64/ISABus/HID/VMWareMouseDevice.h>
#include <Kernel/Arch/x86_64/ISABus/I8042Controller.h>
#include <Kernel/Sections.h>
namespace Kernel {
// Factory only: probing and device detection happen separately via
// check_existence_via_probing() and detect_devices().
UNMAP_AFTER_INIT NonnullLockRefPtr<I8042Controller> I8042Controller::initialize()
{
    return adopt_lock_ref(*new I8042Controller());
}

// Accessors for the devices discovered by detect_devices(); may be null.
LockRefPtr<MouseDevice> I8042Controller::mouse() const
{
    return m_mouse_device;
}

LockRefPtr<KeyboardDevice> I8042Controller::keyboard() const
{
    return m_keyboard_device;
}

UNMAP_AFTER_INIT I8042Controller::I8042Controller()
{
}
// Existence probe: drains the output buffer, saves the configuration byte,
// runs the controller self-test with retries, then restores the saved
// configuration. Returns false if the controller never responds sanely.
UNMAP_AFTER_INIT bool I8042Controller::check_existence_via_probing(Badge<HIDManagement>)
{
    {
        u8 configuration = 0;
        SpinlockLocker lock(m_lock);

        // This drains the output buffer and serves as an existence test.
        if (auto result = drain_output_buffer(); result.is_error()) {
            dbgln("I8042: Trying to flush output buffer as an existence test failed, error {}", result.error());
            return false;
        }

        // Note: Perform controller self-test before touching the controller.
        // Probe the controller up to 20 times (see the loop below) and give up
        // if nothing responded.
        // Some controllers will reset and behave abnormally on this, so let's ensure
        // we keep the configuration before initiating this command.
        if (auto result = do_wait_then_write(I8042Port::Command, I8042Command::ReadConfiguration); result.is_error()) {
            dbgln("I8042: Trying to read configuration failed during the existence test, error {}", result.error());
            return false;
        }

        {
            auto result = do_wait_then_read(I8042Port::Buffer);
            if (result.is_error()) {
                dbgln("I8042: Trying to read configuration failed during the existence test, error {}", result.error());
                return false;
            }
            configuration = result.release_value();
        }

        bool successful_self_test = false;
        for (int attempt = 0; attempt < 20; attempt++) {
            do_write(I8042Port::Command, I8042Command::TestPS2Controller);
            if (do_read(I8042Port::Buffer) == I8042Response::ControllerTestPassed) {
                successful_self_test = true;
                break;
            }
            // Note: Wait 500 microseconds in case the controller couldn't respond
            microseconds_delay(500);
        }
        if (!successful_self_test) {
            dbgln("I8042: Trying to probe for existence of controller failed");
            return false;
        }

        // The self-test may have reset the controller; write the saved
        // configuration byte back.
        if (auto result = do_wait_then_write(I8042Port::Command, I8042Command::WriteConfiguration); result.is_error()) {
            dbgln("I8042: Trying to restore configuration after self-test failed with error {}", result.error());
            return false;
        }

        if (auto result = do_wait_then_write(I8042Port::Buffer, configuration); result.is_error()) {
            dbgln("I8042: Trying to write restored configuration after self-test failed with error {}", result.error());
            return false;
        }

        return true;
    }
}
// Probes both i8042 ports, initializes any attached keyboard/mouse devices,
// and enables their IRQs. Called once during boot, after existence probing.
UNMAP_AFTER_INIT ErrorOr<void> I8042Controller::detect_devices()
{
    u8 configuration;
    {
        SpinlockLocker lock(m_lock);
        // Note: This flushes all the garbage left in the controller registers
        TRY(drain_output_buffer());
        TRY(do_wait_then_write(I8042Port::Command, I8042Command::DisableFirstPS2Port));
        TRY(do_wait_then_write(I8042Port::Command, I8042Command::DisableSecondPS2Port)); // ignored if it doesn't exist
        TRY(do_wait_then_write(I8042Port::Command, I8042Command::ReadConfiguration));
        configuration = TRY(do_wait_then_read(I8042Port::Buffer));
        TRY(do_wait_then_write(I8042Port::Command, I8042Command::WriteConfiguration));
        // Start with both port interrupts masked; they are re-enabled per
        // usable port further down.
        configuration &= ~I8042ConfigurationFlag::FirstPS2PortInterrupt;
        configuration &= ~I8042ConfigurationFlag::SecondPS2PortInterrupt;
        // Note: The default BIOS on the QEMU microvm machine type (qboot) doesn't
        // behave like SeaBIOS, which means it doesn't set first port scan code translation.
        // However we rely on compatibility feature of the i8042 to send scan codes of set 1.
        // To ensure that the controller is always outputting correct scan codes, set it
        // to scan code 2 (because SeaBIOS on regular QEMU machine does this for us) and enable
        // first port translation to ensure all scan codes are translated to scan code set 1.
        configuration |= I8042ConfigurationFlag::FirstPS2PortTranslation;
        TRY(do_wait_then_write(I8042Port::Buffer, configuration));
        TRY(do_wait_then_write(I8042Port::Buffer, I8042Command::SetScanCodeSet));
        TRY(do_wait_then_write(I8042Port::Buffer, 0x2));

        // A set "second port clock disabled" bit at this point implies a
        // second (mouse) port exists.
        m_is_dual_channel = (configuration & I8042ConfigurationFlag::SecondPS2PortClock) != 0;
        dbgln("I8042: {} channel controller", m_is_dual_channel ? "Dual" : "Single");

        // Perform controller self-test
        TRY(do_wait_then_write(I8042Port::Command, I8042Command::TestPS2Controller));
        auto self_test_result = TRY(do_wait_then_read(I8042Port::Buffer));
        if (self_test_result == I8042Response::ControllerTestPassed) {
            // Restore configuration in case the controller reset
            TRY(do_wait_then_write(I8042Port::Command, I8042Command::WriteConfiguration));
            TRY(do_wait_then_write(I8042Port::Buffer, configuration));
        } else {
            dbgln("I8042: Controller self test failed");
        }

        // Test ports and enable them if available
        TRY(do_wait_then_write(I8042Port::Command, I8042Command::TestFirstPS2Port));
        auto first_port_test_result = TRY(do_wait_then_read(I8042Port::Buffer));
        m_first_port_available = (first_port_test_result == 0);

        if (m_first_port_available) {
            TRY(do_wait_then_write(I8042Port::Command, I8042Command::EnableFirstPS2Port));
            configuration |= I8042ConfigurationFlag::FirstPS2PortInterrupt;
            configuration &= ~I8042ConfigurationFlag::FirstPS2PortClock;
        } else {
            dbgln("I8042: Keyboard port not available");
        }

        TRY(drain_output_buffer());

        if (m_is_dual_channel) {
            TRY(do_wait_then_write(I8042Port::Command, I8042Command::TestSecondPS2Port));
            auto test_second_port_result = TRY(do_wait_then_read(I8042Port::Buffer));
            m_second_port_available = (test_second_port_result == 0);
            if (m_second_port_available) {
                TRY(do_wait_then_write(I8042Port::Command, I8042Command::EnableSecondPS2Port));
                configuration |= I8042ConfigurationFlag::SecondPS2PortInterrupt;
                configuration &= ~I8042ConfigurationFlag::SecondPS2PortClock;
            } else {
                dbgln("I8042: Mouse port not available");
            }
        }

        // Enable IRQs for the ports that are usable
        if (m_first_port_available || m_second_port_available) {
            configuration &= ~I8042ConfigurationFlag::FirstPS2PortClock;
            configuration &= ~I8042ConfigurationFlag::SecondPS2PortClock;
            TRY(do_wait_then_write(I8042Port::Command, I8042Command::WriteConfiguration));
            TRY(do_wait_then_write(I8042Port::Buffer, configuration));
        }
    }

    // Try to detect and initialize the devices
    // NOTE(review): from here on, `configuration` is updated and rewritten
    // under a freshly taken lock in each failure path; during single-threaded
    // boot this is presumably fine — confirm if this ever runs concurrently.
    if (m_first_port_available) {
        auto error_or_device = PS2KeyboardDevice::try_to_initialize(*this);
        if (error_or_device.is_error()) {
            dbgln("I8042: Keyboard device failed to initialize, disable");
            m_first_port_available = false;
            configuration &= ~I8042ConfigurationFlag::FirstPS2PortInterrupt;
            configuration |= I8042ConfigurationFlag::FirstPS2PortClock;
            m_keyboard_device = nullptr;
            SpinlockLocker lock(m_lock);
            TRY(do_wait_then_write(I8042Port::Command, I8042Command::WriteConfiguration));
            TRY(do_wait_then_write(I8042Port::Buffer, configuration));
        } else {
            m_keyboard_device = error_or_device.release_value();
        }
    }
    if (m_second_port_available) {
        // Prefer the VMWare absolute-mode mouse when the backdoor is active.
        auto vmmouse_device_or_error = VMWareMouseDevice::try_to_initialize(*this);
        if (vmmouse_device_or_error.is_error()) {
            // FIXME: is there something to do with the VMWare errors?
            auto mouse_device_or_error = PS2MouseDevice::try_to_initialize(*this);
            if (mouse_device_or_error.is_error()) {
                dbgln("I8042: Mouse device failed to initialize, disable");
                m_second_port_available = false;
                configuration |= I8042ConfigurationFlag::SecondPS2PortClock;
                m_mouse_device = nullptr;
                SpinlockLocker lock(m_lock);
                TRY(do_wait_then_write(I8042Port::Command, I8042Command::WriteConfiguration));
                TRY(do_wait_then_write(I8042Port::Buffer, configuration));
            } else {
                m_mouse_device = mouse_device_or_error.release_value();
            }
        } else {
            m_mouse_device = vmmouse_device_or_error.release_value();
        }
    }

    // Enable IRQs after both are detected and initialized
    if (m_keyboard_device)
        m_keyboard_device->enable_interrupts();
    if (m_mouse_device)
        m_mouse_device->enable_interrupts();
    return {};
}
// Called from IRQ context: if the output buffer holds a byte, reads it and
// hands it to the keyboard or mouse device. Returns whether a byte was consumed.
bool I8042Controller::irq_process_input_buffer(HIDDevice::Type instrument_type)
{
    VERIFY(Processor::current_in_irq());

    u8 status = IO::in8(I8042Port::Status);
    if (!(status & I8042StatusFlag::OutputBuffer))
        return false;
    u8 byte = IO::in8(I8042Port::Buffer);
    if (instrument_type == HIDDevice::Type::Mouse) {
        VERIFY(m_mouse_device);
        static_cast<PS2MouseDevice&>(*m_mouse_device).irq_handle_byte_read(byte);
        return true;
    }
    if (instrument_type == HIDDevice::Type::Keyboard) {
        VERIFY(m_keyboard_device);
        static_cast<PS2KeyboardDevice&>(*m_keyboard_device).irq_handle_byte_read(byte);
        return true;
    }
    return false;
}

// Reads and discards pending output-buffer bytes (up to 50 tries, 100us apart)
// until the buffer reports empty; EBUSY if it never drains.
ErrorOr<void> I8042Controller::drain_output_buffer()
{
    for (int attempt = 0; attempt < 50; attempt++) {
        u8 status = IO::in8(I8042Port::Status);
        if (!(status & I8042StatusFlag::OutputBuffer))
            return {};
        IO::in8(I8042Port::Buffer);

        microseconds_delay(100);
    }
    return Error::from_errno(EBUSY);
}
// Sends the PS/2 Reset command to the device and waits for the ACK (0xFA)
// followed by the device self-test result (0xAA). Caller must hold m_lock.
ErrorOr<void> I8042Controller::do_reset_device(HIDDevice::Type device)
{
    VERIFY(device != HIDDevice::Type::Unknown);
    VERIFY(m_lock.is_locked());

    VERIFY(!Processor::current_in_irq());
    auto reset_result = TRY(do_send_command(device, I8042Command::Reset));
    // FIXME: Is this the correct errno value for this?
    if (reset_result != I8042Response::Acknowledge)
        return Error::from_errno(EIO);
    // Wait until we get the self-test result
    auto self_test_result = TRY(do_wait_then_read(I8042Port::Buffer));

    // FIXME: Is this the correct errno value for this?
    if (self_test_result != I8042Response::Success)
        return Error::from_errno(EIO);
    return {};
}

// Sends a single command byte to the device. Caller must hold m_lock.
ErrorOr<u8> I8042Controller::do_send_command(HIDDevice::Type device, u8 command)
{
    VERIFY(device != HIDDevice::Type::Unknown);
    VERIFY(m_lock.is_locked());

    VERIFY(!Processor::current_in_irq());

    return do_write_to_device(device, command);
}

// Sends a command byte followed by one data byte; the data byte is only
// written if the command was ACKed. Caller must hold m_lock.
ErrorOr<u8> I8042Controller::do_send_command(HIDDevice::Type device, u8 command, u8 data)
{
    VERIFY(device != HIDDevice::Type::Unknown);
    VERIFY(m_lock.is_locked());

    VERIFY(!Processor::current_in_irq());

    u8 response = TRY(do_write_to_device(device, command));
    if (response == I8042Response::Acknowledge)
        response = TRY(do_write_to_device(device, data));
    return response;
}
// Writes one byte to the given device, retrying on a Resend (0xFE) response
// up to 250 times. Writes to the mouse (second port) must be prefixed with
// the controller's "write to second port input buffer" command. Caller must
// hold m_lock.
ErrorOr<u8> I8042Controller::do_write_to_device(HIDDevice::Type device, u8 data)
{
    VERIFY(device != HIDDevice::Type::Unknown);
    VERIFY(m_lock.is_locked());

    VERIFY(!Processor::current_in_irq());

    int attempts = 0;
    u8 response;
    do {
        if (device != HIDDevice::Type::Keyboard) {
            TRY(prepare_for_output());
            IO::out8(I8042Port::Command, I8042Command::WriteSecondPS2PortInputBuffer);
        }
        TRY(prepare_for_output());
        IO::out8(I8042Port::Buffer, data);

        response = TRY(do_wait_then_read(I8042Port::Buffer));
    } while (response == I8042Response::Resend && ++attempts < 250);
    if (attempts >= 250)
        dbgln("Failed to write byte to device, gave up");
    return response;
}
// Waits for a byte from the given device, then reads it from the data buffer.
ErrorOr<u8> I8042Controller::do_read_from_device(HIDDevice::Type device)
{
    VERIFY(device != HIDDevice::Type::Unknown);

    TRY(prepare_for_input(device));
    return IO::in8(I8042Port::Buffer);
}

// Polls (up to 1000 times, 1ms apart) until the output buffer holds a byte
// from the requested port; any byte is accepted when device == Unknown.
// EBUSY on timeout. Caller must hold m_lock.
ErrorOr<void> I8042Controller::prepare_for_input(HIDDevice::Type device)
{
    VERIFY(m_lock.is_locked());
    // Status bit 5 distinguishes second-port (mouse) data from first-port data.
    u8 const second_port_flag = device == HIDDevice::Type::Keyboard ? 0 : I8042StatusFlag::SecondPS2PortOutputBuffer;
    for (int attempt = 0; attempt < 1000; attempt++) {
        u8 status = IO::in8(I8042Port::Status);
        if (!(status & I8042StatusFlag::OutputBuffer)) {
            microseconds_delay(1000);
            continue;
        }
        if (device == HIDDevice::Type::Unknown)
            return {};
        if ((status & I8042StatusFlag::SecondPS2PortOutputBuffer) == second_port_flag)
            return {};
        microseconds_delay(1000);
    }
    return Error::from_errno(EBUSY);
}

// Polls (up to 250 times, 1ms apart) until the input buffer is empty so the
// controller can accept a byte; EBUSY on timeout. Caller must hold m_lock.
ErrorOr<void> I8042Controller::prepare_for_output()
{
    VERIFY(m_lock.is_locked());
    for (int attempt = 0; attempt < 250; attempt++) {
        u8 status = IO::in8(I8042Port::Status);
        if (!(status & I8042StatusFlag::InputBuffer))
            return {};
        microseconds_delay(1000);
    }
    return Error::from_errno(EBUSY);
}

// Raw, no-wait register access; only used while probing during initialization.
UNMAP_AFTER_INIT void I8042Controller::do_write(u8 port, u8 data)
{
    VERIFY(m_lock.is_locked());
    IO::out8(port, data);
}

UNMAP_AFTER_INIT u8 I8042Controller::do_read(u8 port)
{
    VERIFY(m_lock.is_locked());
    return IO::in8(port);
}

// Waits for the controller to accept output, then writes one byte.
ErrorOr<void> I8042Controller::do_wait_then_write(u8 port, u8 data)
{
    VERIFY(m_lock.is_locked());
    TRY(prepare_for_output());
    IO::out8(port, data);
    return {};
}

// Waits for any pending byte (from either port), then reads it.
ErrorOr<u8> I8042Controller::do_wait_then_read(u8 port)
{
    VERIFY(m_lock.is_locked());
    TRY(prepare_for_input(HIDDevice::Type::Unknown));
    return IO::in8(port);
}
}

View file

@ -0,0 +1,164 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/AtomicRefCounted.h>
#include <Kernel/Devices/HID/KeyboardDevice.h>
#include <Kernel/Devices/HID/MouseDevice.h>
#include <Kernel/Locking/Spinlock.h>
namespace Kernel {
// i8042 I/O port numbers. Port 0x64 is the status register on read and the
// command register on write; 0x60 is the shared data buffer.
enum I8042Port : u8 {
    Buffer = 0x60,
    Command = 0x64,
    Status = 0x64,
};

// Controller commands (written to port 0x64) plus the device-level PS/2
// commands from 0xF0 up (written to the data buffer after selecting a port).
enum I8042Command : u8 {
    ReadConfiguration = 0x20,
    WriteConfiguration = 0x60,
    DisableSecondPS2Port = 0xA7,
    EnableSecondPS2Port = 0xA8,
    TestSecondPS2Port = 0xA9,
    TestPS2Controller = 0xAA,
    TestFirstPS2Port = 0xAB,
    DisableFirstPS2Port = 0xAD,
    EnableFirstPS2Port = 0xAE,
    WriteSecondPS2PortInputBuffer = 0xD4,
    SetScanCodeSet = 0xF0,
    GetDeviceID = 0xF2,
    SetSampleRate = 0xF3,
    EnablePacketStreaming = 0xF4,
    SetDefaults = 0xF6,
    Reset = 0xFF,
};

// Bits of the controller configuration byte (read/written with
// ReadConfiguration / WriteConfiguration).
enum I8042ConfigurationFlag : u8 {
    FirstPS2PortInterrupt = 1 << 0,
    SecondPS2PortInterrupt = 1 << 1,
    SystemFlag = 1 << 2,
    FirstPS2PortClock = 1 << 4,
    SecondPS2PortClock = 1 << 5,
    FirstPS2PortTranslation = 1 << 6,
};

// Bits of the status register (read from port 0x64).
enum I8042StatusFlag : u8 {
    OutputBuffer = 1 << 0,
    InputBuffer = 1 << 1,
    System = 1 << 2,
    InputType = 1 << 3,
    SecondPS2PortOutputBuffer = 1 << 5,
    TimeoutError = 1 << 6,
    ParityError = 1 << 7,
};

// Well-known response bytes from the controller and attached devices.
enum I8042Response : u8 {
    ControllerTestPassed = 0x55,
    Success = 0xAA,
    Acknowledge = 0xFA,
    Resend = 0xFE,
};
class I8042Controller;
// Base for devices attached to the i8042 (keyboard and mouse). Keeps a strong
// reference to the owning controller and receives raw bytes from it.
class I8042Device {
public:
    virtual ~I8042Device() = default;

    // Invoked (from IRQ context in this file) with one byte read from the
    // controller's output buffer.
    virtual void irq_handle_byte_read(u8 byte) = 0;

protected:
    explicit I8042Device(I8042Controller const& ps2_controller)
        : m_i8042_controller(ps2_controller)
    {
    }

    NonnullLockRefPtr<I8042Controller> m_i8042_controller;
};
class PS2KeyboardDevice;
class PS2MouseDevice;
class HIDManagement;
// Driver for the i8042 keyboard/mouse controller. All public entry points
// take m_lock and delegate to the matching do_* helper, which requires the
// lock to already be held (asserted in the definitions).
class I8042Controller final : public AtomicRefCounted<I8042Controller> {
    friend class PS2KeyboardDevice;
    friend class PS2MouseDevice;

public:
    static NonnullLockRefPtr<I8042Controller> initialize();

    // Probes both ports and brings up any attached keyboard/mouse devices.
    ErrorOr<void> detect_devices();

    // Issues the PS/2 "reset device" handshake to the given port's device.
    ErrorOr<void> reset_device(HIDDevice::Type device)
    {
        SpinlockLocker lock(m_lock);
        return do_reset_device(device);
    }

    ErrorOr<u8> send_command(HIDDevice::Type device, u8 command)
    {
        SpinlockLocker lock(m_lock);
        return do_send_command(device, command);
    }

    ErrorOr<u8> send_command(HIDDevice::Type device, u8 command, u8 data)
    {
        SpinlockLocker lock(m_lock);
        return do_send_command(device, command, data);
    }

    ErrorOr<u8> read_from_device(HIDDevice::Type device)
    {
        SpinlockLocker lock(m_lock);
        return do_read_from_device(device);
    }

    ErrorOr<void> wait_then_write(u8 port, u8 data)
    {
        SpinlockLocker lock(m_lock);
        return do_wait_then_write(port, data);
    }

    ErrorOr<u8> wait_then_read(u8 port)
    {
        SpinlockLocker lock(m_lock);
        return do_wait_then_read(port);
    }

    // Polls until the controller can accept output / has input available.
    ErrorOr<void> prepare_for_output();
    ErrorOr<void> prepare_for_input(HIDDevice::Type);

    // Called from IRQ context to drain one byte and route it to the device.
    bool irq_process_input_buffer(HIDDevice::Type);

    LockRefPtr<MouseDevice> mouse() const;
    LockRefPtr<KeyboardDevice> keyboard() const;

    // Note: This function exists only for the initialization process of the controller
    bool check_existence_via_probing(Badge<HIDManagement>);

private:
    I8042Controller();
    ErrorOr<void> do_reset_device(HIDDevice::Type);
    // Parameters renamed to match the definitions: the second argument is a
    // command byte, not payload data.
    ErrorOr<u8> do_send_command(HIDDevice::Type device, u8 command);
    ErrorOr<u8> do_send_command(HIDDevice::Type device, u8 command, u8 data);
    ErrorOr<u8> do_write_to_device(HIDDevice::Type device, u8 data);
    ErrorOr<u8> do_read_from_device(HIDDevice::Type device);
    ErrorOr<void> do_wait_then_write(u8 port, u8 data);
    ErrorOr<u8> do_wait_then_read(u8 port);
    ErrorOr<void> drain_output_buffer();

    // Note: These functions exist only for the initialization process of the controller
    void do_write(u8 port, u8 data);
    u8 do_read(u8 port);

    Spinlock m_lock { LockRank::None };
    bool m_first_port_available { false };  // First (keyboard) port passed its test.
    bool m_second_port_available { false }; // Second (mouse) port passed its test.
    bool m_is_dual_channel { false };
    LockRefPtr<MouseDevice> m_mouse_device;
    LockRefPtr<KeyboardDevice> m_keyboard_device;
};
}

View file

@ -0,0 +1,56 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/ISABus/IDEController.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
namespace Kernel {
// Creates the legacy ISA IDE controller and brings up both channels.
UNMAP_AFTER_INIT ErrorOr<NonnullLockRefPtr<ISAIDEController>> ISAIDEController::initialize()
{
    auto controller = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) ISAIDEController()));
    TRY(controller->initialize_channels());
    return controller;
}

UNMAP_AFTER_INIT ISAIDEController::ISAIDEController()
{
}

// Sets up the two legacy IDE channels at the standard ISA I/O port ranges
// (primary: base 0x1F0, control 0x3F6; secondary: base 0x170, control 0x376),
// detects attached devices on each, and enables the channels' IRQs.
UNMAP_AFTER_INIT ErrorOr<void> ISAIDEController::initialize_channels()
{
    auto primary_base_io_window = TRY(IOWindow::create_for_io_space(IOAddress(0x1F0), 8));
    auto primary_control_io_window = TRY(IOWindow::create_for_io_space(IOAddress(0x3F6), 4));
    auto secondary_base_io_window = TRY(IOWindow::create_for_io_space(IOAddress(0x170), 8));
    auto secondary_control_io_window = TRY(IOWindow::create_for_io_space(IOAddress(0x376), 4));

    // Shared per-channel bring-up: allocate ISA resources, then scan for disks.
    auto initialize_and_enumerate = [](IDEChannel& channel) -> ErrorOr<void> {
        TRY(channel.allocate_resources_for_isa_ide_controller({}));
        TRY(channel.detect_connected_devices());
        return {};
    };

    auto primary_channel_io_window_group = IDEChannel::IOWindowGroup { move(primary_base_io_window), move(primary_control_io_window) };
    auto secondary_channel_io_window_group = IDEChannel::IOWindowGroup { move(secondary_base_io_window), move(secondary_control_io_window) };

    TRY(m_channels.try_append(IDEChannel::create(*this, move(primary_channel_io_window_group), IDEChannel::ChannelType::Primary)));
    TRY(initialize_and_enumerate(m_channels[0]));
    m_channels[0].enable_irq();

    TRY(m_channels.try_append(IDEChannel::create(*this, move(secondary_channel_io_window_group), IDEChannel::ChannelType::Secondary)));
    TRY(initialize_and_enumerate(m_channels[1]));
    m_channels[1].enable_irq();
    dbgln("ISA IDE controller detected and initialized");
    return {};
}
}

View file

@ -0,0 +1,29 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
// Driver front-end for the legacy (ISA, non-PCI) IDE controller with its two
// channels at the standard fixed I/O ports.
class ISAIDEController final : public IDEController {
public:
    static ErrorOr<NonnullLockRefPtr<ISAIDEController>> initialize();

private:
    ISAIDEController();

    // NOTE(review): declared without `virtual`/`override`; confirm whether this
    // is intended to override a base-class method in IDEController or shadow it.
    LockRefPtr<StorageDevice> device_by_channel_and_position(u32 index) const;

    ErrorOr<void> initialize_channels();
};
}

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/SerialDevice.h>
#include <Kernel/IOWindow.h>
#include <Kernel/Sections.h>
namespace Kernel {
#define SERIAL_COM1_ADDR 0x3F8
#define SERIAL_COM2_ADDR 0x2F8
#define SERIAL_COM3_ADDR 0x3E8
#define SERIAL_COM4_ADDR 0x2E8
// Creates the SerialDevice for one of the four legacy COM ports.
// `com_number` is 0-based (0 = COM1 ... 3 = COM4); device minor numbers 64-67
// correspond to COM1-COM4.
UNMAP_AFTER_INIT NonnullLockRefPtr<SerialDevice> SerialDevice::must_create(size_t com_number)
{
    // FIXME: This way of blindly doing release_value is really not a good thing, find
    // a way to propagate errors back.

    // The legacy COM ports live at fixed ISA I/O addresses. A table replaces the
    // old 4-way switch that duplicated the construction code; it also makes an
    // out-of-range com_number fail loudly here, instead of leaving the pointer
    // null and crashing inside release_nonnull() as the old `default:` did.
    static constexpr u16 s_com_io_addresses[] = { SERIAL_COM1_ADDR, SERIAL_COM2_ADDR, SERIAL_COM3_ADDR, SERIAL_COM4_ADDR };
    VERIFY(com_number < 4);

    auto io_window = IOWindow::create_for_io_space(IOAddress(s_com_io_addresses[com_number]), 16).release_value_but_fixme_should_propagate_errors();
    return DeviceManagement::try_create_device<SerialDevice>(move(io_window), 64 + com_number).release_value();
}
}

View file

@ -0,0 +1,237 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/x86_64/Interrupts.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
// Instantiate an assembly entry stub for every generic interrupt vector.
// Vectors 0-31 are CPU exceptions with dedicated handlers elsewhere; vectors
// 32-255 each get a generated thunk here. The macro body is defined in
// Interrupts.h (not visible in this file) -- presumably each stub records its
// vector number and enters the common interrupt path; confirm there.
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(32)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(33)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(34)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(35)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(36)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(37)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(38)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(39)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(40)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(41)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(42)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(43)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(44)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(45)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(46)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(47)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(48)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(49)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(50)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(51)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(52)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(53)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(54)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(55)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(56)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(57)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(58)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(59)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(60)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(61)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(62)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(63)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(64)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(65)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(66)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(67)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(68)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(69)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(70)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(71)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(72)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(73)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(74)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(75)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(76)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(77)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(78)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(79)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(80)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(81)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(82)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(83)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(84)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(85)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(86)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(87)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(88)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(89)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(90)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(91)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(92)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(93)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(94)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(95)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(96)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(97)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(98)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(99)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(100)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(101)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(102)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(103)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(104)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(105)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(106)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(107)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(108)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(109)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(110)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(111)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(112)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(113)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(114)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(115)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(116)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(117)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(118)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(119)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(120)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(121)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(122)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(123)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(124)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(125)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(126)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(127)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(128)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(129)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(130)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(131)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(132)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(133)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(134)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(135)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(136)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(137)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(138)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(139)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(140)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(141)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(142)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(143)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(144)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(145)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(146)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(147)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(148)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(149)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(150)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(151)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(152)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(153)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(154)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(155)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(156)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(157)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(158)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(159)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(160)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(161)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(162)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(163)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(164)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(165)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(166)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(167)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(168)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(169)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(170)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(171)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(172)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(173)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(174)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(175)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(176)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(177)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(178)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(179)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(180)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(181)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(182)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(183)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(184)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(185)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(186)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(187)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(188)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(189)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(190)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(191)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(192)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(193)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(194)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(195)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(196)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(197)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(198)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(199)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(200)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(201)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(202)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(203)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(204)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(205)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(206)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(207)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(208)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(209)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(210)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(211)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(212)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(213)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(214)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(215)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(216)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(217)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(218)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(219)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(220)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(221)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(222)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(223)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(224)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(225)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(226)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(227)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(228)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(229)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(230)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(231)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(232)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(233)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(234)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(235)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(236)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(237)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(238)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(239)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(240)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(241)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(242)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(243)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(244)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(245)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(246)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(247)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(248)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(249)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(250)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(251)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(252)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(253)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(254)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(255)

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/DescriptorTable.h>
#include <Kernel/Arch/x86_64/TrapFrame.h>
// clang-format off
// Common entry/exit path shared by all generated interrupt stubs. On arrival
// the stub has already pushed the ISR number and an exception code (see the
// "skip exception_code, isr_number" pop below). Here we spill the remaining
// GPRs to complete a RegisterState, wrap it in a TrapFrame, and hand it to the
// C++ interrupt machinery (enter_trap / handle_interrupt / exit_trap).
asm(
    ".globl interrupt_common_asm_entry\n"
    "interrupt_common_asm_entry: \n"
    // save all the other registers
    " pushq %r15\n"
    " pushq %r14\n"
    " pushq %r13\n"
    " pushq %r12\n"
    " pushq %r11\n"
    " pushq %r10\n"
    " pushq %r9\n"
    " pushq %r8\n"
    " pushq %rax\n"
    " pushq %rcx\n"
    " pushq %rdx\n"
    " pushq %rbx\n"
    " pushq %rsp\n"
    " pushq %rbp\n"
    " pushq %rsi\n"
    " pushq %rdi\n"
    " pushq %rsp \n" /* set TrapFrame::regs */
    // reserve the rest of the TrapFrame (the regs pointer was just pushed)
    " subq $" __STRINGIFY(TRAP_FRAME_SIZE - 8) ", %rsp \n"
    " movq %rsp, %rdi \n" // arg0 = TrapFrame*
    " cld\n"              // SysV ABI requires DF clear on function entry
    " call enter_trap \n"
    " movq %rsp, %rdi \n"
    " call handle_interrupt \n"
    ".globl common_trap_exit \n"
    "common_trap_exit: \n"
    // another thread may have handled this trap at this point, so don't
    // make assumptions about the stack other than there's a TrapFrame.
    " movq %rsp, %rdi \n"
    " call exit_trap \n"
    " addq $" __STRINGIFY(TRAP_FRAME_SIZE) ", %rsp\n" // pop TrapFrame
    ".globl interrupt_common_asm_exit \n"
    "interrupt_common_asm_exit: \n"
    // restore GPRs in reverse push order
    " popq %rdi\n"
    " popq %rsi\n"
    " popq %rbp\n"
    " addq $8, %rsp\n" // skip restoring rsp
    " popq %rbx\n"
    " popq %rdx\n"
    " popq %rcx\n"
    " popq %rax\n"
    " popq %r8\n"
    " popq %r9\n"
    " popq %r10\n"
    " popq %r11\n"
    " popq %r12\n"
    " popq %r13\n"
    " popq %r14\n"
    " popq %r15\n"
    " addq $0x8, %rsp\n" // skip exception_code, isr_number
    " iretq\n"
);
// clang-format on

View file

@ -0,0 +1,228 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/ByteReader.h>
#include <Kernel/API/Syscall.h>
#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/x86_64/InterruptManagement.h>
#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Arch/x86_64/Interrupts/IOAPIC.h>
#include <Kernel/Arch/x86_64/Interrupts/PIC.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Firmware/MultiProcessor/Parser.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Interrupts/SharedIRQHandler.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/Sections.h>
#define PCAT_COMPAT_FLAG 0x1
namespace Kernel {
static InterruptManagement* s_interrupt_management;
// Returns true once initialize() has constructed the global singleton.
bool InterruptManagement::initialized()
{
    return (s_interrupt_management != nullptr);
}
// Singleton accessor; must not be called before initialize().
InterruptManagement& InterruptManagement::the()
{
    VERIFY(InterruptManagement::initialized());
    return *s_interrupt_management;
}
// One-time setup: constructs the singleton and selects the interrupt controller
// model (legacy PIC vs. IOAPIC) based on the kernel command line.
UNMAP_AFTER_INIT void InterruptManagement::initialize()
{
    VERIFY(!InterruptManagement::initialized());
    s_interrupt_management = new InterruptManagement();

    // Warn when SMP was requested while the IOAPIC is disabled, since SMP
    // requires the IOAPIC. (The condition was previously negated, which made
    // the warning print on every *valid* configuration and stay silent on the
    // invalid one the message describes.)
    if (kernel_command_line().is_smp_enabled_without_ioapic_enabled()) {
        dbgln("Can't enable SMP mode without IOAPIC mode being enabled");
    }

    // Legacy PIC mode only when neither IOAPIC nor SMP was asked for.
    if (!kernel_command_line().is_ioapic_enabled() && !kernel_command_line().is_smp_enabled())
        InterruptManagement::the().switch_to_pic_mode();
    else
        InterruptManagement::the().switch_to_ioapic_mode();
}
// Invokes `callback` for every registered interrupt handler. Shared IRQ slots
// are flattened into their individual member handlers; unhandled (placeholder)
// slots are skipped.
void InterruptManagement::enumerate_interrupt_handlers(Function<void(GenericInterruptHandler&)> callback)
{
    for (size_t slot = 0; slot < GENERIC_INTERRUPT_HANDLERS_COUNT; ++slot) {
        auto& handler = get_interrupt_handler(slot);
        if (handler.type() == HandlerType::SharedIRQHandler)
            static_cast<SharedIRQHandler&>(handler).enumerate_handlers(callback);
        else if (handler.type() != HandlerType::UnhandledInterruptHandler)
            callback(handler);
    }
}
// Returns the controller at `index`; the index must be in range.
IRQController& InterruptManagement::get_interrupt_controller(size_t index)
{
    return *m_interrupt_controllers[index];
}
// Maps a raw IRQ number to its interrupt vector. Safe to call before
// initialize(), in which case the IRQ is returned unchanged.
u8 InterruptManagement::acquire_mapped_interrupt_number(u8 original_irq)
{
    if (!InterruptManagement::initialized()) {
        // This is necessary, because we install UnhandledInterruptHandlers before we actually initialize the Interrupt Management object...
        return original_irq;
    }
    return InterruptManagement::the().get_mapped_interrupt_vector(original_irq);
}
// Inverse of acquire_mapped_interrupt_number(); requires initialization.
u8 InterruptManagement::acquire_irq_number(u8 mapped_interrupt_vector)
{
    VERIFY(InterruptManagement::initialized());
    return InterruptManagement::the().get_irq_vector(mapped_interrupt_vector);
}
// Currently an identity mapping (IRQ n -> vector n); only checks the IRQ does
// not collide with the syscall gate.
u8 InterruptManagement::get_mapped_interrupt_vector(u8 original_irq)
{
    // FIXME: For SMP configuration (with IOAPICs) use a better routing scheme to make redirections more efficient.
    // FIXME: Find a better way to handle conflict with Syscall interrupt gate.
    VERIFY((original_irq + IRQ_VECTOR_BASE) != syscall_vector);
    return original_irq;
}
// Currently an identity mapping (vector n -> IRQ n).
u8 InterruptManagement::get_irq_vector(u8 mapped_interrupt_vector)
{
    // FIXME: For SMP configuration (with IOAPICs) use a better routing scheme to make redirections more efficient.
    return mapped_interrupt_vector;
}
// Returns the first controller of `controller_type` whose GSI base covers
// `interrupt_vector`; panics if none matches.
// NOTE(review): only the lower bound (gsi_base <= vector) is checked, so with
// multiple controllers of the same type the first match wins -- confirm this is
// intended.
NonnullLockRefPtr<IRQController> InterruptManagement::get_responsible_irq_controller(IRQControllerType controller_type, u8 interrupt_vector)
{
    for (auto& irq_controller : m_interrupt_controllers) {
        if (irq_controller->gsi_base() <= interrupt_vector && irq_controller->type() == controller_type)
            return irq_controller;
    }
    VERIFY_NOT_REACHED();
}
// Returns the controller responsible for `interrupt_vector`: the lone legacy
// PIC if that is all we have, otherwise the first enabled controller whose GSI
// base covers the vector. Panics if nothing matches.
NonnullLockRefPtr<IRQController> InterruptManagement::get_responsible_irq_controller(u8 interrupt_vector)
{
    // Fast path: a single legacy PIC owns every vector.
    if (m_interrupt_controllers.size() == 1 && m_interrupt_controllers[0]->type() == IRQControllerType::i8259)
        return m_interrupt_controllers[0];

    for (auto& irq_controller : m_interrupt_controllers) {
        if (irq_controller->gsi_base() <= interrupt_vector && !irq_controller->is_hard_disabled())
            return irq_controller;
    }
    VERIFY_NOT_REACHED();
}
// Locates the ACPI MADT (signature "APIC") via the RSDP. Returns a null
// PhysicalAddress if the RSDP or the table cannot be found.
UNMAP_AFTER_INIT PhysicalAddress InterruptManagement::search_for_madt()
{
    dbgln("Early access to ACPI tables for interrupt setup");
    auto rsdp = ACPI::StaticParsing::find_rsdp();
    if (!rsdp.has_value())
        return {};
    auto apic = ACPI::StaticParsing::find_table(rsdp.value(), "APIC"sv);
    if (!apic.has_value())
        return {};
    return apic.value();
}
// Caches the MADT physical address up front; controller selection happens
// later in switch_to_pic_mode()/switch_to_ioapic_mode().
UNMAP_AFTER_INIT InterruptManagement::InterruptManagement()
    : m_madt(search_for_madt())
{
}
// Installs the legacy dual-8259 PIC as the only interrupt controller, with
// spurious-interrupt handlers on IRQ 7 and IRQ 15 (the two PICs' spurious lines).
UNMAP_AFTER_INIT void InterruptManagement::switch_to_pic_mode()
{
    VERIFY(m_interrupt_controllers.is_empty());
    dmesgln("Interrupts: Switch to Legacy PIC mode");
    InterruptDisabler disabler;
    m_interrupt_controllers.append(adopt_lock_ref(*new PIC()));
    SpuriousInterruptHandler::initialize(7);
    SpuriousInterruptHandler::initialize(15);
    dbgln("Interrupts: Detected {}", m_interrupt_controllers[0]->model());
}
// Switches to IOAPIC mode using the ACPI MADT. Falls back to PIC mode when the
// MADT is missing or describes no IOAPIC; otherwise hard-disables any PIC,
// collects PCI interrupt overrides from the MP tables, and starts the local APIC.
UNMAP_AFTER_INIT void InterruptManagement::switch_to_ioapic_mode()
{
    dmesgln("Interrupts: Switch to IOAPIC mode");
    InterruptDisabler disabler;
    if (m_madt.is_null()) {
        dbgln("Interrupts: ACPI MADT is not available, reverting to PIC mode");
        switch_to_pic_mode();
        return;
    }
    dbgln("Interrupts: MADT @ P {}", m_madt.as_ptr());
    locate_apic_data();
    // locate_apic_data() may have only found a PIC (PCAT_COMPAT with no IOAPIC entry).
    if (m_interrupt_controllers.size() == 1) {
        if (get_interrupt_controller(0).type() == IRQControllerType::i8259) {
            dmesgln("Interrupts: NO IOAPIC detected, Reverting to PIC mode.");
            return;
        }
    }
    // With an IOAPIC present, any legacy PIC is hard-disabled but kept around,
    // and its spurious vectors still get handlers.
    for (auto& irq_controller : m_interrupt_controllers) {
        VERIFY(irq_controller);
        if (irq_controller->type() == IRQControllerType::i8259) {
            irq_controller->hard_disable();
            dbgln("Interrupts: Detected {} - Disabled", irq_controller->model());
            SpuriousInterruptHandler::initialize_for_disabled_master_pic();
            SpuriousInterruptHandler::initialize_for_disabled_slave_pic();
        } else {
            dbgln("Interrupts: Detected {}", irq_controller->model());
        }
    }
    if (auto mp_parser = MultiProcessorParser::autodetect()) {
        m_pci_interrupt_overrides = mp_parser->get_pci_interrupt_redirections();
    }
    APIC::initialize();
    APIC::the().init_bsp();
}
// Walks the MADT entry list, instantiating an IOAPIC controller for each IOAPIC
// entry and recording every ISA Interrupt Source Override. If the PCAT_COMPAT
// flag is set, a legacy PIC controller is registered as well.
UNMAP_AFTER_INIT void InterruptManagement::locate_apic_data()
{
    VERIFY(!m_madt.is_null());
    auto madt = Memory::map_typed<ACPI::Structures::MADT>(m_madt).release_value_but_fixme_should_propagate_errors();

    // PCAT_COMPAT: firmware reports dual-8259 wiring is present alongside the APICs.
    if (madt->flags & PCAT_COMPAT_FLAG)
        m_interrupt_controllers.append(adopt_lock_ref(*new PIC()));
    size_t entry_index = 0;
    size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
    auto* madt_entry = madt->entries;
    // NOTE(review): a malformed table with a zero-length entry would loop
    // forever here -- confirm whether firmware input should be sanity-checked.
    while (entries_length > 0) {
        size_t entry_length = madt_entry->length;
        if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::IOAPIC) {
            auto* ioapic_entry = (const ACPI::Structures::MADTEntries::IOAPIC*)madt_entry;
            dbgln("IOAPIC found @ MADT entry {}, MMIO Registers @ {}", entry_index, PhysicalAddress(ioapic_entry->ioapic_address));
            m_interrupt_controllers.append(adopt_lock_ref(*new IOAPIC(PhysicalAddress(ioapic_entry->ioapic_address), ioapic_entry->gsi_base)));
        }
        if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::InterruptSourceOverride) {
            auto* interrupt_override_entry = (const ACPI::Structures::MADTEntries::InterruptSourceOverride*)madt_entry;
            // ByteReader::load is used because the MADT fields may be unaligned.
            u32 global_system_interrupt = 0;
            ByteReader::load<u32>(reinterpret_cast<u8 const*>(&interrupt_override_entry->global_system_interrupt), global_system_interrupt);
            u16 flags = 0;
            ByteReader::load<u16>(reinterpret_cast<u8 const*>(&interrupt_override_entry->flags), flags);
            MUST(m_isa_interrupt_overrides.try_empend(
                interrupt_override_entry->bus,
                interrupt_override_entry->source,
                global_system_interrupt,
                flags));
            dbgln("Interrupts: Overriding INT {:#x} with GSI {}, for bus {:#x}",
                interrupt_override_entry->source,
                global_system_interrupt,
                interrupt_override_entry->bus);
        }
        madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
        entries_length -= entry_length;
        entry_index++;
    }
}
}

View file

@ -0,0 +1,78 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Function.h>
#include <AK/NonnullOwnPtr.h>
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/IRQController.h>
#include <Kernel/Arch/x86_64/Interrupts/IOAPIC.h>
#include <Kernel/Firmware/ACPI/Definitions.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Library/LockRefPtr.h>
namespace Kernel {
// Immutable record of an ACPI MADT "Interrupt Source Override" entry: ISA IRQ
// `source` on `bus` is actually routed to `global_system_interrupt`, with MPS
// INTI `flags` (polarity/trigger mode -- see the ACPI specification).
class ISAInterruptOverrideMetadata {
public:
    ISAInterruptOverrideMetadata(u8 bus, u8 source, u32 global_system_interrupt, u16 flags)
        : m_bus(bus)
        , m_source(source)
        , m_global_system_interrupt(global_system_interrupt)
        , m_flags(flags)
    {
    }

    u8 bus() const { return m_bus; }
    u8 source() const { return m_source; }
    u32 gsi() const { return m_global_system_interrupt; }
    u16 flags() const { return m_flags; }

private:
    const u8 m_bus;
    const u8 m_source;
    const u32 m_global_system_interrupt;
    const u16 m_flags;
};
// Singleton owning the platform interrupt controllers (legacy PIC and/or
// IOAPICs), the IRQ-to-vector mapping, and the ACPI/MP interrupt override lists.
class InterruptManagement {
public:
    static InterruptManagement& the();
    static void initialize();
    static bool initialized();
    // Static conveniences; acquire_mapped_interrupt_number() is safe before initialize().
    static u8 acquire_mapped_interrupt_number(u8 original_irq);
    static u8 acquire_irq_number(u8 mapped_interrupt_vector);

    virtual void switch_to_pic_mode();
    virtual void switch_to_ioapic_mode();

    NonnullLockRefPtr<IRQController> get_responsible_irq_controller(u8 interrupt_vector);
    NonnullLockRefPtr<IRQController> get_responsible_irq_controller(IRQControllerType controller_type, u8 interrupt_vector);

    Vector<ISAInterruptOverrideMetadata> const& isa_overrides() const { return m_isa_interrupt_overrides; }

    u8 get_mapped_interrupt_vector(u8 original_irq);
    u8 get_irq_vector(u8 mapped_interrupt_vector);

    void enumerate_interrupt_handlers(Function<void(GenericInterruptHandler&)>);
    IRQController& get_interrupt_controller(size_t index);

protected:
    virtual ~InterruptManagement() = default;

private:
    InterruptManagement();

    PhysicalAddress search_for_madt();
    void locate_apic_data();

    Vector<NonnullLockRefPtr<IRQController>> m_interrupt_controllers;
    Vector<ISAInterruptOverrideMetadata> m_isa_interrupt_overrides;
    Vector<PCIInterruptOverrideMetadata> m_pci_interrupt_overrides;
    // Physical address of the ACPI MADT ("APIC") table; null when not found.
    PhysicalAddress m_madt;
};
}

View file

@ -0,0 +1,819 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/x86_64/Interrupts/PIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Interrupts/SharedIRQHandler.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Interrupts/UnhandledInterruptHandler.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <LibC/mallocdefs.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/PageFault.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Arch/x86_64/ISRStubs.h>
extern FlatPtr start_of_unmap_after_init;
extern FlatPtr end_of_unmap_after_init;
extern FlatPtr start_of_ro_after_init;
extern FlatPtr end_of_ro_after_init;
extern FlatPtr start_of_kernel_ksyms;
extern FlatPtr end_of_kernel_ksyms;
namespace Kernel {
// Storage for the Interrupt Descriptor Table and its descriptor record.
READONLY_AFTER_INIT static DescriptorTablePointer s_idtr;
READONLY_AFTER_INIT static IDTEntry s_idt[256];
// One registered handler per generic interrupt vector.
static GenericInterruptHandler* s_interrupt_handler[GENERIC_INTERRUPT_HANDLERS_COUNT];
// NOTE(review): appears to hold handlers related to the two (disabled) PICs;
// confirm usage further down in this file.
static GenericInterruptHandler* s_disabled_interrupt_handler[2];
// Entropy source tagged for interrupt events.
static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interrupts };
// clang-format off
// EH_ENTRY(ec, title): emits the NAKED asm entry point for a CPU exception
// whose hardware frame already includes an error code (e.g. GPF, page fault).
// It spills the GPRs to complete a RegisterState, builds a TrapFrame (with an
// extra 8-byte adjustment to keep the stack 16-byte aligned at the calls),
// invokes <title>_handler, and leaves through common_trap_exit.
#define EH_ENTRY(ec, title) \
    extern "C" void title##_asm_entry(); \
    extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
    NAKED void title##_asm_entry() { \
        asm( \
            " pushq %r15\n" \
            " pushq %r14\n" \
            " pushq %r13\n" \
            " pushq %r12\n" \
            " pushq %r11\n" \
            " pushq %r10\n" \
            " pushq %r9\n" \
            " pushq %r8\n" \
            " pushq %rax\n" \
            " pushq %rcx\n" \
            " pushq %rdx\n" \
            " pushq %rbx\n" \
            " pushq %rsp\n" \
            " pushq %rbp\n" \
            " pushq %rsi\n" \
            " pushq %rdi\n" \
            " pushq %rsp \n" /* set TrapFrame::regs */ \
            " subq $" __STRINGIFY(TRAP_FRAME_SIZE - 8) ", %rsp \n" \
            " subq $0x8, %rsp\n" /* align stack */ \
            " lea 0x8(%rsp), %rdi \n" \
            " cld\n" \
            " call enter_trap_no_irq \n" \
            " lea 0x8(%rsp), %rdi \n" \
            " call " #title "_handler\n" \
            " addq $0x8, %rsp\n" /* undo alignment */ \
            " jmp common_trap_exit \n" \
        ); \
    }

// EH_ENTRY_NO_CODE(ec, title): same as EH_ENTRY, but for exceptions where the
// CPU pushes no error code; a zero placeholder is pushed first so the
// RegisterState layout stays identical.
#define EH_ENTRY_NO_CODE(ec, title) \
    extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
    extern "C" void title##_asm_entry(); \
    NAKED void title##_asm_entry() { \
        asm( \
            " pushq $0x0\n" \
            " pushq %r15\n" \
            " pushq %r14\n" \
            " pushq %r13\n" \
            " pushq %r12\n" \
            " pushq %r11\n" \
            " pushq %r10\n" \
            " pushq %r9\n" \
            " pushq %r8\n" \
            " pushq %rax\n" \
            " pushq %rcx\n" \
            " pushq %rdx\n" \
            " pushq %rbx\n" \
            " pushq %rsp\n" \
            " pushq %rbp\n" \
            " pushq %rsi\n" \
            " pushq %rdi\n" \
            " pushq %rsp \n" /* set TrapFrame::regs */ \
            " subq $" __STRINGIFY(TRAP_FRAME_SIZE - 8) ", %rsp \n" \
            " movq %rsp, %rdi \n" \
            " cld\n" \
            " call enter_trap_no_irq \n" \
            " movq %rsp, %rdi \n" \
            " call " #title "_handler\n" \
            " jmp common_trap_exit \n" \
        ); \
    }
// clang-format on
// Dumps the full register state of a trap to the debug log, picking the
// correct stack pointer for kernel- vs. user-mode traps.
void dump_registers(RegisterState const& regs)
{
    // CPL (low two bits of CS) != 0 means we trapped from userspace, where the
    // CPU pushed the userspace stack pointer into the frame.
    u64 stack_pointer = (regs.cs & 3) ? regs.userspace_rsp : regs.rsp;

    dbgln("Exception code: {:04x} (isr: {:04x})", regs.exception_code, regs.isr_number);
    dbgln(" pc={:#04x}:{:p} rflags={:p}", (u16)regs.cs, regs.rip, regs.rflags);
    dbgln(" stack={:p}", stack_pointer);
    // FIXME: Add fs_base and gs_base here
    dbgln(" rax={:p} rbx={:p} rcx={:p} rdx={:p}", regs.rax, regs.rbx, regs.rcx, regs.rdx);
    dbgln(" rbp={:p} rsp={:p} rsi={:p} rdi={:p}", regs.rbp, regs.rsp, regs.rsi, regs.rdi);
    dbgln(" r8={:p} r9={:p} r10={:p} r11={:p}", regs.r8, regs.r9, regs.r10, regs.r11);
    dbgln(" r12={:p} r13={:p} r14={:p} r15={:p}", regs.r12, regs.r13, regs.r14, regs.r15);
    dbgln(" cr0={:p} cr2={:p} cr3={:p} cr4={:p}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
}
// 6: Invalid Opcode exception.
EH_ENTRY_NO_CODE(6, illegal_instruction);
void illegal_instruction_handler(TrapFrame* trap)
{
    clac(); // re-block kernel access to user memory (SMAP), if supported
    handle_crash(*trap->regs, "Illegal instruction", SIGILL);
}
// 0: Divide Error exception.
EH_ENTRY_NO_CODE(0, divide_error);
void divide_error_handler(TrapFrame* trap)
{
    clac(); // re-block kernel access to user memory (SMAP), if supported
    handle_crash(*trap->regs, "Divide error", SIGFPE);
}
// 13: General Protection Fault (pushes an error code).
EH_ENTRY(13, general_protection_fault);
void general_protection_fault_handler(TrapFrame* trap)
{
    clac(); // re-block kernel access to user memory (SMAP), if supported
    handle_crash(*trap->regs, "General protection fault", SIGSEGV);
}
// 7: FPU not available exception
EH_ENTRY_NO_CODE(7, fpu_exception);
void fpu_exception_handler(TrapFrame*)
{
    // Just clear the TS flag. We've already restored the FPU state eagerly.
    // FIXME: It would be nice if we didn't have to do this at all.
    asm volatile("clts");
}
// 14: Page Fault
EH_ENTRY(14, page_fault);
// Resolves a page fault (#PF). Recoverable faults (lazily-committed, CoW,
// inode-backed pages, safe_* kernel accessors) are handled and execution
// continues; unresolvable faults deliver SIGSEGV/SIGBUS or crash.
void page_fault_handler(TrapFrame* trap)
{
    clac();
    // NOTE: Once we've extracted the faulting address from CR2,
    // we can re-enable interrupts.
    auto fault_address = read_cr2();
    sti();
    auto& regs = *trap->regs;
    if constexpr (PAGE_FAULT_DEBUG) {
        u32 fault_page_directory = read_cr3();
        dbgln("CPU #{} ring {} {} page fault in PD={:#x}, {}{} {}",
            Processor::is_initialized() ? Processor::current_id() : 0,
            regs.cs & 3,
            regs.exception_code & 1 ? "PV" : "NP",
            fault_page_directory,
            regs.exception_code & 8 ? "reserved-bit " : "",
            regs.exception_code & 2 ? "write" : "read",
            VirtualAddress(fault_address));
        dump_registers(regs);
    }
    bool faulted_in_kernel = !(regs.cs & 3);
    if (faulted_in_kernel && Processor::current_in_irq()) {
        // If we're faulting in an IRQ handler, first check if we failed
        // due to safe_memcpy, safe_strnlen, or safe_memset. If we did,
        // gracefully continue immediately. Because we're in an IRQ handler
        // we can't really try to resolve the page fault in a meaningful
        // way, so we need to do this before calling into
        // MemoryManager::handle_page_fault, which would just bail and
        // request a crash
        if (handle_safe_access_fault(regs, fault_address))
            return;
    }
    auto current_thread = Thread::current();
    if (current_thread) {
        current_thread->set_handling_page_fault(true);
        PerformanceManager::add_page_fault_event(*current_thread, regs);
    }
    // Clear the handling-page-fault flag on every exit path below.
    ScopeGuard guard = [current_thread] {
        if (current_thread)
            current_thread->set_handling_page_fault(false);
    };
    VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
    if (!faulted_in_kernel) {
        bool has_valid_stack_pointer = current_thread->process().address_space().with([&](auto& space) {
            return MM.validate_user_stack(*space, userspace_sp);
        });
        if (!has_valid_stack_pointer) {
            dbgln("Invalid stack pointer: {}", userspace_sp);
            return handle_crash(regs, "Bad stack on page fault", SIGSEGV);
        }
    }
    PageFault fault { regs.exception_code, VirtualAddress { fault_address } };
    auto response = MM.handle_page_fault(fault);
    if (response == PageFaultResponse::ShouldCrash || response == PageFaultResponse::OutOfMemory || response == PageFaultResponse::BusError) {
        if (faulted_in_kernel && handle_safe_access_fault(regs, fault_address)) {
            // If this would be a ring0 (kernel) fault and the fault was triggered by
            // safe_memcpy, safe_strnlen, or safe_memset then we resume execution at
            // the appropriate _fault label rather than crashing
            return;
        }
        // NOTE: current_thread may be null here (e.g. a kernel fault taken
        // before threading is up), so guard the dereference — the SIGSEGV
        // path below already does.
        if (response == PageFaultResponse::BusError && current_thread && current_thread->has_signal_handler(SIGBUS)) {
            current_thread->send_urgent_signal_to_self(SIGBUS);
            return;
        }
        if (response != PageFaultResponse::OutOfMemory && current_thread) {
            if (current_thread->has_signal_handler(SIGSEGV)) {
                current_thread->send_urgent_signal_to_self(SIGSEGV);
                return;
            }
        }
        dbgln("Unrecoverable page fault, {}{}{} address {}",
            regs.exception_code & PageFaultFlags::ReservedBitViolation ? "reserved bit violation / " : "",
            regs.exception_code & PageFaultFlags::InstructionFetch ? "instruction fetch / " : "",
            regs.exception_code & PageFaultFlags::Write ? "write to" : "read from",
            VirtualAddress(fault_address));
        // Heuristics: recognize allocator scrub-byte patterns to hint at
        // use-of-uninitialized / use-after-free bugs. Only the low 32 bits
        // of the address are compared against each pattern.
        constexpr FlatPtr malloc_scrub_pattern = explode_byte(MALLOC_SCRUB_BYTE);
        constexpr FlatPtr free_scrub_pattern = explode_byte(FREE_SCRUB_BYTE);
        constexpr FlatPtr kmalloc_scrub_pattern = explode_byte(KMALLOC_SCRUB_BYTE);
        constexpr FlatPtr kfree_scrub_pattern = explode_byte(KFREE_SCRUB_BYTE);
        if (response == PageFaultResponse::BusError) {
            dbgln("Note: Address {} is an access to an undefined memory range of an Inode-backed VMObject", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (malloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized malloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (free_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently free()'d memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (kmalloc_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
        } else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
            dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
        } else if (fault_address < 4096) {
            dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
        } else if constexpr (SANITIZE_PTRS) {
            constexpr FlatPtr refptr_scrub_pattern = explode_byte(REFPTR_SCRUB_BYTE);
            constexpr FlatPtr nonnullrefptr_scrub_pattern = explode_byte(NONNULLREFPTR_SCRUB_BYTE);
            constexpr FlatPtr ownptr_scrub_pattern = explode_byte(OWNPTR_SCRUB_BYTE);
            constexpr FlatPtr nonnullownptr_scrub_pattern = explode_byte(NONNULLOWNPTR_SCRUB_BYTE);
            constexpr FlatPtr lockrefptr_scrub_pattern = explode_byte(LOCKREFPTR_SCRUB_BYTE);
            constexpr FlatPtr nonnulllockrefptr_scrub_pattern = explode_byte(NONNULLLOCKREFPTR_SCRUB_BYTE);
            // NOTE(review): the messages for the plain RefPtr patterns also
            // say "LockRefPtr" — possibly stale copy/paste; left unchanged.
            if ((fault_address & 0xffff0000) == (refptr_scrub_pattern & 0xffff0000)) {
                dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
            } else if ((fault_address & 0xffff0000) == (nonnullrefptr_scrub_pattern & 0xffff0000)) {
                dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
            } else if ((fault_address & 0xffff0000) == (ownptr_scrub_pattern & 0xffff0000)) {
                dbgln("Note: Address {} looks like it may be a recently destroyed OwnPtr", VirtualAddress(fault_address));
            } else if ((fault_address & 0xffff0000) == (nonnullownptr_scrub_pattern & 0xffff0000)) {
                dbgln("Note: Address {} looks like it may be a recently destroyed NonnullOwnPtr", VirtualAddress(fault_address));
            } else if ((fault_address & 0xffff0000) == (lockrefptr_scrub_pattern & 0xffff0000)) {
                dbgln("Note: Address {} looks like it may be a recently destroyed LockRefPtr", VirtualAddress(fault_address));
            } else if ((fault_address & 0xffff0000) == (nonnulllockrefptr_scrub_pattern & 0xffff0000)) {
                dbgln("Note: Address {} looks like it may be a recently destroyed NonnullLockRefPtr", VirtualAddress(fault_address));
            }
        }
        if (current_thread) {
            auto& current_process = current_thread->process();
            if (current_process.is_user_process()) {
                // Best-effort: attach fault details to the coredump metadata.
                auto fault_address_string = KString::formatted("{:p}", fault_address);
                auto fault_address_view = fault_address_string.is_error() ? ""sv : fault_address_string.value()->view();
                (void)current_process.try_set_coredump_property("fault_address"sv, fault_address_view);
                (void)current_process.try_set_coredump_property("fault_type"sv, fault.type() == PageFault::Type::PageNotPresent ? "NotPresent"sv : "ProtectionViolation"sv);
                StringView fault_access;
                if (fault.is_instruction_fetch())
                    fault_access = "Execute"sv;
                else
                    fault_access = fault.access() == PageFault::Access::Read ? "Read"sv : "Write"sv;
                (void)current_process.try_set_coredump_property("fault_access"sv, fault_access);
            }
        }
        if (response == PageFaultResponse::BusError)
            return handle_crash(regs, "Page Fault (Bus Error)", SIGBUS, false);
        return handle_crash(regs, "Page Fault", SIGSEGV, response == PageFaultResponse::OutOfMemory);
    } else if (response == PageFaultResponse::Continue) {
        dbgln_if(PAGE_FAULT_DEBUG, "Continuing after resolved page fault");
    } else {
        VERIFY_NOT_REACHED();
    }
}
EH_ENTRY_NO_CODE(1, debug);

// 1: Debug exception (#DB): single-step traps and hardware breakpoints.
// Only userspace may trigger these; the stopped register state is forwarded
// to an attached tracer (if any), SIGTRAP is delivered, and the handled DR6
// status bits are cleared so the next #DB starts from a clean slate.
void debug_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    if ((regs.cs & 3) == 0)
        PANIC("Debug exception in ring 0");

    // DR6 bit 14 (BS) flags a single-step trap; bits 0-3 (B0-B3) identify
    // which hardware breakpoint fired.
    constexpr u8 single_step_bit = 14;
    auto dr6_value = read_dr6();
    auto interesting_bits = (1 << single_step_bit) | 0b1111;
    if ((dr6_value & interesting_bits) == 0)
        return;

    if (auto tracer = process.tracer())
        tracer->set_regs(regs);
    current_thread->send_urgent_signal_to_self(SIGTRAP);
    write_dr6(dr6_value & ~(interesting_bits));
}
EH_ENTRY_NO_CODE(3, breakpoint);

// 3: Breakpoint (#BP, the int3 trap). Only legal from userspace; forwards
// the stopped register state to an attached tracer (if any) and delivers
// SIGTRAP to the current thread.
void breakpoint_handler(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    if ((regs.cs & 3) == 0)
        PANIC("Breakpoint trap in ring 0");

    if (auto tracer = process.tracer())
        tracer->set_regs(regs);
    current_thread->send_urgent_signal_to_self(SIGTRAP);
}
// Generates a trivial handler for exception vector `i`: logs `msg` and
// panics with a dump of the control registers for post-mortem debugging.
// These are used for exceptions we treat as unconditionally fatal.
#define EH(i, msg) \
    static void _exception##i() \
    { \
        dbgln("{}", msg); \
        PANIC("cr0={:08x} cr2={:08x} cr3={:08x} cr4={:08x}", read_cr0(), read_cr2(), read_cr3(), read_cr4()); \
    }
// Fatal exception vectors without a dedicated handler above.
EH(2, "Unknown error")
EH(4, "Overflow")
EH(5, "Bounds check")
EH(8, "Double fault")
EH(9, "Coprocessor segment overrun")
EH(10, "Invalid TSS")
EH(11, "Segment not present")
EH(12, "Stack exception")
EH(15, "Unknown error")
EH(16, "Coprocessor error")
// C-linkage declarations so the assembly context-switch code can call these;
// __attribute__((used)) keeps them from being discarded at link time.
extern "C" void pre_init_finished(void) __attribute__((used));
extern "C" void post_init_finished(void) __attribute__((used));
extern "C" void handle_interrupt(TrapFrame*) __attribute__((used));
// Called (from assembly) on the first context switch once early init is
// done; releases the scheduler lock that was held across boot so the other
// APs can proceed. See post_init_finished() for the matching re-acquisition.
extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    // Because init_finished() will wait on the other APs, we need
    // to release the scheduler lock so that the other APs can also get
    // to this point
    // The target flags will get restored upon leaving the trap
    Scheduler::leave_on_first_switch(processor_interrupts_state());
}
// Counterpart of pre_init_finished(): called (from assembly) after init has
// finished, just before control transfers into the idle loop.
extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
{
    // We need to re-acquire the scheduler lock before a context switch
    // transfers control into the idle loop, which needs the lock held
    Scheduler::prepare_for_idle_loop();
}
// Common C++ entry point for all generic interrupt vectors: looks up the
// registered GenericInterruptHandler for the trap's ISR number, feeds the
// entropy pool, bumps call statistics, dispatches, and sends EOI.
void handle_interrupt(TrapFrame* trap)
{
    clac();
    auto& regs = *trap->regs;
    GenericInterruptHandler* handler = nullptr;
    // Note: we declare interrupt service routine offsets 0x20 to 0x2f as
    // reserved for when the PIC is disabled, so we can still route spurious
    // IRQs (7 and 15) to dedicated handlers.
    if (regs.isr_number >= pic_disabled_vector_base && regs.isr_number <= pic_disabled_vector_end) {
        u8 irq = (u8)(regs.isr_number - pic_disabled_vector_base);
        if (irq == 7) {
            handler = s_disabled_interrupt_handler[0];
        } else if (irq == 15) {
            handler = s_disabled_interrupt_handler[1];
        }
    } else {
        // NOTE: strict '<' keeps the computed index inside
        // s_interrupt_handler, matching the bound check in
        // register_generic_interrupt_handler(). The previous '<=' admitted
        // one index past the last valid slot.
        VERIFY(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number < (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
        u8 irq = (u8)(regs.isr_number - IRQ_VECTOR_BASE);
        s_entropy_source_interrupts.add_random_event(irq);
        handler = s_interrupt_handler[irq];
    }
    VERIFY(handler);
    handler->increment_call_count();
    handler->handle_interrupt(regs);
    handler->eoi();
}
// Returns the IDT register descriptor (base + limit) that
// initialize_interrupts() sets up and flush_idt() loads.
DescriptorTablePointer const& get_idtr()
{
    return s_idtr;
}
// Catch-all for the exception vectors (0x11-0x1f) that have no dedicated
// handler; they should never fire.
static void unimp_trap()
{
    PANIC("Unhandled IRQ");
}
// Looks up the registered handler for a generic interrupt vector. The slot
// is always populated (initialize_interrupts() pre-fills every slot with an
// UnhandledInterruptHandler), so a null slot is a bug.
GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
{
    auto* handler = s_interrupt_handler[interrupt_number];
    VERIFY(handler != nullptr);
    return *handler;
}
// Re-installs a fresh UnhandledInterruptHandler placeholder in the (now
// empty) slot for the given vector; register_generic_interrupt_handler()
// deletes it again when a real handler arrives.
static void revert_to_unused_handler(u8 interrupt_number)
{
    auto* placeholder = new UnhandledInterruptHandler(interrupt_number);
    placeholder->register_interrupt_handler();
}
// Registers a handler for one of the two spurious-capable PIC lines (IRQ 7
// on the master, IRQ 15 on the slave) used while the PIC is disabled.
// NOTE: handle_interrupt() reads IRQ 7 from slot 0 and IRQ 15 from slot 1;
// the previous mapping here was crossed (15 -> slot 0, 7 -> slot 1), so a
// spurious IRQ would have been dispatched to the wrong line's handler.
void register_disabled_interrupt_handler(u8 number, GenericInterruptHandler& handler)
{
    if (number == 7) {
        s_disabled_interrupt_handler[0] = &handler;
        return;
    }
    if (number == 15) {
        s_disabled_interrupt_handler[1] = &handler;
        return;
    }
    VERIFY_NOT_REACHED();
}
// Installs `handler` for `interrupt_number`, upgrading the slot as needed:
// - an UnhandledInterruptHandler placeholder is destroyed and replaced,
// - an existing SharedIRQHandler / SpuriousInterruptHandler gains another
//   participant,
// - a plain IRQHandler is converted into a SharedIRQHandler holding both
//   the previous handler and the new one.
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
    VERIFY(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
    auto*& handler_slot = s_interrupt_handler[interrupt_number];
    if (handler_slot != nullptr) {
        if (handler_slot->type() == HandlerType::UnhandledInterruptHandler) {
            // handler_slot is known non-null on this path (a redundant inner
            // null check was removed). The placeholder was heap-allocated by
            // initialize_interrupts()/revert_to_unused_handler().
            auto* unhandled_handler = static_cast<UnhandledInterruptHandler*>(handler_slot);
            unhandled_handler->unregister_interrupt_handler();
            delete unhandled_handler;
            handler_slot = &handler;
            return;
        }
        if (handler_slot->is_shared_handler() && !handler_slot->is_sharing_with_others()) {
            VERIFY(handler_slot->type() == HandlerType::SharedIRQHandler);
            static_cast<SharedIRQHandler*>(handler_slot)->register_handler(handler);
            return;
        }
        if (!handler_slot->is_shared_handler()) {
            if (handler_slot->type() == HandlerType::SpuriousInterruptHandler) {
                static_cast<SpuriousInterruptHandler*>(handler_slot)->register_handler(handler);
                return;
            }
            VERIFY(handler_slot->type() == HandlerType::IRQHandler);
            // Convert the single IRQHandler into a SharedIRQHandler that
            // carries both the previous handler and the new one. The slot is
            // cleared first because SharedIRQHandler::initialize() re-fills it.
            auto& previous_handler = *handler_slot;
            handler_slot = nullptr;
            SharedIRQHandler::initialize(interrupt_number);
            VERIFY(handler_slot);
            static_cast<SharedIRQHandler*>(handler_slot)->register_handler(previous_handler);
            static_cast<SharedIRQHandler*>(handler_slot)->register_handler(handler);
            return;
        }
        VERIFY_NOT_REACHED();
    } else {
        handler_slot = &handler;
    }
}
// Removes `handler` from the slot for `interrupt_number`, restoring an
// UnhandledInterruptHandler placeholder when the last real handler goes away.
void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
    auto*& handler_slot = s_interrupt_handler[interrupt_number];
    VERIFY(handler_slot != nullptr);
    if (handler_slot->type() == HandlerType::UnhandledInterruptHandler) {
        dbgln("Trying to unregister unused handler (?)");
        return;
    }
    if (handler_slot->is_shared_handler() && !handler_slot->is_sharing_with_others()) {
        VERIFY(handler_slot->type() == HandlerType::SharedIRQHandler);
        auto* shared_handler = static_cast<SharedIRQHandler*>(handler_slot);
        shared_handler->unregister_handler(handler);
        if (!shared_handler->sharing_devices_count()) {
            // Last participant gone: clear the slot before installing the
            // placeholder (revert_to_unused_handler() re-registers into it).
            // NOTE(review): the SharedIRQHandler itself does not appear to be
            // deleted here — confirm ownership.
            handler_slot = nullptr;
            revert_to_unused_handler(interrupt_number);
        }
        return;
    }
    if (!handler_slot->is_shared_handler()) {
        VERIFY(handler_slot->type() == HandlerType::IRQHandler);
        handler_slot = nullptr;
        revert_to_unused_handler(interrupt_number);
        return;
    }
    VERIFY_NOT_REACHED();
}
// Installs `handler` in IDT slot `index` as a kernel-only interrupt gate
// (the trailing 0 arguments — presumably IST index and DPL; confirm against
// IDTEntry's constructor).
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*handler)())
{
    // FIXME: Is the Gate Type really required to be an Interrupt
    // FIXME: What's up with that storage segment 0?
    s_idt[index] = IDTEntry((FlatPtr)handler, GDT_SELECTOR_CODE0, IDTEntryType::InterruptGate32, 0, 0);
}
// Installs `handler` in IDT slot `index` as a trap gate reachable from
// userspace (trailing 3 — presumably DPL 3, unlike the kernel-only gates in
// register_interrupt_handler(); confirm against IDTEntry's constructor).
UNMAP_AFTER_INIT void register_user_callable_interrupt_handler(u8 index, void (*handler)())
{
    // FIXME: Is the Gate Type really required to be a Trap
    // FIXME: What's up with that storage segment 0?
    s_idt[index] = IDTEntry((FlatPtr)handler, GDT_SELECTOR_CODE0, IDTEntryType::TrapGate32, 0, 3);
}
// Loads the IDT register on the current CPU from s_idtr.
UNMAP_AFTER_INIT void flush_idt()
{
    asm("lidt %0" ::"m"(s_idtr));
}
// Builds the 256-entry IDT: dedicated exception handlers for vectors
// 0x00-0x10, panicking stubs for the remaining reserved exception vectors,
// per-vector assembly stubs for every generic interrupt vector (32-255),
// and an UnhandledInterruptHandler placeholder in every generic slot.
UNMAP_AFTER_INIT void initialize_interrupts()
{
    // Point the IDTR at our 256-entry table before filling it in.
    s_idtr.address = s_idt;
    s_idtr.limit = 256 * sizeof(IDTEntry) - 1;

    // CPU exception vectors. Debug (#DB) and breakpoint (#BP) are registered
    // via register_user_callable_interrupt_handler() so they can be raised
    // from ring 3.
    register_interrupt_handler(0x00, divide_error_asm_entry);
    register_user_callable_interrupt_handler(0x01, debug_asm_entry);
    register_interrupt_handler(0x02, _exception2);
    register_user_callable_interrupt_handler(0x03, breakpoint_asm_entry);
    register_interrupt_handler(0x04, _exception4);
    register_interrupt_handler(0x05, _exception5);
    register_interrupt_handler(0x06, illegal_instruction_asm_entry);
    register_interrupt_handler(0x07, fpu_exception_asm_entry);
    register_interrupt_handler(0x08, _exception8);
    register_interrupt_handler(0x09, _exception9);
    register_interrupt_handler(0x0a, _exception10);
    register_interrupt_handler(0x0b, _exception11);
    register_interrupt_handler(0x0c, _exception12);
    register_interrupt_handler(0x0d, general_protection_fault_asm_entry);
    register_interrupt_handler(0x0e, page_fault_asm_entry);
    register_interrupt_handler(0x0f, _exception15);
    register_interrupt_handler(0x10, _exception16);

    // The remaining reserved exception vectors panic if they ever fire.
    for (u8 i = 0x11; i < 0x20; i++)
        register_interrupt_handler(i, unimp_trap);

    dbgln("Initializing unhandled interrupt handlers");

    // Generic interrupt vectors 32..255 all funnel into handle_interrupt()
    // through their per-vector assembly stubs. The vector number is written
    // in decimal because it is token-pasted into the stub's identifier
    // (interrupt_<n>_asm_entry).
#define REG_VEC(n) register_interrupt_handler(n, interrupt_##n##_asm_entry)
    REG_VEC(32); REG_VEC(33); REG_VEC(34); REG_VEC(35);
    REG_VEC(36); REG_VEC(37); REG_VEC(38); REG_VEC(39);
    REG_VEC(40); REG_VEC(41); REG_VEC(42); REG_VEC(43);
    REG_VEC(44); REG_VEC(45); REG_VEC(46); REG_VEC(47);
    REG_VEC(48); REG_VEC(49); REG_VEC(50); REG_VEC(51);
    REG_VEC(52); REG_VEC(53); REG_VEC(54); REG_VEC(55);
    REG_VEC(56); REG_VEC(57); REG_VEC(58); REG_VEC(59);
    REG_VEC(60); REG_VEC(61); REG_VEC(62); REG_VEC(63);
    REG_VEC(64); REG_VEC(65); REG_VEC(66); REG_VEC(67);
    REG_VEC(68); REG_VEC(69); REG_VEC(70); REG_VEC(71);
    REG_VEC(72); REG_VEC(73); REG_VEC(74); REG_VEC(75);
    REG_VEC(76); REG_VEC(77); REG_VEC(78); REG_VEC(79);
    REG_VEC(80); REG_VEC(81); REG_VEC(82); REG_VEC(83);
    REG_VEC(84); REG_VEC(85); REG_VEC(86); REG_VEC(87);
    REG_VEC(88); REG_VEC(89); REG_VEC(90); REG_VEC(91);
    REG_VEC(92); REG_VEC(93); REG_VEC(94); REG_VEC(95);
    REG_VEC(96); REG_VEC(97); REG_VEC(98); REG_VEC(99);
    REG_VEC(100); REG_VEC(101); REG_VEC(102); REG_VEC(103);
    REG_VEC(104); REG_VEC(105); REG_VEC(106); REG_VEC(107);
    REG_VEC(108); REG_VEC(109); REG_VEC(110); REG_VEC(111);
    REG_VEC(112); REG_VEC(113); REG_VEC(114); REG_VEC(115);
    REG_VEC(116); REG_VEC(117); REG_VEC(118); REG_VEC(119);
    REG_VEC(120); REG_VEC(121); REG_VEC(122); REG_VEC(123);
    REG_VEC(124); REG_VEC(125); REG_VEC(126); REG_VEC(127);
    REG_VEC(128); REG_VEC(129); REG_VEC(130); REG_VEC(131);
    REG_VEC(132); REG_VEC(133); REG_VEC(134); REG_VEC(135);
    REG_VEC(136); REG_VEC(137); REG_VEC(138); REG_VEC(139);
    REG_VEC(140); REG_VEC(141); REG_VEC(142); REG_VEC(143);
    REG_VEC(144); REG_VEC(145); REG_VEC(146); REG_VEC(147);
    REG_VEC(148); REG_VEC(149); REG_VEC(150); REG_VEC(151);
    REG_VEC(152); REG_VEC(153); REG_VEC(154); REG_VEC(155);
    REG_VEC(156); REG_VEC(157); REG_VEC(158); REG_VEC(159);
    REG_VEC(160); REG_VEC(161); REG_VEC(162); REG_VEC(163);
    REG_VEC(164); REG_VEC(165); REG_VEC(166); REG_VEC(167);
    REG_VEC(168); REG_VEC(169); REG_VEC(170); REG_VEC(171);
    REG_VEC(172); REG_VEC(173); REG_VEC(174); REG_VEC(175);
    REG_VEC(176); REG_VEC(177); REG_VEC(178); REG_VEC(179);
    REG_VEC(180); REG_VEC(181); REG_VEC(182); REG_VEC(183);
    REG_VEC(184); REG_VEC(185); REG_VEC(186); REG_VEC(187);
    REG_VEC(188); REG_VEC(189); REG_VEC(190); REG_VEC(191);
    REG_VEC(192); REG_VEC(193); REG_VEC(194); REG_VEC(195);
    REG_VEC(196); REG_VEC(197); REG_VEC(198); REG_VEC(199);
    REG_VEC(200); REG_VEC(201); REG_VEC(202); REG_VEC(203);
    REG_VEC(204); REG_VEC(205); REG_VEC(206); REG_VEC(207);
    REG_VEC(208); REG_VEC(209); REG_VEC(210); REG_VEC(211);
    REG_VEC(212); REG_VEC(213); REG_VEC(214); REG_VEC(215);
    REG_VEC(216); REG_VEC(217); REG_VEC(218); REG_VEC(219);
    REG_VEC(220); REG_VEC(221); REG_VEC(222); REG_VEC(223);
    REG_VEC(224); REG_VEC(225); REG_VEC(226); REG_VEC(227);
    REG_VEC(228); REG_VEC(229); REG_VEC(230); REG_VEC(231);
    REG_VEC(232); REG_VEC(233); REG_VEC(234); REG_VEC(235);
    REG_VEC(236); REG_VEC(237); REG_VEC(238); REG_VEC(239);
    REG_VEC(240); REG_VEC(241); REG_VEC(242); REG_VEC(243);
    REG_VEC(244); REG_VEC(245); REG_VEC(246); REG_VEC(247);
    REG_VEC(248); REG_VEC(249); REG_VEC(250); REG_VEC(251);
    REG_VEC(252); REG_VEC(253); REG_VEC(254); REG_VEC(255);
#undef REG_VEC

    // Pre-fill every generic slot with an UnhandledInterruptHandler so stray
    // interrupts are reported instead of dereferencing a null slot.
    for (u8 i = 0; i < GENERIC_INTERRUPT_HANDLERS_COUNT; ++i) {
        auto* handler = new UnhandledInterruptHandler(i);
        handler->register_interrupt_handler();
    }
    flush_idt();
}
}

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
class GenericInterruptHandler;
// Shared assembly stub that every generated per-vector entry jumps to; it is
// where the saved state ultimately reaches handle_interrupt().
extern "C" void interrupt_common_asm_entry();
// Two 16-bit zero pushes emitted before the ISR number / error code pushes
// below. NOTE(review): presumably this pads the pushed words out to a full
// qword so the stack layout matches the register-state struct — confirm.
#define INTERRUPT_HANDLER_PUSH_PADDING "pushw $0\npushw $0\n"
// clang-format off
// Emits interrupt_<isr_number>_asm_entry: a stub that pushes the padding,
// the vector number and a zero error code, then tail-jumps to
// interrupt_common_asm_entry. The dummy function exists only to carry the
// inline asm; __attribute__((used)) keeps it from being discarded.
#define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \
extern "C" void interrupt_##isr_number##_asm_entry(); \
static void interrupt_##isr_number##_asm_entry_dummy() __attribute__((used)); \
NEVER_INLINE void interrupt_##isr_number##_asm_entry_dummy() \
{ \
asm(".globl interrupt_" #isr_number "_asm_entry\n" \
"interrupt_" #isr_number "_asm_entry:\n" \
INTERRUPT_HANDLER_PUSH_PADDING \
" pushw $" #isr_number "\n" \
" pushw $0\n" \
" jmp interrupt_common_asm_entry\n"); \
}
// clang-format on
// IDT registration helpers, defined in Interrupts.cpp.
void register_interrupt_handler(u8 number, void (*handler)());
void register_user_callable_interrupt_handler(u8 number, void (*handler)());
void register_disabled_interrupt_handler(u8 number, GenericInterruptHandler& handler);
}

View file

@ -0,0 +1,670 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <AK/Singleton.h>
#include <AK/Types.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Arch/x86_64/MSR.h>
#include <Kernel/Arch/x86_64/ProcessorInfo.h>
#include <Kernel/Arch/x86_64/Time/APICTimer.h>
#include <Kernel/Debug.h>
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/Panic.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
// Relative IRQ numbers (offset from IRQ_VECTOR_BASE) for the local APIC's
// own interrupt sources; they occupy the top four IDT vectors (0xfc-0xff).
#define IRQ_APIC_TIMER (0xfc - IRQ_VECTOR_BASE)
#define IRQ_APIC_IPI (0xfd - IRQ_VECTOR_BASE)
#define IRQ_APIC_ERR (0xfe - IRQ_VECTOR_BASE)
#define IRQ_APIC_SPURIOUS (0xff - IRQ_VECTOR_BASE)
// ICR delivery-status bit: set while an IPI is still pending.
#define APIC_ICR_DELIVERY_PENDING (1 << 12)
// Software-enable bit in the spurious-interrupt-vector (SIV) register.
#define APIC_ENABLED (1 << 8)
// IA32_APIC_BASE MSR, and the base of the x2APIC register window in MSR space.
#define APIC_BASE_MSR 0x1b
#define APIC_REGS_MSR_BASE 0x800
// Memory-mapped (xAPIC) register offsets. In x2APIC mode the same registers
// are accessed as MSRs at APIC_REGS_MSR_BASE + (offset >> 4); see
// APIC::read_register()/write_register().
#define APIC_REG_ID 0x20
#define APIC_REG_EOI 0xb0
#define APIC_REG_LD 0xd0
#define APIC_REG_DF 0xe0
#define APIC_REG_SIV 0xf0
#define APIC_REG_TPR 0x80
#define APIC_REG_ICR_LOW 0x300
#define APIC_REG_ICR_HIGH 0x310
#define APIC_REG_LVT_TIMER 0x320
#define APIC_REG_LVT_THERMAL 0x330
#define APIC_REG_LVT_PERFORMANCE_COUNTER 0x340
#define APIC_REG_LVT_LINT0 0x350
#define APIC_REG_LVT_LINT1 0x360
#define APIC_REG_LVT_ERR 0x370
#define APIC_REG_TIMER_INITIAL_COUNT 0x380
#define APIC_REG_TIMER_CURRENT_COUNT 0x390
#define APIC_REG_TIMER_CONFIGURATION 0x3e0
namespace Kernel {
// The single system-wide APIC instance, lazily constructed by APIC::initialize().
static Singleton<APIC> s_apic;
// Handles the inter-processor-interrupt (IPI) vector. handle_interrupt() and
// eoi() are defined elsewhere in this file.
class APICIPIInterruptHandler final : public GenericInterruptHandler {
public:
    explicit APICIPIInterruptHandler(u8 interrupt_vector)
        : GenericInterruptHandler(interrupt_vector, true)
    {
    }
    virtual ~APICIPIInterruptHandler()
    {
    }
    // Allocates and registers a handler for the given vector; the allocation
    // lives for the kernel's lifetime once registered.
    static void initialize(u8 interrupt_number)
    {
        auto* handler = new APICIPIInterruptHandler(interrupt_number);
        handler->register_interrupt_handler();
    }
    virtual bool handle_interrupt(RegisterState const&) override;
    virtual bool eoi() override;
    virtual HandlerType type() const override { return HandlerType::IRQHandler; }
    virtual StringView purpose() const override { return "IPI Handler"sv; }
    virtual StringView controller() const override { return {}; }
    // This handler is never shared with other devices.
    virtual size_t sharing_devices_count() const override { return 0; }
    virtual bool is_shared_handler() const override { return false; }
    virtual bool is_sharing_with_others() const override { return false; }
private:
};
// Handler for the local APIC error vector (IRQ_APIC_ERR), used during SMP
// operation. Registered once on the BSP via initialize() in APIC::enable(0).
class APICErrInterruptHandler final : public GenericInterruptHandler {
public:
    explicit APICErrInterruptHandler(u8 interrupt_vector)
        : GenericInterruptHandler(interrupt_vector, true)
    {
    }
    virtual ~APICErrInterruptHandler()
    {
    }

    // Allocates and registers the singleton handler for the given vector.
    // NOTE: the handler is intentionally leaked; it lives for the kernel's lifetime.
    static void initialize(u8 interrupt_number)
    {
        auto* handler = new APICErrInterruptHandler(interrupt_number);
        handler->register_interrupt_handler();
    }

    virtual bool handle_interrupt(RegisterState const&) override;
    virtual bool eoi() override;

    virtual HandlerType type() const override { return HandlerType::IRQHandler; }
    virtual StringView purpose() const override { return "SMP Error Handler"sv; }
    virtual StringView controller() const override { return {}; }

    // The error vector is never shared between devices.
    virtual size_t sharing_devices_count() const override { return 0; }
    virtual bool is_shared_handler() const override { return false; }
    virtual bool is_sharing_with_others() const override { return false; }

private:
};
// Returns true once APIC::initialize() has constructed the global singleton.
bool APIC::initialized()
{
    return s_apic.is_initialized();
}
// Returns the global APIC singleton; must not be called before APIC::initialize().
APIC& APIC::the()
{
    VERIFY(APIC::initialized());
    return *s_apic;
}
// Constructs the global APIC singleton exactly once during early boot.
UNMAP_AFTER_INIT void APIC::initialize()
{
    VERIFY(!APIC::initialized());
    s_apic.ensure_instance();
}
// Reads the local APIC base physical address from the IA32_APIC_BASE MSR,
// masking off the enable/status flag bits in the low 12 bits.
PhysicalAddress APIC::get_base()
{
    return PhysicalAddress(MSR(APIC_BASE_MSR).get() & 0xfffff000);
}
// Writes the IA32_APIC_BASE MSR: sets the APIC global-enable bit (11) and,
// in x2APIC mode, the x2APIC-enable bit (10) alongside the base address.
void APIC::set_base(PhysicalAddress const& base)
{
    u64 const enable_flags = m_is_x2 ? ((1 << 11) | (1 << 10)) : (1 << 11);
    MSR msr(APIC_BASE_MSR);
    msr.set(base.get() | enable_flags);
}
// Writes a local APIC register. In x2APIC mode registers are accessed via
// MSRs (one MSR per 16-byte MMIO offset, hence offset >> 4); in xAPIC mode
// via the memory-mapped register window at m_apic_base.
void APIC::write_register(u32 offset, u32 value)
{
    if (m_is_x2) {
        MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
        msr.set(value);
    } else {
        *reinterpret_cast<u32 volatile*>(m_apic_base->vaddr().offset(offset).as_ptr()) = value;
    }
}
// Reads a local APIC register; mirrors write_register()'s x2APIC-MSR vs.
// xAPIC-MMIO dispatch. The MSR value is truncated to the low 32 bits.
u32 APIC::read_register(u32 offset)
{
    if (m_is_x2) {
        MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
        return (u32)msr.get();
    }
    return *reinterpret_cast<u32 volatile*>(m_apic_base->vaddr().offset(offset).as_ptr());
}
// Read-modify-write of an LVT register: ORs the interrupt vector into the
// register's existing contents.
void APIC::set_lvt(u32 offset, u8 interrupt)
{
    u32 const current = read_register(offset);
    write_register(offset, current | interrupt);
}
// Read-modify-write of the spurious-interrupt-vector register: ORs in the
// vector number and the APIC software-enable bit.
void APIC::set_siv(u32 offset, u8 interrupt)
{
    u32 const current = read_register(offset);
    write_register(offset, current | interrupt | APIC_ENABLED);
}
// Busy-waits (with 200us delays) until the ICR delivery-pending bit clears,
// i.e. until the previous IPI has been dispatched by the local APIC.
void APIC::wait_for_pending_icr()
{
    while ((read_register(APIC_REG_ICR_LOW) & APIC_ICR_DELIVERY_PENDING) != 0) {
        microseconds_delay(200);
    }
}
// Issues an IPI by writing the interrupt command register. In x2APIC mode this
// is a single 64-bit MSR write; in xAPIC mode the high half (destination) must
// be written BEFORE the low half, because writing the low half triggers the IPI.
void APIC::write_icr(ICRReg const& icr)
{
    if (m_is_x2) {
        MSR msr(APIC_REGS_MSR_BASE + (APIC_REG_ICR_LOW >> 4));
        msr.set(icr.x2_value());
    } else {
        write_register(APIC_REG_ICR_HIGH, icr.x_high());
        write_register(APIC_REG_ICR_LOW, icr.x_low());
    }
}
#define APIC_LVT_TIMER_ONESHOT 0
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE (1 << 18)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_TRIGGER_LEVEL (1 << 14)
#define APIC_LVT(iv, dm) (((iv)&0xff) | (((dm)&0x7) << 8))
extern "C" void apic_ap_start(void);
extern "C" u16 apic_ap_start_size;
extern "C" FlatPtr ap_cpu_init_stacks;
extern "C" FlatPtr ap_cpu_init_processor_info_array;
extern "C" u32 ap_cpu_init_cr0;
extern "C" FlatPtr ap_cpu_init_cr3;
extern "C" u32 ap_cpu_init_cr4;
extern "C" FlatPtr ap_cpu_gdtr;
extern "C" FlatPtr ap_cpu_idtr;
extern "C" FlatPtr ap_cpu_kernel_map_base;
extern "C" FlatPtr ap_cpu_kernel_entry_function;
extern "C" [[noreturn]] void init_ap(FlatPtr, Processor*);
// Signals end-of-interrupt to the local APIC by writing 0 to the EOI register.
void APIC::eoi()
{
    write_register(APIC_REG_EOI, 0x0);
}
// Returns the IRQ number (relative to IRQ_VECTOR_BASE) used for spurious
// APIC interrupts (hardware vector 0xff).
u8 APIC::spurious_interrupt_vector()
{
    return IRQ_APIC_SPURIOUS;
}
#define APIC_INIT_VAR_PTR(tpe, vaddr, varname) \
reinterpret_cast<tpe volatile*>(reinterpret_cast<ptrdiff_t>(vaddr) \
+ reinterpret_cast<ptrdiff_t>(&varname) \
- reinterpret_cast<ptrdiff_t>(&apic_ap_start))
// Detects and enables the local APIC on the bootstrap processor.
// Checks CPUID.01h:EDX[9] for APIC support and CPUID.01h:ECX[21] for x2APIC,
// maps the xAPIC MMIO window if needed, and counts processors by walking the
// ACPI MADT. Returns false if any prerequisite is missing.
UNMAP_AFTER_INIT bool APIC::init_bsp()
{
    // FIXME: Use the ACPI MADT table
    if (!MSR::have())
        return false;

    // check if we support local apic
    CPUID id(1);
    if ((id.edx() & (1 << 9)) == 0)
        return false;
    if (id.ecx() & (1 << 21))
        m_is_x2 = true;

    PhysicalAddress apic_base = get_base();
    dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2 ? "x2" : "x", apic_base);
    set_base(apic_base);

    // In xAPIC mode the register window is memory-mapped; x2APIC uses MSRs only.
    if (!m_is_x2) {
        auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
        if (region_or_error.is_error()) {
            dbgln("APIC: Failed to allocate memory for APIC base");
            return false;
        }
        m_apic_base = region_or_error.release_value();
    }

    auto rsdp = ACPI::StaticParsing::find_rsdp();
    if (!rsdp.has_value()) {
        dbgln("APIC: RSDP not found");
        return false;
    }
    auto madt_address = ACPI::StaticParsing::find_table(rsdp.value(), "APIC"sv);
    if (!madt_address.has_value()) {
        dbgln("APIC: MADT table not found");
        return false;
    }

    if (kernel_command_line().is_smp_enabled()) {
        auto madt_or_error = Memory::map_typed<ACPI::Structures::MADT>(madt_address.value());
        if (madt_or_error.is_error()) {
            dbgln("APIC: Failed to map MADT table");
            return false;
        }
        auto madt = madt_or_error.release_value();

        // Walk the variable-length MADT entries, counting LocalAPIC and
        // Local_x2APIC processor entries (flag bit 0 = processor enabled).
        size_t entry_index = 0;
        size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
        auto* madt_entry = madt->entries;
        while (entries_length > 0) {
            size_t entry_length = madt_entry->length;
            if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::LocalAPIC) {
                auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry;
                dbgln_if(APIC_DEBUG, "APIC: AP found @ MADT entry {}, processor ID: {}, xAPIC ID: {}, flags: {:#08x}", entry_index, plapic_entry->acpi_processor_id, plapic_entry->apic_id, plapic_entry->flags);
                m_processor_cnt++;
                if ((plapic_entry->flags & 0x1) != 0)
                    m_processor_enabled_cnt++;
            } else if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::Local_x2APIC) {
                // Only used for APID IDs >= 255
                auto* plx2apic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalX2APIC*)madt_entry;
                dbgln_if(APIC_DEBUG, "APIC: AP found @ MADT entry {}, processor ID: {}, x2APIC ID: {}, flags: {:#08x}", entry_index, plx2apic_entry->acpi_processor_id, plx2apic_entry->apic_id, plx2apic_entry->flags);
                m_processor_cnt++;
                if ((plx2apic_entry->flags & 0x1) != 0)
                    m_processor_enabled_cnt++;
            }
            madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
            entries_length -= entry_length;
            entry_index++;
        }
        dbgln("APIC processors found: {}, enabled: {}", m_processor_cnt, m_processor_enabled_cnt);
    }

    // Always account for at least the BSP itself.
    if (m_processor_enabled_cnt < 1)
        m_processor_enabled_cnt = 1;
    if (m_processor_cnt < 1)
        m_processor_cnt = 1;

    enable(0);
    return true;
}
// Prepares the identity-mapped trampoline at physical 0x8000 that application
// processors execute after the SIPI: copies the real-mode startup code there,
// then appends per-AP stack pointers and Processor* values and patches the
// shared variables (CR0/CR3/CR4, GDTR/IDTR, kernel base, entry function) that
// apic_ap_start reads. The APIC_INIT_VAR_PTR macro translates a variable's
// link-time address into its location inside the copied trampoline.
UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
{
    VERIFY(!m_ap_boot_environment);
    VERIFY(m_processor_enabled_cnt > 1);
    u32 aps_to_enable = m_processor_enabled_cnt - 1;

    // Copy the APIC startup code and variables to P0x00008000
    // Also account for the data appended to:
    // * aps_to_enable u32 values for ap_cpu_init_stacks
    // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
    constexpr u64 apic_startup_region_base = 0x8000;
    auto apic_startup_region_size = Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(FlatPtr))).release_value_but_fixme_should_propagate_errors();
    VERIFY(apic_startup_region_size < USER_RANGE_BASE);
    auto apic_startup_region = MUST(MM.create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
    u8* apic_startup_region_ptr = apic_startup_region->vaddr().as_ptr();
    memcpy(apic_startup_region_ptr, reinterpret_cast<void const*>(apic_ap_start), apic_ap_start_size);

    // Allocate enough stacks for all APs
    m_ap_temporary_boot_stacks.ensure_capacity(aps_to_enable);
    for (u32 i = 0; i < aps_to_enable; i++) {
        auto stack_region_or_error = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
        if (stack_region_or_error.is_error()) {
            dbgln("APIC: Failed to allocate stack for AP #{}", i);
            return;
        }
        auto stack_region = stack_region_or_error.release_value();
        stack_region->set_stack(true);
        m_ap_temporary_boot_stacks.unchecked_append(move(stack_region));
    }

    // Store pointers to all stacks for the APs to use
    auto* ap_stack_array = APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_stacks);
    VERIFY(aps_to_enable == m_ap_temporary_boot_stacks.size());
    for (size_t i = 0; i < aps_to_enable; i++) {
        // Each entry points at the TOP of the AP's stack (stacks grow down).
        ap_stack_array[i] = m_ap_temporary_boot_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
        dbgln_if(APIC_DEBUG, "APIC: CPU[{}] stack at {}", i + 1, VirtualAddress { ap_stack_array[i] });
    }

    // Allocate Processor structures for all APs and store the pointer to the data
    m_ap_processor_info.resize(aps_to_enable);
    for (size_t i = 0; i < aps_to_enable; i++)
        m_ap_processor_info[i] = adopt_nonnull_own_or_enomem(new (nothrow) Processor()).release_value_but_fixme_should_propagate_errors();
    auto* ap_processor_info_array = &ap_stack_array[aps_to_enable];
    for (size_t i = 0; i < aps_to_enable; i++) {
        ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr());
        dbgln_if(APIC_DEBUG, "APIC: CPU[{}] processor at {}", i + 1, VirtualAddress { ap_processor_info_array[i] });
    }
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_processor_info_array) = FlatPtr(&ap_processor_info_array[0]);

    // Store the BSP's CR3 value for the APs to use
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr3) = MM.kernel_page_directory().cr3();

    // Store the BSP's GDT and IDT for the APs to use
    auto const& gdtr = Processor::current().get_gdtr();
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_gdtr) = FlatPtr(&gdtr);
    auto const& idtr = get_idtr();
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_idtr) = FlatPtr(&idtr);

    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_map_base) = FlatPtr(kernel_mapping_base);
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_entry_function) = FlatPtr(&init_ap);

    // Store the BSP's CR0 and CR4 values for the APs to use
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr0) = read_cr0();
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr4) = read_cr4();

    m_ap_boot_environment = move(apic_startup_region);
}
// Boots all application processors using the INIT-SIPI-SIPI sequence, waits for
// every AP to report in via m_apic_ap_count, then tears down the identity-mapped
// trampoline and temporary boot stacks.
UNMAP_AFTER_INIT void APIC::do_boot_aps()
{
    VERIFY(m_ap_boot_environment);
    VERIFY(m_processor_enabled_cnt > 1);
    u32 aps_to_enable = m_processor_enabled_cnt - 1;

    // Create an idle thread for each processor. We have to do this here
    // because we won't be able to send FlushTLB messages, so we have to
    // have all memory set up for the threads so that when the APs are
    // starting up, they can access all the memory properly
    m_ap_idle_threads.resize(aps_to_enable);
    for (u32 i = 0; i < aps_to_enable; i++)
        m_ap_idle_threads[i] = Scheduler::create_ap_idle_thread(i + 1);

    dbgln_if(APIC_DEBUG, "APIC: Starting {} AP(s)", aps_to_enable);

    // INIT
    write_icr({ 0, 0, ICRReg::INIT, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf });

    microseconds_delay(10 * 1000);

    // Two SIPIs, as recommended by the MP startup sequence.
    for (int i = 0; i < 2; i++) {
        // SIPI
        write_icr({ 0x08, 0, ICRReg::StartUp, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf }); // start execution at P8000

        microseconds_delay(200);
    }

    // Now wait until the ap_cpu_init_pending variable dropped to 0, which means all APs are initialized and no longer need these special mappings
    if (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable) {
        dbgln_if(APIC_DEBUG, "APIC: Waiting for {} AP(s) to finish initialization...", aps_to_enable);
        do {
            // Wait a little bit
            microseconds_delay(200);
        } while (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable);
    }

    dbgln_if(APIC_DEBUG, "APIC: {} processors are initialized and running", m_processor_enabled_cnt);

    // NOTE: Since this region is identity-mapped, we have to unmap it manually to prevent the virtual
    //       address range from leaking into the general virtual range allocator.
    m_ap_boot_environment->unmap();
    m_ap_boot_environment = nullptr;

    // When the APs signal that they finished their initialization they have already switched over to their
    // idle thread's stack, so the temporary boot stack can be deallocated
    m_ap_temporary_boot_stacks.clear();
}
// Public entry point for SMP bring-up: boots the APs, enables IPI delivery,
// then releases the APs (which spin in init_finished()) to continue execution.
// No-op on single-processor systems.
UNMAP_AFTER_INIT void APIC::boot_aps()
{
    if (m_processor_enabled_cnt <= 1)
        return;

    // We split this into another call because do_boot_aps() will cause
    // MM calls upon exit, and we don't want to call smp_enable before that
    do_boot_aps();

    // Enable SMP, which means IPIs may now be sent
    Processor::smp_enable();

    dbgln_if(APIC_DEBUG, "All processors initialized and waiting, trigger all to continue");

    // Now trigger all APs to continue execution (need to do this after
    // the regions have been freed so that we don't trigger IPIs
    m_apic_ap_continue.store(1, AK::MemoryOrder::memory_order_release);
}
// Enables the local APIC on the calling CPU: assigns the (logical or x2) APIC
// ID, registers the spurious/error/IPI handlers (on the BSP only), programs the
// LVT entries and spurious-interrupt vector, and opens the task-priority gate.
UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
{
    // In flat xAPIC logical-destination mode only 8 logical IDs fit, so
    // non-x2 configurations are limited to CPUs 0-7.
    VERIFY(m_is_x2 || cpu < 8);

    u32 apic_id;
    if (m_is_x2) {
        dbgln_if(APIC_DEBUG, "Enable x2APIC on CPU #{}", cpu);

        // We need to enable x2 mode on each core independently
        set_base(get_base());

        apic_id = read_register(APIC_REG_ID);
    } else {
        dbgln_if(APIC_DEBUG, "Setting logical xAPIC ID for CPU #{}", cpu);

        // Use the CPU# as logical apic id.
        // NOTE: This was `VERIFY(cpu <= 8)`, which was inconsistent with the
        // `cpu < 8` check at the top of this function; cpu == 8 could never
        // reach this point, so tightening the bound is behavior-preserving.
        VERIFY(cpu < 8);
        write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (cpu << 24));

        // read it back to make sure it's actually set
        apic_id = read_register(APIC_REG_LD) >> 24;
    }

    dbgln_if(APIC_DEBUG, "CPU #{} apic id: {}", cpu, apic_id);
    Processor::current().info().set_apic_id(apic_id);

    dbgln_if(APIC_DEBUG, "Enabling local APIC for CPU #{}, logical APIC ID: {}", cpu, apic_id);

    // The interrupt handlers are shared by all CPUs; register them only once.
    if (cpu == 0) {
        SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);

        APICErrInterruptHandler::initialize(IRQ_APIC_ERR);

        // register IPI interrupt vector
        APICIPIInterruptHandler::initialize(IRQ_APIC_IPI);
    }

    if (!m_is_x2) {
        // local destination mode (flat mode), not supported in x2 mode
        write_register(APIC_REG_DF, 0xf0000000);
    }

    // set error interrupt vector
    set_lvt(APIC_REG_LVT_ERR, IRQ_APIC_ERR);

    // set spurious interrupt vector
    set_siv(APIC_REG_SIV, IRQ_APIC_SPURIOUS);

    // Mask the LVT entries we don't use; LINT1 stays level-triggered (NMI line).
    write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);

    // Task-priority register 0 => accept all interrupt priorities.
    write_register(APIC_REG_TPR, 0);
}
// Returns the pre-created idle thread for an AP (cpu >= 1); the BSP's idle
// thread is not stored here, hence the cpu - 1 indexing.
Thread* APIC::get_idle_thread(u32 cpu) const
{
    VERIFY(cpu > 0);
    return m_ap_idle_threads[cpu - 1];
}
// Called by each AP once its early initialization is complete: notifies the
// BSP (m_apic_ap_count), spins until the BSP releases all APs via
// m_apic_ap_continue, refreshes the TLB, and enables this CPU's local APIC.
UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
{
    // This method is called once the boot stack is no longer needed
    VERIFY(cpu > 0);
    VERIFY(cpu < m_processor_enabled_cnt);
    // Since we're waiting on other APs here, we shouldn't have the
    // scheduler lock
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());

    // Notify the BSP that we are done initializing. It will unmap the startup data at P8000
    m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    dbgln_if(APIC_DEBUG, "APIC: CPU #{} initialized, waiting for all others", cpu);

    // The reason we're making all APs wait until the BSP signals them is that
    // we don't want APs to trigger IPIs (e.g. through MM) while the BSP
    // is unable to process them
    while (!m_apic_ap_continue.load(AK::MemoryOrder::memory_order_consume)) {
        microseconds_delay(200);
    }

    dbgln_if(APIC_DEBUG, "APIC: CPU #{} continues, all others are initialized", cpu);

    // do_boot_aps() freed memory, so we need to update our tlb
    Processor::flush_entire_tlb_local();

    // Now enable all the interrupts
    APIC::the().enable(cpu);
}
// Sends the IPI vector to every CPU except the sender, using logical
// destination 0xffffffff with the all-excluding-self shorthand.
void APIC::broadcast_ipi()
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: Broadcast IPI from CPU #{}", Processor::current_id());
    wait_for_pending_icr();
    write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, 0xffffffff, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf });
}
// Sends the IPI vector to a single CPU. x2APIC mode targets the physical APIC
// ID; xAPIC mode targets the logical ID (which APIC::enable set to the CPU#).
void APIC::send_ipi(u32 cpu)
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: Send IPI from CPU #{} to CPU #{}", Processor::current_id(), cpu);
    VERIFY(cpu != Processor::current_id());
    VERIFY(cpu < Processor::count());
    wait_for_pending_icr();
    write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2 ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2 ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand });
}
// Creates and calibrates the APIC timer against the given reference timer.
// Returns nullptr if the local APIC register window is unavailable.
// Must run exactly once, on the BSP.
UNMAP_AFTER_INIT APICTimer* APIC::initialize_timers(HardwareTimerBase& calibration_timer)
{
    if (!m_apic_base && !m_is_x2)
        return nullptr;

    // We should only initialize and calibrate the APIC timer once on the BSP!
    VERIFY(Processor::is_bootstrap_processor());
    VERIFY(!m_apic_timer);

    m_apic_timer = APICTimer::initialize(IRQ_APIC_TIMER, calibration_timer);
    return m_apic_timer;
}
// Programs this CPU's local APIC timer: selects the timer mode, optionally
// masks the LVT entry, encodes the divide-configuration register (divisor
// encoding uses bits 0, 1 and 3), and for periodic mode loads the initial count.
void APIC::setup_local_timer(u32 ticks, TimerMode timer_mode, bool enable)
{
    u32 flags = 0;
    switch (timer_mode) {
    case TimerMode::OneShot:
        flags |= APIC_LVT_TIMER_ONESHOT;
        break;
    case TimerMode::Periodic:
        flags |= APIC_LVT_TIMER_PERIODIC;
        break;
    case TimerMode::TSCDeadline:
        flags |= APIC_LVT_TIMER_TSCDEADLINE;
        break;
    }
    if (!enable)
        flags |= APIC_LVT_MASKED;
    write_register(APIC_REG_LVT_TIMER, APIC_LVT(IRQ_APIC_TIMER + IRQ_VECTOR_BASE, 0) | flags);

    u32 config = read_register(APIC_REG_TIMER_CONFIGURATION);
    config &= ~0xf; // clear divisor (bits 0-3)
    // Map the requested divisor to the APIC divide-configuration encoding
    // (divide-by-1 is the special value 0b1011; divide-by-2 is 0b0000).
    switch (get_timer_divisor()) {
    case 1:
        config |= (1 << 3) | 3;
        break;
    case 2:
        break;
    case 4:
        config |= 1;
        break;
    case 8:
        config |= 2;
        break;
    case 16:
        config |= 3;
        break;
    case 32:
        config |= (1 << 3);
        break;
    case 64:
        config |= (1 << 3) | 1;
        break;
    case 128:
        config |= (1 << 3) | 2;
        break;
    default:
        VERIFY_NOT_REACHED();
    }
    write_register(APIC_REG_TIMER_CONFIGURATION, config);

    // One-shot/TSC-deadline counts are loaded elsewhere; only periodic mode
    // arms the initial-count register here.
    if (timer_mode == TimerMode::Periodic)
        write_register(APIC_REG_TIMER_INITIAL_COUNT, ticks / get_timer_divisor());
}
// Reads the timer's current-count register (counts down from the initial count).
u32 APIC::get_timer_current_count()
{
    return read_register(APIC_REG_TIMER_CURRENT_COUNT);
}
// Fixed timer divisor used by setup_local_timer(); must be one of the values
// the divide-configuration switch there can encode.
u32 APIC::get_timer_divisor()
{
    return 16;
}
// IPIs carry no payload; receiving one is enough (e.g. to wake the CPU),
// so just log and report the interrupt as handled.
bool APICIPIInterruptHandler::handle_interrupt(RegisterState const&)
{
    dbgln_if(APIC_SMP_DEBUG, "APIC IPI on CPU #{}", Processor::current_id());
    return true;
}
// Acknowledge the IPI at the local APIC.
bool APICIPIInterruptHandler::eoi()
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: IPI EOI");
    APIC::the().eoi();
    return true;
}
// Local APIC error interrupts are only logged; no recovery is attempted.
bool APICErrInterruptHandler::handle_interrupt(RegisterState const&)
{
    dbgln("APIC: SMP error on CPU #{}", Processor::current_id());
    return true;
}
// Acknowledge the error interrupt at the local APIC.
bool APICErrInterruptHandler::eoi()
{
    APIC::the().eoi();
    return true;
}
// Explicit specialization: the APIC timer's hardware-timer EOI goes to the
// local APIC rather than through an IRQ controller.
bool HardwareTimer<GenericInterruptHandler>::eoi()
{
    APIC::the().eoi();
    return true;
}
}

View file

@ -0,0 +1,115 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Time/HardwareTimer.h>
namespace Kernel {
class APICTimer;
// Minimal descriptor for a local APIC, holding only its APIC ID.
struct LocalAPIC {
    u32 apic_id;
};
// Driver for the local APIC (xAPIC via MMIO, x2APIC via MSRs), responsible for
// interrupt delivery (EOI, IPIs), SMP application-processor bring-up, and the
// per-CPU APIC timer.
class APIC {
public:
    static APIC& the();
    static void initialize();
    static bool initialized();

    bool init_bsp();
    void eoi();
    void setup_ap_boot_environment();
    void boot_aps();
    void enable(u32 cpu);
    void init_finished(u32 cpu);
    void broadcast_ipi();
    void send_ipi(u32 cpu);
    static u8 spurious_interrupt_vector();
    Thread* get_idle_thread(u32 cpu) const;
    u32 enabled_processor_count() const { return m_processor_enabled_cnt; }

    APICTimer* initialize_timers(HardwareTimerBase&);
    APICTimer* get_timer() const { return m_apic_timer; }
    enum class TimerMode {
        OneShot,
        Periodic,
        TSCDeadline
    };
    void setup_local_timer(u32, TimerMode, bool);
    u32 get_timer_current_count();
    u32 get_timer_divisor();

private:
    // In-memory representation of an interrupt command register (ICR) write;
    // encodes to the 32-bit xAPIC halves or the 64-bit x2APIC MSR value.
    struct ICRReg {
        enum DeliveryMode {
            Fixed = 0x0,
            LowPriority = 0x1,
            SMI = 0x2,
            NMI = 0x4,
            INIT = 0x5,
            StartUp = 0x6,
        };
        enum DestinationMode {
            Physical = 0x0,
            Logical = 0x1,
        };
        enum Level {
            DeAssert = 0x0,
            Assert = 0x1
        };
        enum class TriggerMode {
            Edge = 0x0,
            Level = 0x1,
        };
        enum DestinationShorthand {
            NoShorthand = 0x0,
            Self = 0x1,
            AllIncludingSelf = 0x2,
            AllExcludingSelf = 0x3,
        };

        u8 vector { 0 };
        u32 destination { 0 };
        DeliveryMode delivery_mode { DeliveryMode::Fixed };
        DestinationMode destination_mode { DestinationMode::Physical };
        Level level { Level::DeAssert };
        TriggerMode trigger_mode { TriggerMode::Edge };
        DestinationShorthand destination_short { DestinationShorthand::NoShorthand };

        // xAPIC: low dword carries vector + control bits, high dword the destination.
        u32 x_low() const { return (u32)vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destination_short << 18); }
        u32 x_high() const { return destination << 24; }
        // x2APIC: single 64-bit value with the full 32-bit destination in the high half.
        u64 x2_value() const { return ((u64)destination << 32) | x_low(); }
    };

    OwnPtr<Memory::Region> m_apic_base;                     // xAPIC MMIO window (null in x2APIC mode)
    Vector<OwnPtr<Processor>> m_ap_processor_info;          // per-AP Processor structures
    Vector<OwnPtr<Memory::Region>> m_ap_temporary_boot_stacks; // freed after AP bring-up
    Vector<Thread*> m_ap_idle_threads;
    OwnPtr<Memory::Region> m_ap_boot_environment;           // identity-mapped trampoline at P0x8000
    Atomic<u8> m_apic_ap_count { 0 };                       // APs that finished early init
    Atomic<u8> m_apic_ap_continue { 0 };                    // BSP's release signal for waiting APs
    u32 m_processor_cnt { 0 };
    u32 m_processor_enabled_cnt { 0 };
    APICTimer* m_apic_timer { nullptr };
    bool m_is_x2 { false };

    static PhysicalAddress get_base();
    void set_base(PhysicalAddress const& base);
    void write_register(u32 offset, u32 value);
    u32 read_register(u32 offset);
    void set_lvt(u32 offset, u8 interrupt);
    void set_siv(u32 offset, u8 interrupt);
    void wait_for_pending_icr();
    void write_icr(ICRReg const& icr);
    void do_boot_aps();
};
}

View file

@ -0,0 +1,311 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Optional.h>
#include <Kernel/Arch/x86_64/InterruptManagement.h>
#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Arch/x86_64/Interrupts/IOAPIC.h>
#include <Kernel/Debug.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Sections.h>
#define IOAPIC_REDIRECTION_ENTRY_OFFSET 0x10
namespace Kernel {
// IOAPIC redirection-entry delivery modes (bits 8-10 of the low dword).
enum DeliveryMode {
    Normal = 0,
    LowPriority = 1,
    SMI = 2,
    NMI = 3,
    INIT = 4,
    External = 7
};
// Maps the IOAPIC's MMIO window, reads its identification registers
// (register 0x0 = ID, 0x1 = version + max redirection entry), and masks
// every redirection entry until drivers enable the ones they need.
// NOTE(review): the entry count takes `(reg 0x1 >> 16) + 1` without masking
// to 8 bits; bits 24-31 of that register are presumably zero — verify
// against the 82093AA datasheet.
UNMAP_AFTER_INIT IOAPIC::IOAPIC(PhysicalAddress address, u32 gsi_base)
    : m_address(address)
    , m_regs(Memory::map_typed_writable<ioapic_mmio_regs>(m_address).release_value_but_fixme_should_propagate_errors())
    , m_gsi_base(gsi_base)
    , m_id((read_register(0x0) >> 24) & 0xFF)
    , m_version(read_register(0x1) & 0xFF)
    , m_redirection_entries_count((read_register(0x1) >> 16) + 1)
{
    InterruptDisabler disabler;
    dmesgln("IOAPIC ID: {:#x}", m_id);
    dmesgln("IOAPIC Version: {:#x}, redirection entries: {}", m_version, m_redirection_entries_count);
    dmesgln("IOAPIC Arbitration ID {:#x}", read_register(0x2));
    mask_all_redirection_entries();
}
// All setup happens in the constructor; nothing further to do here.
UNMAP_AFTER_INIT void IOAPIC::initialize()
{
}
// Programs the redirection entry for an ISA interrupt. If the ACPI MADT
// declares an interrupt source override for this vector, its polarity and
// trigger-mode flags are honored; otherwise the vector is identity-mapped.
void IOAPIC::map_interrupt_redirection(u8 interrupt_vector)
{
    InterruptDisabler disabler;
    for (auto redirection_override : InterruptManagement::the().isa_overrides()) {
        if (redirection_override.source() != interrupt_vector)
            continue;
        bool active_low = false;
        // See ACPI spec Version 6.2, page 205 to learn more about Interrupt Overriding Flags.
        // Polarity field: 0 = bus default, 1 = active high, 3 = active low (2 is reserved).
        switch ((redirection_override.flags() & 0b11)) {
        case 0:
            active_low = false;
            break;
        case 1:
            active_low = false;
            break;
        case 2:
            VERIFY_NOT_REACHED(); // Reserved value
        case 3:
            active_low = true;
            break;
        }

        bool trigger_level_mode = false;
        // See ACPI spec Version 6.2, page 205 to learn more about Interrupt Overriding Flags.
        // Trigger-mode field: 0 = bus default, 1 = edge, 3 = level (2 is reserved).
        switch (((redirection_override.flags() >> 2) & 0b11)) {
        case 0:
            trigger_level_mode = false;
            break;
        case 1:
            trigger_level_mode = false;
            break;
        case 2:
            VERIFY_NOT_REACHED(); // Reserved value
        case 3:
            trigger_level_mode = true;
            break;
        }
        // Entry is created masked; IOAPIC::enable() unmasks it on demand.
        configure_redirection_entry(redirection_override.gsi() - gsi_base(), InterruptManagement::acquire_mapped_interrupt_number(redirection_override.source()) + IRQ_VECTOR_BASE, DeliveryMode::Normal, false, active_low, trigger_level_mode, true, 0);
        return;
    }
    isa_identity_map(interrupt_vector);
}
// Maps an ISA IRQ 1:1 onto the redirection entry of the same index, using
// default polarity/trigger settings; the entry is created masked.
void IOAPIC::isa_identity_map(size_t index)
{
    InterruptDisabler disabler;
    configure_redirection_entry(index, InterruptManagement::acquire_mapped_interrupt_number(index) + IRQ_VECTOR_BASE, DeliveryMode::Normal, false, false, false, true, 0);
}
// Hard-codes a level-triggered redirection for PCI IRQ 11 (entry created
// masked). NOTE(review): presumably a stopgap until proper PCI interrupt
// routing via ACPI — confirm against InterruptManagement.
void IOAPIC::map_pci_interrupts()
{
    InterruptDisabler disabler;
    configure_redirection_entry(11, 11 + IRQ_VECTOR_BASE, DeliveryMode::Normal, false, false, true, true, 0);
}
// The IOAPIC is considered enabled unless it has been hard-disabled.
bool IOAPIC::is_enabled() const
{
    return !is_hard_disabled();
}
// Spurious interrupts require no EOI at the local APIC; just sanity-check the
// handler and log the occurrence.
void IOAPIC::spurious_eoi(GenericInterruptHandler const& handler) const
{
    InterruptDisabler disabler;
    VERIFY(handler.type() == HandlerType::SpuriousInterruptHandler);
    VERIFY(handler.interrupt_number() == APIC::spurious_interrupt_vector());
    dbgln("IOAPIC: Spurious interrupt");
}
void IOAPIC::map_isa_interrupts()
{
InterruptDisabler disabler;
for (auto redirection_override : InterruptManagement::the().isa_overrides()) {
if ((redirection_override.gsi() < gsi_base()) || (redirection_override.gsi() >= (gsi_base() + m_redirection_entries_count)))
continue;
bool active_low = false;
// See ACPI spec Version 6.2, page 205 to learn more about Interrupt Overriding Flags.
switch ((redirection_override.flags() & 0b11)) {
case 0:
active_low = false;
break;
case 1:
active_low = false;
break;
case 2:
VERIFY_NOT_REACHED();
case 3:
active_low = true;
break;
}
bool trigger_level_mode = false;
// See ACPI spec Version 6.2, page 205 to learn more about Interrupt Overriding Flags.
switch (((redirection_override.flags() >> 2) & 0b11)) {
case 0:
trigger_level_mode = false;
break;
case 1:
trigger_level_mode = false;
break;
case 2:
VERIFY_NOT_REACHED();
case 3:
trigger_level_mode = true;
break;
}
configure_redirection_entry(redirection_override.gsi() - gsi_base(), InterruptManagement::acquire_mapped_interrupt_number(redirection_override.source()) + IRQ_VECTOR_BASE, 0, false, active_low, trigger_level_mode, true, 0);
}
}
// Clears every redirection entry (vector 0, masked, defaults).
void IOAPIC::reset_all_redirection_entries() const
{
    InterruptDisabler disabler;
    for (size_t index = 0; index < m_redirection_entries_count; index++)
        reset_redirection_entry(index);
}
// Fully disables this IOAPIC: wipes all redirection entries, then marks the
// controller hard-disabled via the base class.
void IOAPIC::hard_disable()
{
    InterruptDisabler disabler;
    reset_all_redirection_entries();
    IRQController::hard_disable();
}
// Resets one redirection entry to its cleared state: vector 0, delivery mode 0,
// physical destination, active-high, edge-triggered, masked, destination 0.
void IOAPIC::reset_redirection_entry(size_t index) const
{
    InterruptDisabler disabler;
    configure_redirection_entry(index, 0, 0, false, false, false, true, 0);
}
// Writes one 64-bit redirection entry as two 32-bit register writes.
// Low dword: vector (bits 0-7), delivery mode (8-10), destination mode (11),
// polarity (13), trigger mode (15), mask (16). High dword: destination (24-31).
void IOAPIC::configure_redirection_entry(size_t index, u8 interrupt_vector, u8 delivery_mode, bool logical_destination, bool active_low, bool trigger_level_mode, bool masked, u8 destination) const
{
    InterruptDisabler disabler;
    VERIFY(index < m_redirection_entries_count);
    u32 redirection_entry1 = interrupt_vector | (delivery_mode & 0b111) << 8 | logical_destination << 11 | active_low << 13 | trigger_level_mode << 15 | masked << 16;
    u32 redirection_entry2 = destination << 24;
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET, redirection_entry1);
    if constexpr (IOAPIC_DEBUG)
        dbgln("IOAPIC Value: {:#x}", read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET));
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET + 1, redirection_entry2);
    if constexpr (IOAPIC_DEBUG)
        // Was a hard-coded 0x11; use the named offset + 1 (same value) so the
        // debug read matches the register written on the previous line.
        dbgln("IOAPIC Value: {:#x}", read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET + 1));
}
// Sets the mask bit on every redirection entry (used at construction time).
void IOAPIC::mask_all_redirection_entries() const
{
    InterruptDisabler disabler;
    for (size_t index = 0; index < m_redirection_entries_count; index++)
        mask_redirection_entry(index);
}
// Sets the mask bit (bit 16) on one redirection entry; no-op if already masked
// (avoids a redundant MMIO write).
void IOAPIC::mask_redirection_entry(u8 index) const
{
    VERIFY(index < m_redirection_entries_count);
    u32 redirection_entry = read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET);
    if (redirection_entry & (1 << 16))
        return;
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET, redirection_entry | (1 << 16));
}
// Returns whether the mask bit (bit 16) is set on the given redirection entry.
bool IOAPIC::is_redirection_entry_masked(u8 index) const
{
    VERIFY(index < m_redirection_entries_count);
    return (read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET) & (1 << 16)) != 0;
}
// Clears the mask bit (bit 16) on one redirection entry; no-op if already
// unmasked (avoids a redundant MMIO write).
void IOAPIC::unmask_redirection_entry(u8 index) const
{
    VERIFY(index < m_redirection_entries_count);
    u32 redirection_entry = read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET);
    if (!(redirection_entry & (1 << 16)))
        return;
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET, redirection_entry & ~(1 << 16));
}
// NOTE(review): despite the name, this returns the MASKED state of the entry
// (true = masked, i.e. apparently *disabled*). Callers may depend on the
// current semantics, so it is documented rather than changed here — verify
// against IRQController's contract before inverting.
bool IOAPIC::is_vector_enabled(u8 interrupt_vector) const
{
    InterruptDisabler disabler;
    return is_redirection_entry_masked(interrupt_vector);
}
// Returns the interrupt vector (bits 0-7) programmed into a redirection entry.
u8 IOAPIC::read_redirection_entry_vector(u8 index) const
{
    VERIFY(index < m_redirection_entries_count);
    return (read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET) & 0xFF);
}
// Linearly scans the redirection entries for one programmed with the mapped
// hardware vector corresponding to `vector`; returns its index, or empty if
// no entry matches.
Optional<int> IOAPIC::find_redirection_entry_by_vector(u8 vector) const
{
    InterruptDisabler disabler;
    for (size_t index = 0; index < m_redirection_entries_count; index++) {
        if (read_redirection_entry_vector(index) == (InterruptManagement::acquire_mapped_interrupt_number(vector) + IRQ_VECTOR_BASE))
            return index;
    }
    return {};
}
// Masks the redirection entry serving the handler's interrupt, mapping the
// interrupt first if no entry for it exists yet.
void IOAPIC::disable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    u8 interrupt_vector = handler.interrupt_number();
    VERIFY(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
    auto found_index = find_redirection_entry_by_vector(interrupt_vector);
    if (!found_index.has_value()) {
        // Lazily create the redirection entry on first use.
        map_interrupt_redirection(interrupt_vector);
        found_index = find_redirection_entry_by_vector(interrupt_vector);
    }
    VERIFY(found_index.has_value());
    mask_redirection_entry(found_index.value());
}
// Unmasks the redirection entry serving the handler's interrupt, mapping the
// interrupt first if no entry for it exists yet (entries are created masked).
void IOAPIC::enable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    u8 interrupt_vector = handler.interrupt_number();
    VERIFY(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
    auto found_index = find_redirection_entry_by_vector(interrupt_vector);
    if (!found_index.has_value()) {
        // Lazily create the redirection entry on first use.
        map_interrupt_redirection(interrupt_vector);
        found_index = find_redirection_entry_by_vector(interrupt_vector);
    }
    VERIFY(found_index.has_value());
    unmask_redirection_entry(found_index.value());
}
// IOAPIC-routed interrupts are acknowledged at the LOCAL APIC, not at the
// IOAPIC itself; spurious interrupts must go through spurious_eoi() instead.
void IOAPIC::eoi(GenericInterruptHandler const& handler) const
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
    VERIFY(handler.type() != HandlerType::SpuriousInterruptHandler);
    APIC::the().eoi();
}
// The IOAPIC has no PIC-style in-service register; calling this is a bug.
u16 IOAPIC::get_isr() const
{
    InterruptDisabler disabler;
    VERIFY_NOT_REACHED();
}
// The IOAPIC has no PIC-style interrupt-request register; calling this is a bug.
u16 IOAPIC::get_irr() const
{
    InterruptDisabler disabler;
    VERIFY_NOT_REACHED();
}
// Indirect register write: select the register index via the IOREGSEL
// register, then write the value through the IOWIN window. The select/window
// pair makes the ordering of these two stores significant.
void IOAPIC::write_register(u32 index, u32 value) const
{
    InterruptDisabler disabler;
    m_regs->select = index;
    m_regs->window = value;
    dbgln_if(IOAPIC_DEBUG, "IOAPIC Writing, Value {:#x} @ offset {:#x}", (u32)m_regs->window, (u32)m_regs->select);
}
// Indirect register read: select the register index via IOREGSEL, then read
// the value back through the IOWIN window.
u32 IOAPIC::read_register(u32 index) const
{
    InterruptDisabler disabler;
    m_regs->select = index;
    dbgln_if(IOAPIC_DEBUG, "IOAPIC Reading, Value {:#x} @ offset {:#x}", (u32)m_regs->window, (u32)m_regs->select);
    return m_regs->window;
}
}

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/x86_64/IRQController.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
// Memory-mapped IOAPIC register pair: `select` (IOREGSEL) chooses the register
// index, `window` (IOWIN) reads/writes its value. The 12-byte gap between them
// is reserved address space.
struct [[gnu::packed]] ioapic_mmio_regs {
    volatile u32 select;
    u32 reserved[3];
    volatile u32 window;
};
// Immutable record describing how a PCI interrupt line is routed to an IOAPIC
// input pin (bus, polarity/trigger flags, source IRQ, target IOAPIC and pin).
class PCIInterruptOverrideMetadata {
public:
    PCIInterruptOverrideMetadata(u8 bus_id, u8 polarity, u8 trigger_mode, u8 source_irq, u32 ioapic_id, u16 ioapic_int_pin);
    u8 bus() const { return m_bus_id; }
    u8 polarity() const { return m_polarity; }
    u8 trigger_mode() const { return m_trigger_mode; }
    u8 pci_interrupt_pin() const { return m_pci_interrupt_pin; }
    u8 pci_device_number() const { return m_pci_device_number; }
    u32 ioapic_id() const { return m_ioapic_id; }
    u16 ioapic_interrupt_pin() const { return m_ioapic_interrupt_pin; }

private:
    const u8 m_bus_id;
    const u8 m_polarity;
    const u8 m_trigger_mode;
    const u8 m_pci_interrupt_pin;
    const u8 m_pci_device_number;
    const u32 m_ioapic_id;
    const u16 m_ioapic_interrupt_pin;
};
// Driver for an Intel 82093AA-compatible I/O APIC: routes global system
// interrupts (GSIs) to local APIC vectors via per-pin redirection entries,
// accessed through an indirect select/window MMIO register pair.
class IOAPIC final : public IRQController {
public:
    IOAPIC(PhysicalAddress, u32 gsi_base);
    virtual void enable(GenericInterruptHandler const&) override;
    virtual void disable(GenericInterruptHandler const&) override;
    virtual void hard_disable() override;
    virtual void eoi(GenericInterruptHandler const&) const override;
    virtual void spurious_eoi(GenericInterruptHandler const&) const override;
    virtual bool is_vector_enabled(u8 number) const override;
    virtual bool is_enabled() const override;
    virtual u16 get_isr() const override;
    virtual u16 get_irr() const override;
    virtual u32 gsi_base() const override { return m_gsi_base; }
    virtual size_t interrupt_vectors_count() const override { return m_redirection_entries_count; }
    virtual StringView model() const override { return "IOAPIC"sv; };
    virtual IRQControllerType type() const override { return IRQControllerType::i82093AA; }

private:
    // Redirection-entry management (each entry is two 32-bit registers).
    void configure_redirection_entry(size_t index, u8 interrupt_vector, u8 delivery_mode, bool logical_destination, bool active_low, bool trigger_level_mode, bool masked, u8 destination) const;
    void reset_redirection_entry(size_t index) const;
    void map_interrupt_redirection(u8 interrupt_vector);
    void reset_all_redirection_entries() const;

    void mask_all_redirection_entries() const;
    void mask_redirection_entry(u8 index) const;
    void unmask_redirection_entry(u8 index) const;
    bool is_redirection_entry_masked(u8 index) const;

    u8 read_redirection_entry_vector(u8 index) const;
    Optional<int> find_redirection_entry_by_vector(u8 vector) const;
    void configure_redirections() const;

    // Indirect MMIO access through the select/window register pair.
    void write_register(u32 index, u32 value) const;
    u32 read_register(u32 index) const;

    virtual void initialize() override;
    void map_isa_interrupts();
    void map_pci_interrupts();
    void isa_identity_map(size_t index);

    PhysicalAddress m_address;                       // physical base of the MMIO window
    mutable Memory::TypedMapping<ioapic_mmio_regs> m_regs;
    u32 m_gsi_base;                                  // first GSI handled by this IOAPIC
    u8 m_id;
    u8 m_version;
    size_t m_redirection_entries_count;              // number of input pins
};
}

View file

@ -0,0 +1,230 @@
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/Interrupts/PIC.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Sections.h>

namespace Kernel {

// The slave 8259 is connected to the master's IRQ2 line.
// This is really only to enhance clarity.
#define SLAVE_INDEX 2

// I/O ports of the two cascaded 8259 PICs. For each chip, CTL is the
// command/status port and CMD is the data port (which also exposes the
// interrupt mask register, IMR).
#define PIC0_CTL 0x20
#define PIC0_CMD 0x21
#define PIC1_CTL 0xA0
#define PIC1_CMD 0xA1

// Initialization Command Word 1 (ICW1) bits.
#define ICW1_ICW4 0x01      // ICW4 (not) needed
#define ICW1_SINGLE 0x02    // Single (cascade) mode
#define ICW1_INTERVAL4 0x04 // Call address interval 4 (8)
#define ICW1_LEVEL 0x08     // Level triggered (edge) mode
#define ICW1_INIT 0x10      // Initialization - required

// Initialization Command Word 4 (ICW4) bits.
#define ICW4_8086 0x01       // 8086/88 (MCS-80/85) mode
#define ICW4_AUTO 0x02       // Auto (normal) EOI
#define ICW4_BUF_SLAVE 0x08  // Buffered mode/slave
#define ICW4_BUF_MASTER 0x0C // Buffered mode/master
#define ICW4_SFNM 0x10       // Special fully nested (not)
// True when every line on both 8259s is masked (all 16 IMR bits set).
static inline bool is_all_masked(u16 reg)
{
    return reg == 0xFFFF;
}
// The PIC counts as "enabled" while at least one IRQ line is unmasked and
// it hasn't been hard-disabled (e.g. in favor of the IOAPIC).
bool PIC::is_enabled() const
{
    return !is_all_masked(m_cached_irq_mask) && !is_hard_disabled();
}

// Masks the handler's IRQ line in the owning 8259's IMR.
// No-op if the cached mask says the line is already masked.
void PIC::disable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
    u8 irq = handler.interrupt_number();
    if (m_cached_irq_mask & (1 << irq))
        return;
    u8 imr;
    if (irq & 8) {
        // IRQs 8..15 are on the slave PIC; the bit position is irq % 8.
        imr = IO::in8(PIC1_CMD);
        imr |= 1 << (irq & 7);
        IO::out8(PIC1_CMD, imr);
    } else {
        imr = IO::in8(PIC0_CMD);
        imr |= 1 << irq;
        IO::out8(PIC0_CMD, imr);
    }
    m_cached_irq_mask |= 1 << irq;
}

UNMAP_AFTER_INIT PIC::PIC()
{
    initialize();
}

// Handles a spurious interrupt. A spurious IRQ7 (master) needs no EOI at
// all; a spurious IRQ15 (slave) still needs an EOI sent to the *master*
// for the cascade line (IRQ2), because the master doesn't know the slave's
// interrupt was spurious.
void PIC::spurious_eoi(GenericInterruptHandler const& handler) const
{
    VERIFY(handler.type() == HandlerType::SpuriousInterruptHandler);
    if (handler.interrupt_number() == 7)
        return;
    if (handler.interrupt_number() == 15) {
        IO::in8(PIC1_CMD); /* dummy read */
        IO::out8(PIC0_CTL, 0x60 | (2)); // specific EOI for the cascade (IRQ2) on the master
    }
}

// NOTE(review): a set bit in m_cached_irq_mask means the line is *masked*,
// so this returns true for masked (disabled) vectors despite the name —
// confirm that callers rely on exactly this polarity before changing it.
bool PIC::is_vector_enabled(u8 irq) const
{
    return m_cached_irq_mask & (1 << irq);
}

// Unmasks the handler's IRQ line after validating it belongs to this PIC.
void PIC::enable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
    enable_vector(handler.interrupt_number());
}

// Clears the IMR bit for `irq` on the owning chip.
// No-op if the cached mask says the line is already unmasked.
void PIC::enable_vector(u8 irq)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    if (!(m_cached_irq_mask & (1 << irq)))
        return;
    u8 imr;
    if (irq & 8) {
        imr = IO::in8(PIC1_CMD);
        imr &= ~(1 << (irq & 7));
        IO::out8(PIC1_CMD, imr);
    } else {
        imr = IO::in8(PIC0_CMD);
        imr &= ~(1 << irq);
        IO::out8(PIC0_CMD, imr);
    }
    m_cached_irq_mask &= ~(1 << irq);
}

// Sends end-of-interrupt for the handler's IRQ. If the line is masked in
// our cached IMR, the interrupt can't have been a real delivery, so it is
// treated as spurious instead.
void PIC::eoi(GenericInterruptHandler const& handler) const
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    u8 irq = handler.interrupt_number();
    VERIFY(irq >= gsi_base() && irq < interrupt_vectors_count());
    if ((1 << irq) & m_cached_irq_mask) {
        spurious_eoi(handler);
        return;
    }
    eoi_interrupt(irq);
}

// Specific EOI (OCW2: 0x60 | level). Slave IRQs need two EOIs: one to the
// slave for the line itself, and one to the master for the cascade (IRQ2).
void PIC::eoi_interrupt(u8 irq) const
{
    if (irq & 8) {
        IO::in8(PIC1_CMD); /* dummy read */
        IO::out8(PIC1_CTL, 0x60 | (irq & 7));
        IO::out8(PIC0_CTL, 0x60 | (2));
        return;
    }
    IO::in8(PIC0_CMD); /* dummy read */
    IO::out8(PIC0_CTL, 0x60 | irq);
}

// Non-specific EOI (0x20) to both chips, slave first.
void PIC::complete_eoi() const
{
    IO::out8(PIC1_CTL, 0x20);
    IO::out8(PIC0_CTL, 0x20);
}

// Moves the PIC's vectors out of the way and masks every line; used when
// another interrupt controller (the IOAPIC) takes over IRQ delivery.
void PIC::hard_disable()
{
    InterruptDisabler disabler;
    remap(pic_disabled_vector_base);
    IO::out8(PIC0_CMD, 0xff);
    IO::out8(PIC1_CMD, 0xff);
    m_cached_irq_mask = 0xffff;
    IRQController::hard_disable();
}

// Runs the full ICW1..ICW4 initialization sequence, placing the master's
// vectors at `offset` and the slave's at `offset + 8`, then masks all
// lines except the cascade (IRQ2).
void PIC::remap(u8 offset)
{
    /* ICW1 (edge triggered mode, cascading controllers, expect ICW4) */
    IO::out8(PIC0_CTL, ICW1_INIT | ICW1_ICW4);
    IO::out8(PIC1_CTL, ICW1_INIT | ICW1_ICW4);
    /* ICW2 (upper 5 bits specify ISR indices, lower 3 don't specify anything) */
    IO::out8(PIC0_CMD, offset);
    IO::out8(PIC1_CMD, offset + 0x08);
    /* ICW3 (configure master/slave relationship) */
    IO::out8(PIC0_CMD, 1 << SLAVE_INDEX);
    IO::out8(PIC1_CMD, SLAVE_INDEX);
    /* ICW4 (set x86 mode) */
    IO::out8(PIC0_CMD, ICW4_8086);
    IO::out8(PIC1_CMD, ICW4_8086);
    // Mask -- start out with all IRQs disabled.
    IO::out8(PIC0_CMD, 0xff);
    IO::out8(PIC1_CMD, 0xff);
    m_cached_irq_mask = 0xffff;
    // ...except IRQ2, since that's needed for the master to let through slave interrupts.
    enable_vector(2);
}

// Same ICW sequence as remap(), but using the kernel's standard vector base.
// NOTE(review): this duplicates remap(IRQ_VECTOR_BASE) except for the
// m_cached_irq_mask store (already 0xffff at construction) and the dmesgln.
UNMAP_AFTER_INIT void PIC::initialize()
{
    /* ICW1 (edge triggered mode, cascading controllers, expect ICW4) */
    IO::out8(PIC0_CTL, ICW1_INIT | ICW1_ICW4);
    IO::out8(PIC1_CTL, ICW1_INIT | ICW1_ICW4);
    /* ICW2 (upper 5 bits specify ISR indices, lower 3 don't specify anything) */
    IO::out8(PIC0_CMD, IRQ_VECTOR_BASE);
    IO::out8(PIC1_CMD, IRQ_VECTOR_BASE + 0x08);
    /* ICW3 (configure master/slave relationship) */
    IO::out8(PIC0_CMD, 1 << SLAVE_INDEX);
    IO::out8(PIC1_CMD, SLAVE_INDEX);
    /* ICW4 (set x86 mode) */
    IO::out8(PIC0_CMD, ICW4_8086);
    IO::out8(PIC1_CMD, ICW4_8086);
    // Mask -- start out with all IRQs disabled.
    IO::out8(PIC0_CMD, 0xff);
    IO::out8(PIC1_CMD, 0xff);
    // ...except IRQ2, since that's needed for the master to let through slave interrupts.
    enable_vector(2);
    dmesgln("PIC: Cascading mode, vectors {:#02x}-{:#02x}", IRQ_VECTOR_BASE, IRQ_VECTOR_BASE + 0xf);
}
// Reads the combined In-Service Register. OCW3 value 0x0b selects the ISR
// for the next read from each chip's command port; slave bits occupy the
// high byte, master bits the low byte.
u16 PIC::get_isr() const
{
    IO::out8(PIC0_CTL, 0x0b);
    IO::out8(PIC1_CTL, 0x0b);
    u8 const master_bits = IO::in8(PIC0_CTL);
    u8 const slave_bits = IO::in8(PIC1_CTL);
    return static_cast<u16>((slave_bits << 8) | master_bits);
}
// Reads the combined Interrupt Request Register. OCW3 value 0x0a selects
// the IRR for the next read from each chip's command port; slave bits are
// in the high byte, master bits in the low byte.
u16 PIC::get_irr() const
{
    IO::out8(PIC0_CTL, 0x0a);
    IO::out8(PIC1_CTL, 0x0a);
    u8 const master_bits = IO::in8(PIC0_CTL);
    u8 const slave_bits = IO::in8(PIC1_CTL);
    return static_cast<u16>((slave_bits << 8) | master_bits);
}
}

View file

@ -0,0 +1,43 @@
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Types.h>
#include <Kernel/Arch/x86_64/IRQController.h>

namespace Kernel {

// Vector range the PIC is parked on when it is hard-disabled (so any stray
// deliveries don't collide with vectors in active use).
static constexpr size_t pic_disabled_vector_base = 0x20;
static constexpr size_t pic_disabled_vector_end = 0x2f;

// Driver for the classic dual-8259 cascaded PIC (master + slave, 16 lines).
class PIC final : public IRQController {
public:
    PIC();
    virtual void enable(GenericInterruptHandler const&) override;
    virtual void disable(GenericInterruptHandler const&) override;
    virtual void hard_disable() override;
    virtual void eoi(GenericInterruptHandler const&) const override;
    virtual bool is_vector_enabled(u8 number) const override;
    virtual bool is_enabled() const override;
    virtual void spurious_eoi(GenericInterruptHandler const&) const override;
    virtual u16 get_isr() const override;
    virtual u16 get_irr() const override;
    virtual u32 gsi_base() const override { return 0; }
    virtual size_t interrupt_vectors_count() const override { return 16; }
    virtual StringView model() const override { return "Dual i8259"sv; }
    virtual IRQControllerType type() const override { return IRQControllerType::i8259; }

private:
    // Software copy of both IMRs; a set bit means the line is masked.
    u16 m_cached_irq_mask { 0xffff };
    void eoi_interrupt(u8 irq) const;
    void enable_vector(u8 number);
    void remap(u8 offset);
    void complete_eoi() const;
    virtual void initialize() override;
};
}

53
Kernel/Arch/x86_64/MSR.h Normal file
View file

@ -0,0 +1,53 @@
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Types.h>
#include <Kernel/Arch/x86_64/CPUID.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()

namespace Kernel {

// Thin wrapper around a single x86 model-specific register, read/written
// via the RDMSR/WRMSR instructions. Must only be used at CPL 0.
class MSR {
    // The MSR index, placed in ECX for rdmsr/wrmsr.
    uint32_t m_msr;

public:
    // CPUID leaf 1, EDX bit 5 advertises MSR support.
    static bool have()
    {
        CPUID id(1);
        return (id.edx() & (1 << 5)) != 0;
    }

    MSR(const MSR&) = delete;
    MSR& operator=(const MSR&) = delete;

    MSR(uint32_t msr)
        : m_msr(msr)
    {
    }

    // Reads the 64-bit MSR value (rdmsr returns it split across EDX:EAX).
    [[nodiscard]] u64 get()
    {
        u32 low, high;
        asm volatile("rdmsr"
                     : "=a"(low), "=d"(high)
                     : "c"(m_msr));
        return ((u64)high << 32) | low;
    }

    // Writes the 64-bit MSR value (wrmsr takes it split across EDX:EAX).
    void set(u64 value)
    {
        u32 low = value & 0xffffffff;
        u32 high = value >> 32;
        asm volatile("wrmsr" ::"a"(low), "d"(high), "c"(m_msr));
    }
};
}

View file

@ -0,0 +1,28 @@
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Types.h>
#include <Kernel/Arch/x86_64/IO.h>

namespace Kernel {

// RAII scope guard that disables non-maskable interrupts for its lifetime.
// Bit 7 of the CMOS index port (0x70) gates NMI delivery on PC hardware:
// set = NMI disabled, clear = NMI enabled.
class NonMaskableInterruptDisabler {
public:
    NonMaskableInterruptDisabler()
    {
        // Set the NMI-disable bit, preserving the current CMOS register index.
        IO::out8(0x70, IO::in8(0x70) | 0x80);
    }

    ~NonMaskableInterruptDisabler()
    {
        // Clear the NMI-disable bit, re-enabling NMI delivery.
        IO::out8(0x70, IO::in8(0x70) & 0x7F);
    }
};
}

View file

@ -0,0 +1,62 @@
/*
 * Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/PCI/Controller/HostBridge.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Sections.h>

namespace Kernel::PCI {

// Creates the legacy port-I/O host bridge for PCI domain 0, covering
// buses 0x00-0xff. Infallible by contract: allocation failure asserts.
NonnullOwnPtr<HostBridge> HostBridge::must_create_with_io_access()
{
    PCI::Domain domain { 0, 0, 0xff };
    return adopt_own_if_nonnull(new (nothrow) HostBridge(domain)).release_nonnull();
}

HostBridge::HostBridge(PCI::Domain const& domain)
    : HostController(domain)
{
}
// Builds the legacy configuration-space address written to port 0xCF8:
// enable bit (31) | bus (23:16) | device (15:11) | function (10:8) |
// dword-aligned register offset (7:2).
static u32 io_address_for_pci_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u8 field)
{
    u32 address = 0x80000000u; // configuration-space enable bit
    address |= bus.value() << 16u;
    address |= device.value() << 11u;
    address |= function.value() << 8u;
    address |= field & 0xfc; // keep the offset dword-aligned
    return address;
}
// Each accessor below follows the legacy two-port protocol: write the
// target address to 0xCF8 (address_port), then transfer the data through
// 0xCFC (value_port). Sub-dword accesses add the offset-within-dword to
// the value port. Callers are responsible for serializing access.

void HostBridge::write8_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u32 field, u8 value)
{
    IO::out32(PCI::address_port, io_address_for_pci_field(bus, device, function, field));
    IO::out8(PCI::value_port + (field & 3), value);
}

void HostBridge::write16_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u32 field, u16 value)
{
    IO::out32(PCI::address_port, io_address_for_pci_field(bus, device, function, field));
    IO::out16(PCI::value_port + (field & 2), value);
}

void HostBridge::write32_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u32 field, u32 value)
{
    IO::out32(PCI::address_port, io_address_for_pci_field(bus, device, function, field));
    IO::out32(PCI::value_port, value);
}

u8 HostBridge::read8_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u32 field)
{
    IO::out32(PCI::address_port, io_address_for_pci_field(bus, device, function, field));
    return IO::in8(PCI::value_port + (field & 3));
}

u16 HostBridge::read16_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u32 field)
{
    IO::out32(PCI::address_port, io_address_for_pci_field(bus, device, function, field));
    return IO::in16(PCI::value_port + (field & 2));
}

u32 HostBridge::read32_field(BusNumber bus, DeviceNumber device, FunctionNumber function, u32 field)
{
    IO::out32(PCI::address_port, io_address_for_pci_field(bus, device, function, field));
    return IO::in32(PCI::value_port);
}

}

View file

@ -0,0 +1,34 @@
/*
 * Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Bitmap.h>
#include <AK/NonnullOwnPtr.h>
#include <AK/OwnPtr.h>
#include <AK/Vector.h>
#include <Kernel/Bus/PCI/Controller/HostController.h>
#include <Kernel/Locking/Spinlock.h>

namespace Kernel::PCI {

// PCI host controller backed by the legacy x86 port-I/O configuration
// mechanism (ports 0xCF8/0xCFC), as opposed to ECAM/MMIO access.
class HostBridge : public HostController {
public:
    // Factory for the single legacy-I/O bridge on domain 0.
    static NonnullOwnPtr<HostBridge> must_create_with_io_access();

    // Configuration-space accessors; `field` is the register offset.
    virtual void write8_field(BusNumber, DeviceNumber, FunctionNumber, u32 field, u8 value) override;
    virtual void write16_field(BusNumber, DeviceNumber, FunctionNumber, u32 field, u16 value) override;
    virtual void write32_field(BusNumber, DeviceNumber, FunctionNumber, u32 field, u32 value) override;
    virtual u8 read8_field(BusNumber, DeviceNumber, FunctionNumber, u32 field) override;
    virtual u16 read16_field(BusNumber, DeviceNumber, FunctionNumber, u32 field) override;
    virtual u32 read32_field(BusNumber, DeviceNumber, FunctionNumber, u32 field) override;

private:
    explicit HostBridge(PCI::Domain const&);
};
}

View file

@ -0,0 +1,168 @@
/*
 * Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/PCI/IDELegacyModeController.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>

namespace Kernel {

// Factory: constructs the controller; the constructor immediately performs
// full channel setup and device enumeration.
UNMAP_AFTER_INIT NonnullLockRefPtr<PCIIDELegacyModeController> PCIIDELegacyModeController::initialize(PCI::DeviceIdentifier const& device_identifier, bool force_pio)
{
    return adopt_lock_ref(*new PCIIDELegacyModeController(device_identifier, force_pio));
}

UNMAP_AFTER_INIT PCIIDELegacyModeController::PCIIDELegacyModeController(PCI::DeviceIdentifier const& device_identifier, bool force_pio)
    : PCI::Device(device_identifier.address())
    , m_prog_if(device_identifier.prog_if())
    , m_interrupt_line(device_identifier.interrupt_line())
{
    // Enable PIO, MMIO and DMA access for this device before touching BARs.
    PCI::enable_io_space(device_identifier.address());
    PCI::enable_memory_space(device_identifier.address());
    PCI::enable_bus_mastering(device_identifier.address());
    enable_pin_based_interrupts();
    initialize(force_pio);
}

// Prog-if bit 0 = primary channel in PCI native mode, bit 2 = secondary.
// True if *either* channel is in native mode.
bool PCIIDELegacyModeController::is_pci_native_mode_enabled() const
{
    return (m_prog_if.value() & 0x05) != 0;
}

bool PCIIDELegacyModeController::is_pci_native_mode_enabled_on_primary_channel() const
{
    return (m_prog_if.value() & 0x1) == 0x1;
}

bool PCIIDELegacyModeController::is_pci_native_mode_enabled_on_secondary_channel() const
{
    return (m_prog_if.value() & 0x4) == 0x4;
}

// Prog-if bit 7 advertises bus-master (DMA) capability.
bool PCIIDELegacyModeController::is_bus_master_capable() const
{
    return m_prog_if.value() & (1 << 7);
}
// Maps the PCI programming-interface byte of an IDE controller to a
// human-readable description (PCI IDE Controller Specification encodings).
// Any value outside the eight defined encodings is a kernel bug.
static char const* detect_controller_type(u8 programming_value)
{
    switch (programming_value) {
    case 0x00:
        return "ISA Compatibility mode-only controller";
    case 0x05:
        return "PCI native mode-only controller";
    case 0x0A:
        return "ISA Compatibility mode controller, supports both channels switched to PCI native mode";
    case 0x0F:
        return "PCI native mode controller, supports both channels switched to ISA compatibility mode";
    case 0x80:
        return "ISA Compatibility mode-only controller, supports bus mastering";
    case 0x85:
        return "PCI native mode-only controller, supports bus mastering";
    case 0x8A:
        return "ISA Compatibility mode controller, supports both channels switched to PCI native mode, supports bus mastering";
    case 0x8F:
        return "PCI native mode controller, supports both channels switched to ISA compatibility mode, supports bus mastering";
    default:
        // The default case never returns; the duplicate VERIFY_NOT_REACHED()
        // that used to follow the switch was unreachable and has been removed.
        VERIFY_NOT_REACHED();
    }
}
// Sets up both IDE channels: picks legacy ISA ports or PCI-native BARs per
// channel (based on the prog-if byte), wires up bus-master I/O from BAR4,
// then creates, enumerates and enables IRQs for each channel.
//
// Fixes over the previous revision:
//  1. The PCI-native branch for the primary channel declared a *new*
//     `auto primary_base_io_window`, shadowing the outer variable and
//     leaving it null — VERIFY(primary_base_io_window) would then always
//     fail in native mode. The assignment now targets the outer variable.
//  2. The secondary channel's window selection tested the *primary*
//     channel's native-mode bit (copy-paste error); it now tests the
//     secondary channel's bit.
UNMAP_AFTER_INIT void PCIIDELegacyModeController::initialize(bool force_pio)
{
    dbgln("IDE controller @ {}: interrupt line was set to {}", pci_address(), m_interrupt_line.value());
    dbgln("IDE controller @ {}: {}", pci_address(), detect_controller_type(m_prog_if.value()));
    {
        auto bus_master_base = IOAddress(PCI::get_BAR4(pci_address()) & (~1));
        dbgln("IDE controller @ {}: bus master base was set to {}", pci_address(), bus_master_base);
    }
    auto initialize_and_enumerate = [&force_pio](IDEChannel& channel) -> void {
        {
            auto result = channel.allocate_resources_for_pci_ide_controller({}, force_pio);
            // FIXME: Propagate errors properly
            VERIFY(!result.is_error());
        }
        {
            auto result = channel.detect_connected_devices();
            // FIXME: Propagate errors properly
            VERIFY(!result.is_error());
        }
    };
    // Without bus-master capability, DMA is impossible; fall back to PIO.
    if (!is_bus_master_capable())
        force_pio = true;
    OwnPtr<IOWindow> primary_base_io_window;
    OwnPtr<IOWindow> primary_control_io_window;
    if (!is_pci_native_mode_enabled_on_primary_channel()) {
        // Compatibility mode: fixed legacy ISA ports.
        primary_base_io_window = IOWindow::create_for_io_space(IOAddress(0x1F0), 8).release_value_but_fixme_should_propagate_errors();
        primary_control_io_window = IOWindow::create_for_io_space(IOAddress(0x3F6), 4).release_value_but_fixme_should_propagate_errors();
    } else {
        // PCI native mode: BAR0 = command block, BAR1 = control block.
        // (No `auto` here — assign the outer variable, don't shadow it.)
        primary_base_io_window = IOWindow::create_for_pci_device_bar(pci_address(), PCI::HeaderType0BaseRegister::BAR0).release_value_but_fixme_should_propagate_errors();
        auto pci_primary_control_io_window = IOWindow::create_for_pci_device_bar(pci_address(), PCI::HeaderType0BaseRegister::BAR1).release_value_but_fixme_should_propagate_errors();
        // Note: the PCI IDE specification says we should access the IO address with an offset of 2
        // on native PCI IDE controllers.
        primary_control_io_window = pci_primary_control_io_window->create_from_io_window_with_offset(2, 4).release_value_but_fixme_should_propagate_errors();
    }
    VERIFY(primary_base_io_window);
    VERIFY(primary_control_io_window);
    OwnPtr<IOWindow> secondary_base_io_window;
    OwnPtr<IOWindow> secondary_control_io_window;
    if (!is_pci_native_mode_enabled_on_secondary_channel()) {
        // Compatibility mode: fixed legacy ISA ports.
        secondary_base_io_window = IOWindow::create_for_io_space(IOAddress(0x170), 8).release_value_but_fixme_should_propagate_errors();
        secondary_control_io_window = IOWindow::create_for_io_space(IOAddress(0x376), 4).release_value_but_fixme_should_propagate_errors();
    } else {
        // PCI native mode: BAR2 = command block, BAR3 = control block.
        secondary_base_io_window = IOWindow::create_for_pci_device_bar(pci_address(), PCI::HeaderType0BaseRegister::BAR2).release_value_but_fixme_should_propagate_errors();
        auto pci_secondary_control_io_window = IOWindow::create_for_pci_device_bar(pci_address(), PCI::HeaderType0BaseRegister::BAR3).release_value_but_fixme_should_propagate_errors();
        // Note: the PCI IDE specification says we should access the IO address with an offset of 2
        // on native PCI IDE controllers.
        secondary_control_io_window = pci_secondary_control_io_window->create_from_io_window_with_offset(2, 4).release_value_but_fixme_should_propagate_errors();
    }
    VERIFY(secondary_base_io_window);
    VERIFY(secondary_control_io_window);
    // BAR4 holds the bus-master registers: 8 bytes per channel, 16 total.
    auto primary_bus_master_io = IOWindow::create_for_pci_device_bar(pci_address(), PCI::HeaderType0BaseRegister::BAR4, 16).release_value_but_fixme_should_propagate_errors();
    auto secondary_bus_master_io = primary_bus_master_io->create_from_io_window_with_offset(8).release_value_but_fixme_should_propagate_errors();
    // FIXME: On IOAPIC based system, this value might be completely wrong
    // On QEMU for example, it should be "u8 irq_line = 22;" to actually work.
    auto irq_line = m_interrupt_line.value();
    if (is_pci_native_mode_enabled()) {
        VERIFY(irq_line != 0);
    }
    auto primary_channel_io_window_group = IDEChannel::IOWindowGroup { primary_base_io_window.release_nonnull(), primary_control_io_window.release_nonnull(), move(primary_bus_master_io) };
    auto secondary_channel_io_window_group = IDEChannel::IOWindowGroup { secondary_base_io_window.release_nonnull(), secondary_control_io_window.release_nonnull(), move(secondary_bus_master_io) };
    // Native-mode channels share the PCI interrupt line; compatibility-mode
    // channels use their traditional fixed IRQs inside IDEChannel::create.
    if (is_pci_native_mode_enabled_on_primary_channel()) {
        m_channels.append(IDEChannel::create(*this, irq_line, move(primary_channel_io_window_group), IDEChannel::ChannelType::Primary));
    } else {
        m_channels.append(IDEChannel::create(*this, move(primary_channel_io_window_group), IDEChannel::ChannelType::Primary));
    }
    initialize_and_enumerate(m_channels[0]);
    m_channels[0].enable_irq();
    if (is_pci_native_mode_enabled_on_secondary_channel()) {
        m_channels.append(IDEChannel::create(*this, irq_line, move(secondary_channel_io_window_group), IDEChannel::ChannelType::Secondary));
    } else {
        m_channels.append(IDEChannel::create(*this, move(secondary_channel_io_window_group), IDEChannel::ChannelType::Secondary));
    }
    initialize_and_enumerate(m_channels[1]);
    m_channels[1].enable_irq();
}
}

View file

@ -0,0 +1,40 @@
/*
 * Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>

namespace Kernel {

class AsyncBlockDeviceRequest;

// IDE controller exposed as a PCI device ("legacy mode" refers to the
// classic PCI IDE programming interface, where each channel can be in ISA
// compatibility mode or PCI native mode per the prog-if byte).
class PCIIDELegacyModeController final : public IDEController
    , public PCI::Device {
public:
    static NonnullLockRefPtr<PCIIDELegacyModeController> initialize(PCI::DeviceIdentifier const&, bool force_pio);

    bool is_bus_master_capable() const;
    bool is_pci_native_mode_enabled() const;

private:
    bool is_pci_native_mode_enabled_on_primary_channel() const;
    bool is_pci_native_mode_enabled_on_secondary_channel() const;
    PCIIDELegacyModeController(PCI::DeviceIdentifier const&, bool force_pio);

    LockRefPtr<StorageDevice> device_by_channel_and_position(u32 index) const;
    void initialize(bool force_pio);
    void detect_disks();

    // FIXME: Find a better way to get the ProgrammingInterface
    PCI::ProgrammingInterface m_prog_if;
    PCI::InterruptLine m_interrupt_line;
};
}

View file

@ -0,0 +1,93 @@
/*
 * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Initializer.h>
#include <Kernel/CommandLine.h>
#include <Kernel/FileSystem/SysFS/Subsystems/Bus/PCI/BusDirectory.h>
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/Panic.h>
#include <Kernel/Sections.h>

namespace Kernel::PCI {

// True when neither port-I/O probing nor an ACPI MCFG table gave us a way
// to reach PCI configuration space.
READONLY_AFTER_INIT bool g_pci_access_io_probe_failed;
// True when the user disabled PCI entirely on the kernel command line.
READONLY_AFTER_INIT bool g_pci_access_is_disabled_from_commandline;

static bool test_pci_io();

// Decides between ECAM (memory-mapped) and legacy port-I/O configuration
// access: memory addressing requires ACPI with an MCFG table and an explicit
// boot preference; otherwise fall back to port I/O if the probe succeeded.
UNMAP_AFTER_INIT static PCIAccessLevel detect_optimal_access_type()
{
    auto boot_determined = kernel_command_line().pci_access_level();
    // Without ACPI/MCFG, only port-I/O access is possible.
    if (!ACPI::is_enabled() || !ACPI::Parser::the()->find_table("MCFG"sv).has_value())
        return PCIAccessLevel::IOAddressing;
    // Honor a boot-time request for memory addressing.
    if (boot_determined != PCIAccessLevel::IOAddressing)
        return boot_determined;
    // NOTE(review): this returns IOAddressing when the probe did NOT fail;
    // if the probe failed but MCFG exists we panic rather than fall back —
    // confirm that's the intended policy.
    if (!g_pci_access_io_probe_failed)
        return PCIAccessLevel::IOAddressing;
    PANIC("No PCI bus access method detected!");
}

// Probes for PCI access, initializes the chosen access mechanism, exposes
// the bus in SysFS and logs every enumerated device.
UNMAP_AFTER_INIT void initialize()
{
    g_pci_access_is_disabled_from_commandline = kernel_command_line().is_pci_disabled();

    Optional<PhysicalAddress> possible_mcfg;
    // FIXME: There are other arch-specific methods to find the memory range
    // for accessing the PCI configuration space.
    // For example, the QEMU microvm machine type might expose an FDT so we could
    // parse it to find a PCI host bridge.
    if (ACPI::is_enabled()) {
        possible_mcfg = ACPI::Parser::the()->find_table("MCFG"sv);
        // Access fails only if both port I/O and MCFG are unavailable.
        g_pci_access_io_probe_failed = (!test_pci_io()) && (!possible_mcfg.has_value());
    } else {
        g_pci_access_io_probe_failed = !test_pci_io();
    }
    if (g_pci_access_is_disabled_from_commandline || g_pci_access_io_probe_failed)
        return;
    switch (detect_optimal_access_type()) {
    case PCIAccessLevel::MemoryAddressing: {
        VERIFY(possible_mcfg.has_value());
        auto success = Access::initialize_for_multiple_pci_domains(possible_mcfg.value());
        VERIFY(success);
        break;
    }
    case PCIAccessLevel::IOAddressing: {
        auto success = Access::initialize_for_one_pci_domain();
        VERIFY(success);
        break;
    }
    default:
        VERIFY_NOT_REACHED();
    }

    PCIBusSysFSDirectory::initialize();

    // Log all discovered devices (address + vendor/device id).
    MUST(PCI::enumerate([&](DeviceIdentifier const& device_identifier) {
        dmesgln("{} {}", device_identifier.address(), device_identifier.hardware_id());
    }));
}

// Probes the legacy configuration mechanism: the address port (0xCF8)
// latches written values, so reading back the enable bit proves the
// mechanism exists.
UNMAP_AFTER_INIT bool test_pci_io()
{
    dmesgln("Testing PCI via manual probing...");
    u32 tmp = 0x80000000;
    IO::out32(PCI::address_port, tmp);
    tmp = IO::in32(PCI::address_port);
    if (tmp == 0x80000000) {
        dmesgln("PCI IO supported");
        return true;
    }
    dmesgln("PCI IO not supported");
    return false;
}

}

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/PCSpeaker.h>
#include <Kernel/Arch/x86_64/Time/PIT.h>
void PCSpeaker::tone_on(int frequency)
{
IO::out8(PIT_CTL, TIMER2_SELECT | WRITE_WORD | MODE_SQUARE_WAVE);
u16 timer_reload = BASE_FREQUENCY / frequency;
IO::out8(TIMER2_CTL, LSB(timer_reload));
IO::out8(TIMER2_CTL, MSB(timer_reload));
IO::out8(0x61, IO::in8(0x61) | 3);
}
void PCSpeaker::tone_off()
{
IO::out8(0x61, IO::in8(0x61) & ~3);
}

View file

@ -0,0 +1,13 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
class PCSpeaker {
public:
static void tone_on(int frequency);
static void tone_off();
};

View file

@ -0,0 +1,52 @@
/*
* Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2018-2022, James Mintram <me@jamesrm.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Singleton.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Thread.h>
namespace Kernel::Memory {
struct CR3Map {
SpinlockProtected<IntrusiveRedBlackTree<&PageDirectory::m_tree_node>> map { LockRank::None };
};
static Singleton<CR3Map> s_cr3_map;
void PageDirectory::register_page_directory(PageDirectory* directory)
{
s_cr3_map->map.with([&](auto& map) {
map.insert(directory->cr3(), *directory);
});
}
void PageDirectory::deregister_page_directory(PageDirectory* directory)
{
s_cr3_map->map.with([&](auto& map) {
map.remove(directory->cr3());
});
}
LockRefPtr<PageDirectory> PageDirectory::find_current()
{
return s_cr3_map->map.with([&](auto& map) {
return map.find(read_cr3());
});
}
void activate_kernel_page_directory(PageDirectory const& pgd)
{
write_cr3(pgd.cr3());
}
void activate_page_directory(PageDirectory const& pgd, Thread* current_thread)
{
current_thread->regs().cr3 = pgd.cr3();
write_cr3(pgd.cr3());
}
}

View file

@ -0,0 +1,154 @@
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Badge.h>
#include <AK/Types.h>
#include <Kernel/Forward.h>
#include <Kernel/PhysicalAddress.h>

namespace Kernel {

// One x86-64 page directory entry (PDE): a raw 64-bit descriptor in the
// long-mode paging format (Intel SDM vol. 3). Bit 63 is NX, the low 12
// bits are flags, and the middle bits hold the page-table base address.
class PageDirectoryEntry {
public:
    // Physical base address of the page table this entry points at.
    PhysicalPtr page_table_base() const { return PhysicalAddress::physical_page_base(m_raw); }
    void set_page_table_base(PhysicalPtr value)
    {
        // Preserve NX (bit 63) and the low 12 flag bits; replace the address bits.
        m_raw &= 0x8000000000000fffULL;
        m_raw |= PhysicalAddress::physical_page_base(value);
    }

    bool is_null() const { return m_raw == 0; }
    void clear() { m_raw = 0; }

    u64 raw() const { return m_raw; }
    // Badge-restricted: only Memory::PageDirectory may clone raw entries.
    void copy_from(Badge<Memory::PageDirectory>, PageDirectoryEntry const& other) { m_raw = other.m_raw; }

    enum Flags {
        Present = 1 << 0,
        ReadWrite = 1 << 1,
        UserSupervisor = 1 << 2,
        WriteThrough = 1 << 3,
        CacheDisabled = 1 << 4,
        Huge = 1 << 7,       // 2 MiB page mapping instead of a page table
        Global = 1 << 8,
        NoExecute = 0x8000000000000000ULL,
    };

    // Flag accessors; each simply tests or sets the corresponding bit.
    bool is_present() const { return (raw() & Present) == Present; }
    void set_present(bool b) { set_bit(Present, b); }

    bool is_user_allowed() const { return (raw() & UserSupervisor) == UserSupervisor; }
    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

    bool is_huge() const { return (raw() & Huge) == Huge; }
    void set_huge(bool b) { set_bit(Huge, b); }

    bool is_writable() const { return (raw() & ReadWrite) == ReadWrite; }
    void set_writable(bool b) { set_bit(ReadWrite, b); }

    bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; }
    void set_write_through(bool b) { set_bit(WriteThrough, b); }

    bool is_cache_disabled() const { return (raw() & CacheDisabled) == CacheDisabled; }
    void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }

    bool is_global() const { return (raw() & Global) == Global; }
    void set_global(bool b) { set_bit(Global, b); }

    bool is_execute_disabled() const { return (raw() & NoExecute) == NoExecute; }
    void set_execute_disabled(bool b) { set_bit(NoExecute, b); }

private:
    void set_bit(u64 bit, bool value)
    {
        if (value)
            m_raw |= bit;
        else
            m_raw &= ~bit;
    }

    u64 m_raw;
};

// One x86-64 page table entry (PTE); same layout as a PDE except bit 7 is
// PAT (page attribute table index) rather than the huge-page bit.
class PageTableEntry {
public:
    // Physical base address of the 4 KiB page this entry maps.
    PhysicalPtr physical_page_base() const { return PhysicalAddress::physical_page_base(m_raw); }
    void set_physical_page_base(PhysicalPtr value)
    {
        // FIXME: IS THIS PLATFORM SPECIFIC?
        m_raw &= 0x8000000000000fffULL;
        m_raw |= PhysicalAddress::physical_page_base(value);
    }

    u64 raw() const { return m_raw; }

    enum Flags {
        Present = 1 << 0,
        ReadWrite = 1 << 1,
        UserSupervisor = 1 << 2,
        WriteThrough = 1 << 3,
        CacheDisabled = 1 << 4,
        PAT = 1 << 7,
        Global = 1 << 8,
        NoExecute = 0x8000000000000000ULL,
    };

    // Flag accessors; each simply tests or sets the corresponding bit.
    bool is_present() const { return (raw() & Present) == Present; }
    void set_present(bool b) { set_bit(Present, b); }

    bool is_user_allowed() const { return (raw() & UserSupervisor) == UserSupervisor; }
    void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

    bool is_writable() const { return (raw() & ReadWrite) == ReadWrite; }
    void set_writable(bool b) { set_bit(ReadWrite, b); }

    bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; }
    void set_write_through(bool b) { set_bit(WriteThrough, b); }

    bool is_cache_disabled() const { return (raw() & CacheDisabled) == CacheDisabled; }
    void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); }

    bool is_global() const { return (raw() & Global) == Global; }
    void set_global(bool b) { set_bit(Global, b); }

    bool is_execute_disabled() const { return (raw() & NoExecute) == NoExecute; }
    void set_execute_disabled(bool b) { set_bit(NoExecute, b); }

    bool is_pat() const { return (raw() & PAT) == PAT; }
    void set_pat(bool b) { set_bit(PAT, b); }

    bool is_null() const { return m_raw == 0; }
    void clear() { m_raw = 0; }

private:
    void set_bit(u64 bit, bool value)
    {
        if (value)
            m_raw |= bit;
        else
            m_raw &= ~bit;
    }

    u64 m_raw;
};

// Hardware requires 8-byte entries; catch layout drift at compile time.
static_assert(AssertSize<PageDirectoryEntry, 8>());
static_assert(AssertSize<PageTableEntry, 8>());
// A page directory pointer table (PDPT): 512 raw 64-bit entries, each
// (when present) holding the physical address of a page directory.
class PageDirectoryPointerTable {
public:
    // Returns the page directory referenced by entry `index`.
    // NOTE: the returned pointer is the *physical* base address cast to a
    // pointer; it is only directly dereferenceable where physical memory
    // is identity-mapped.
    PageDirectoryEntry* directory(size_t index)
    {
        // Bounds-check against the actual table size. The previous check,
        // `index <= (NumericLimits<size_t>::max() << 30)`, evaluated to an
        // enormous constant on 64-bit and rejected almost nothing, allowing
        // out-of-bounds reads of raw[].
        VERIFY(index < 512);
        return (PageDirectoryEntry*)(PhysicalAddress::physical_page_base(raw[index]));
    }

    u64 raw[512];
};
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,425 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Array.h>
#include <AK/Concepts.h>
#include <AK/Function.h>
#include <AK/Types.h>
#include <Kernel/Arch/DeferredCallEntry.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Arch/ProcessorSpecificDataID.h>
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
#include <Kernel/Arch/x86_64/CPUID.h>
#include <Kernel/Arch/x86_64/DescriptorTable.h>
#include <Kernel/Arch/x86_64/SIMDState.h>
#include <Kernel/Arch/x86_64/TSS.h>
#include <Kernel/Forward.h>
#include <Kernel/KString.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
class ProcessorInfo;
struct ProcessorMessage;
struct ProcessorMessageEntry;
#define MSR_EFER 0xc0000080
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_SFMASK 0xc0000084
#define MSR_FS_BASE 0xc0000100
#define MSR_GS_BASE 0xc0000101
#define MSR_IA32_EFER 0xc0000080
#define MSR_IA32_PAT 0x277
// FIXME: Find a better place for these
extern "C" void thread_context_first_enter(void);
extern "C" void exit_kernel_thread(void);
extern "C" void do_assume_context(Thread* thread, u32 flags);
struct [[gnu::aligned(64), gnu::packed]] FPUState
{
SIMD::LegacyRegion legacy_region;
SIMD::Header xsave_header;
// FIXME: This should be dynamically allocated! For now, we only save the `YMM` registers here,
// so this will do for now. The size of the area is queried via CPUID(EAX=0dh, ECX=2):EAX.
// https://www.intel.com/content/dam/develop/external/us/en/documents/36945
u8 ext_save_area[256];
};
class Processor;
// Note: We only support 64 processors at most at the moment,
// so allocate 64 slots of inline capacity in the container.
constexpr size_t MAX_CPU_COUNT = 64;
using ProcessorContainer = Array<Processor*, MAX_CPU_COUNT>;
// Per-CPU state and the kernel's interface to one logical processor.
// The current CPU's instance is reachable through the GS segment base, which
// is why the static accessors below read fields via read_gs_ptr()/read_gs_value()
// with __builtin_offsetof — this avoids a window where the thread could migrate
// to another CPU between locating the Processor and reading the field.
class Processor {
    friend class ProcessorInfo;

    AK_MAKE_NONCOPYABLE(Processor);
    AK_MAKE_NONMOVABLE(Processor);

    // Self pointer; read via GS to find the current CPU's Processor instance.
    Processor* m_self;

    // Saved user stack for the syscall instruction.
    void* m_user_stack;

    DescriptorTablePointer m_gdtr;
    alignas(Descriptor) Descriptor m_gdt[256];
    u32 m_gdt_length;

    u32 m_cpu;                          // This processor's index (0 = BSP).
    FlatPtr m_in_irq;                   // Non-zero while servicing an IRQ.
    volatile u32 m_in_critical;         // Critical-section nesting depth.
    static Atomic<u32> s_idle_cpu_mask; // Bit n is set while CPU n is idle.

    TSS m_tss;
    static FPUState s_clean_fpu_state;  // Pristine FPU state handed to new threads.
    CPUFeature::Type m_features;
    static Atomic<u32> g_total_processors;
    u8 m_physical_address_bit_width;
    u8 m_virtual_address_bit_width;
    bool m_has_qemu_hvf_quirk;

    ProcessorInfo* m_info;

    Thread* m_current_thread;
    Thread* m_idle_thread;

    Atomic<ProcessorMessageEntry*> m_message_queue; // Pending inter-processor messages.

    bool m_invoke_scheduler_async;
    bool m_scheduler_initialized;
    bool m_in_scheduler;
    Atomic<bool> m_halt_requested;

    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
    DeferredCallEntry* m_free_deferred_call_pool_entry;
    DeferredCallEntry m_deferred_call_pool[5];

    void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];

    void gdt_init();
    void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
    void write_gdt_entry(u16 selector, Descriptor& descriptor);
    static ProcessorContainer& processors();

    static void smp_return_to_pool(ProcessorMessage& msg);
    static ProcessorMessage& smp_get_from_pool();
    static void smp_cleanup_message(ProcessorMessage& msg);
    bool smp_enqueue_message(ProcessorMessage&);
    static void smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async);
    static void smp_broadcast_message(ProcessorMessage& msg);
    static void smp_broadcast_wait_sync(ProcessorMessage& msg);
    static void smp_broadcast_halt();

    void deferred_call_pool_init();
    void deferred_call_execute_pending();
    DeferredCallEntry* deferred_call_get_free();
    void deferred_call_return_to_pool(DeferredCallEntry*);
    void deferred_call_queue_entry(DeferredCallEntry*);

    void cpu_detect();
    void cpu_setup();

public:
    Processor() = default;

    void early_initialize(u32 cpu);
    void initialize(u32 cpu);

    void detect_hypervisor();
    void detect_hypervisor_hyperv(CPUID const& hypervisor_leaf_range);

    // Mark this CPU as (not) idle in the global idle mask.
    void idle_begin() const
    {
        s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
    }

    void idle_end() const
    {
        s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
    }

    static Processor& by_id(u32);

    static u32 count()
    {
        // NOTE: because this value never changes once all APs are booted,
        // we can safely bypass loading it atomically.
        return *g_total_processors.ptr();
    }

    ALWAYS_INLINE static u64 read_cpu_counter()
    {
        return read_tsc();
    }

    ALWAYS_INLINE static void pause()
    {
        asm volatile("pause");
    }

    // Spin-wait hint that also drains pending SMP messages for this CPU.
    ALWAYS_INLINE static void wait_check()
    {
        Processor::pause();
        if (Processor::is_smp_enabled())
            Processor::current().smp_process_pending_messages();
    }

    [[noreturn]] static void halt();

    static void flush_entire_tlb_local()
    {
        // Reloading CR3 invalidates all non-global TLB entries on this CPU.
        write_cr3(read_cr3());
    }

    static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
    static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);

    Descriptor& get_gdt_entry(u16 selector);
    void flush_gdt();
    DescriptorTablePointer const& get_gdtr();

    // Iterate all Processor instances; callback may stop early via IterationDecision::Break.
    template<IteratorFunction<Processor&> Callback>
    static inline IterationDecision for_each(Callback callback)
    {
        auto& procs = processors();
        size_t count = procs.size();
        for (size_t i = 0; i < count; i++) {
            if (callback(*procs[i]) == IterationDecision::Break)
                return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    }

    template<VoidFunction<Processor&> Callback>
    static inline IterationDecision for_each(Callback callback)
    {
        auto& procs = processors();
        size_t count = procs.size();
        for (size_t i = 0; i < count; i++) {
            if (procs[i] != nullptr)
                callback(*procs[i]);
        }
        return IterationDecision::Continue;
    }

    static inline ErrorOr<void> try_for_each(Function<ErrorOr<void>(Processor&)> callback)
    {
        auto& procs = processors();
        size_t count = procs.size();
        for (size_t i = 0; i < count; i++) {
            if (procs[i] != nullptr)
                TRY(callback(*procs[i]));
        }
        return {};
    }

    ALWAYS_INLINE u8 physical_address_bit_width() const { return m_physical_address_bit_width; }
    ALWAYS_INLINE u8 virtual_address_bit_width() const { return m_virtual_address_bit_width; }

    ALWAYS_INLINE ProcessorInfo& info() { return *m_info; }

    u64 time_spent_idle() const;

    static bool is_smp_enabled();

    // Offsets used by assembly (e.g. the syscall entry path) to reach per-CPU fields via GS.
    static constexpr u64 user_stack_offset()
    {
        return __builtin_offsetof(Processor, m_user_stack);
    }

    static constexpr u64 kernel_stack_offset()
    {
        return __builtin_offsetof(Processor, m_tss) + __builtin_offsetof(TSS, rsp0l);
    }

    ALWAYS_INLINE static Processor& current()
    {
        return *(Processor*)read_gs_ptr(__builtin_offsetof(Processor, m_self));
    }

    ALWAYS_INLINE static bool is_initialized()
    {
        return read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
    }

    template<typename T>
    T* get_specific()
    {
        return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
    }

    void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
    {
        m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
    }

    ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
    {
        m_idle_thread = &idle_thread;
    }

    ALWAYS_INLINE static Thread* current_thread()
    {
        // If we were to use Processor::current here, we'd have to
        // disable interrupts to prevent a race where we may get pre-empted
        // right after getting the Processor structure and then get moved
        // to another processor, which would lead us to get the wrong thread.
        // To avoid having to disable interrupts, we can just read the field
        // directly in an atomic fashion, similar to Processor::current.
        return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_current_thread));
    }

    ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
    {
        // See comment in Processor::current_thread
        write_gs_ptr(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
    }

    ALWAYS_INLINE static Thread* idle_thread()
    {
        // See comment in Processor::current_thread
        return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_idle_thread));
    }

    ALWAYS_INLINE u32 id() const
    {
        // NOTE: This variant should only be used when iterating over all
        // Processor instances, or when it's guaranteed that the thread
        // cannot move to another processor in between calling Processor::current
        // and Processor::get_id, or if this fact is not important.
        // All other cases should use Processor::id instead!
        return m_cpu;
    }

    ALWAYS_INLINE static u32 current_id()
    {
        // See comment in Processor::current_thread
        return read_gs_ptr(__builtin_offsetof(Processor, m_cpu));
    }

    ALWAYS_INLINE static bool is_bootstrap_processor()
    {
        return Processor::current_id() == 0;
    }

    ALWAYS_INLINE static FlatPtr current_in_irq()
    {
        return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
    }

    ALWAYS_INLINE static void restore_in_critical(u32 critical)
    {
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), critical);
    }

    ALWAYS_INLINE static void enter_critical()
    {
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
    }

    ALWAYS_INLINE static bool current_in_scheduler()
    {
        return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
    }

    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
    {
        write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
    }

private:
    void do_leave_critical();

public:
    static void leave_critical();
    static u32 clear_critical();

    ALWAYS_INLINE static void restore_critical(u32 prev_critical)
    {
        // NOTE: This doesn't have to be atomic, and it's also fine if we
        // get preempted in between these steps. If we move to another
        // processors m_in_critical will move along with us. And if we
        // are preempted, we would resume with the same flags.
        write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
    }

    ALWAYS_INLINE static u32 in_critical()
    {
        // See comment in Processor::current_thread
        return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
    }

    ALWAYS_INLINE static void verify_no_spinlocks_held()
    {
        VERIFY(!Processor::in_critical());
    }

    ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }

    static void smp_enable();
    bool smp_process_pending_messages();

    static void smp_unicast(u32 cpu, Function<void()>, bool async);
    static void smp_broadcast_flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
    static u32 smp_wake_n_idle_processors(u32 wake_count);

    static void deferred_call_queue(Function<void()> callback);

    ALWAYS_INLINE bool has_nx() const
    {
        return has_feature(CPUFeature::NX);
    }

    ALWAYS_INLINE bool has_pat() const
    {
        return has_feature(CPUFeature::PAT);
    }

    ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
    {
        return m_features.has_flag(feature);
    }

    ALWAYS_INLINE static bool are_interrupts_enabled()
    {
        return Kernel::are_interrupts_enabled();
    }

    ALWAYS_INLINE static void enable_interrupts()
    {
        sti();
    }

    ALWAYS_INLINE static void disable_interrupts()
    {
        cli();
    }

    void check_invoke_scheduler();
    void invoke_scheduler_async() { m_invoke_scheduler_async = true; }

    void enter_trap(TrapFrame& trap, bool raise_irq);
    void exit_trap(TrapFrame& trap);

    [[noreturn]] void initialize_context_switching(Thread& initial_thread);
    NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
    [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
    FlatPtr init_context(Thread& thread, bool leave_crit);
    static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);

    static StringView platform_string();
};
}

View file

@ -0,0 +1,186 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2022, Linus Groh <linusg@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StringBuilder.h>
#include <AK/Types.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/CPUID.h>
#include <Kernel/Arch/x86_64/ProcessorInfo.h>
namespace Kernel {
// Gathers human-readable CPU identification (vendor, brand, features) and
// decodes the family/model/stepping fields from CPUID leaf 1 EAX.
ProcessorInfo::ProcessorInfo(Processor const& processor)
    : m_vendor_id_string(build_vendor_id_string())
    , m_hypervisor_vendor_id_string(build_hypervisor_vendor_id_string(processor))
    , m_brand_string(build_brand_string())
    , m_features_string(build_features_string(processor))
{
    CPUID cpuid(1);
    m_stepping = cpuid.eax() & 0xf;
    u32 model = (cpuid.eax() >> 4) & 0xf;
    u32 family = (cpuid.eax() >> 8) & 0xf;
    m_type = (cpuid.eax() >> 12) & 0x3;
    u32 extended_model = (cpuid.eax() >> 16) & 0xf;
    u32 extended_family = (cpuid.eax() >> 20) & 0xff;

    // Combine the base and extended family/model fields per the rules in the
    // Intel SDM: the extended fields only participate for family 15 (both) and
    // family 6 (extended model only).
    if (family == 15) {
        m_display_family = family + extended_family;
        m_display_model = model + (extended_model << 4);
    } else if (family == 6) {
        m_display_family = family;
        m_display_model = model + (extended_model << 4);
    } else {
        m_display_family = family;
        m_display_model = model;
    }

    // NOTE: Intel exposes detailed CPU's cache information in CPUID 04. On the
    // other hand, AMD uses CPUID's extended function set.
    if (m_vendor_id_string->view() == s_amd_vendor_id)
        populate_cache_sizes_amd();
    else if (m_vendor_id_string->view() == s_intel_vendor_id)
        populate_cache_sizes_intel();
}
// Appends the four bytes of `value` to `builder` as characters,
// least-significant byte first (the layout CPUID uses for text).
static void emit_u32(StringBuilder& builder, u32 value)
{
    for (u32 shift = 0; shift < 32; shift += 8)
        builder.appendff("{:c}", (value >> shift) & 0xff);
}
// Builds the 12-character vendor id (e.g. "GenuineIntel") from CPUID leaf 0.
// The registers are deliberately emitted in EBX, EDX, ECX order — that is the
// order in which the vendor string is laid out by the CPUID instruction.
NonnullOwnPtr<KString> ProcessorInfo::build_vendor_id_string()
{
    CPUID cpuid(0);
    StringBuilder builder;
    emit_u32(builder, cpuid.ebx());
    emit_u32(builder, cpuid.edx());
    emit_u32(builder, cpuid.ecx());
    // NOTE: This isn't necessarily fixed length and might have null terminators at the end.
    return KString::must_create(builder.string_view().trim("\0"sv, TrimMode::Right));
}
// Builds the hypervisor vendor id from CPUID leaf 0x40000000, or an empty
// string when not running under a hypervisor. Note the register order here
// is EBX, ECX, EDX (unlike the hardware vendor id, which uses EBX, EDX, ECX).
NonnullOwnPtr<KString> ProcessorInfo::build_hypervisor_vendor_id_string(Processor const& processor)
{
    if (!processor.has_feature(CPUFeature::HYPERVISOR))
        return KString::must_create({});

    CPUID cpuid(0x40000000);
    StringBuilder builder;
    emit_u32(builder, cpuid.ebx());
    emit_u32(builder, cpuid.ecx());
    emit_u32(builder, cpuid.edx());
    // NOTE: This isn't necessarily fixed length and might have null terminators at the end.
    return KString::must_create(builder.string_view().trim("\0"sv, TrimMode::Right));
}
// Builds the up-to-48-character processor brand string from CPUID extended
// leaves 0x80000002..0x80000004, or an empty string if they are unsupported.
NonnullOwnPtr<KString> ProcessorInfo::build_brand_string()
{
    u32 const max_extended_leaf = CPUID(0x80000000).eax();
    if (max_extended_leaf < 0x80000004)
        return KString::must_create({});

    StringBuilder builder;
    // Each of the three leaves contributes 16 characters, in EAX/EBX/ECX/EDX order.
    for (u32 part = 0; part < 3; ++part) {
        CPUID cpuid(0x80000002 + part);
        emit_u32(builder, cpuid.eax());
        emit_u32(builder, cpuid.ebx());
        emit_u32(builder, cpuid.ecx());
        emit_u32(builder, cpuid.edx());
    }

    // NOTE: This isn't necessarily fixed length and might have null terminators at the end.
    return KString::must_create(builder.string_view().trim("\0"sv, TrimMode::Right));
}
// Builds a space-separated list of the CPU feature names this processor
// supports, by walking every single-bit CPUFeature flag in order.
NonnullOwnPtr<KString> ProcessorInfo::build_features_string(Processor const& processor)
{
    StringBuilder builder;
    bool first = true;
    for (auto feature = CPUFeature::Type(1u); feature != CPUFeature::__End; feature <<= 1u) {
        if (processor.has_feature(feature)) {
            if (first)
                first = false;
            else
                MUST(builder.try_append(' '));
            MUST(builder.try_append(cpu_feature_to_string_view(feature)));
        }
    }
    return KString::must_create(builder.string_view());
}
// Reads L1/L2/L3 cache sizes using AMD's extended CPUID leaves:
// 0x80000005 (L1 data/instruction) and 0x80000006 (L2/L3).
// A zero register value means the corresponding cache is not reported.
void ProcessorInfo::populate_cache_sizes_amd()
{
    auto const max_extended_leaf = CPUID(0x80000000).eax();

    if (max_extended_leaf < 0x80000005)
        return;

    auto const l1_cache_info = CPUID(0x80000005);
    // ECX[31:24] = L1 data cache size in KiB, ECX[7:0] = line size in bytes.
    if (l1_cache_info.ecx() != 0) {
        m_l1_data_cache = Cache {
            .size = ((l1_cache_info.ecx() >> 24) & 0xff) * KiB,
            .line_size = l1_cache_info.ecx() & 0xff,
        };
    }

    // EDX has the same layout for the L1 instruction cache.
    if (l1_cache_info.edx() != 0) {
        m_l1_instruction_cache = Cache {
            .size = ((l1_cache_info.edx() >> 24) & 0xff) * KiB,
            .line_size = l1_cache_info.edx() & 0xff,
        };
    }

    if (max_extended_leaf < 0x80000006)
        return;

    auto const l2_l3_cache_info = CPUID(0x80000006);
    // ECX[31:16] = L2 size in KiB, ECX[7:0] = L2 line size.
    if (l2_l3_cache_info.ecx() != 0) {
        m_l2_cache = Cache {
            .size = ((l2_l3_cache_info.ecx() >> 16) & 0xffff) * KiB,
            .line_size = l2_l3_cache_info.ecx() & 0xff,
        };
    }

    // EDX[31:18] = L3 size in 512 KiB units, EDX[7:0] = L3 line size.
    if (l2_l3_cache_info.edx() != 0) {
        m_l3_cache = Cache {
            .size = (static_cast<u64>((l2_l3_cache_info.edx() >> 18)) & 0x3fff) * 512 * KiB,
            .line_size = l2_l3_cache_info.edx() & 0xff,
        };
    }
}
// Reads cache sizes using Intel's deterministic cache parameters leaf
// (CPUID 0x04). Size = ways * partitions * line size * sets, with each
// field stored as (value - 1) in the respective register bits.
void ProcessorInfo::populate_cache_sizes_intel()
{
    auto const collect_cache_info = [](u32 ecx) {
        auto const cache_info = CPUID(0x04, ecx);

        auto const ways = ((cache_info.ebx() >> 22) & 0x3ff) + 1;
        auto const partitions = ((cache_info.ebx() >> 12) & 0x3ff) + 1;
        auto const line_size = (cache_info.ebx() & 0xfff) + 1;
        auto const sets = cache_info.ecx() + 1;

        return Cache {
            .size = ways * partitions * line_size * sets,
            .line_size = line_size
        };
    };

    // NOTE: Those ECX numbers are the one used on recent Intel CPUs, an algorithm
    //       also exists to retrieve them.
    m_l1_instruction_cache = collect_cache_info(0);
    m_l1_data_cache = collect_cache_info(1);
    m_l2_cache = collect_cache_info(2);
    m_l3_cache = collect_cache_info(3);
}
}

View file

@ -0,0 +1,73 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2022, Linus Groh <linusg@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/KString.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
class Processor;
// Decoded, human-readable identification of a single processor: vendor and
// brand strings, feature list, family/model/stepping, and cache geometry.
class ProcessorInfo {
public:
    ProcessorInfo(Processor const& processor);

    // Size and line size of one cache level, both in bytes.
    struct Cache {
        u64 size;
        u64 line_size;
    };

    StringView vendor_id_string() const { return m_vendor_id_string->view(); }
    StringView hypervisor_vendor_id_string() const { return m_hypervisor_vendor_id_string->view(); }
    StringView brand_string() const { return m_brand_string->view(); }
    StringView features_string() const { return m_features_string->view(); }
    u32 display_model() const { return m_display_model; }
    u32 display_family() const { return m_display_family; }
    u32 stepping() const { return m_stepping; }
    u32 type() const { return m_type; }
    u32 apic_id() const { return m_apic_id; }
    Optional<Cache> const& l1_data_cache() const { return m_l1_data_cache; }
    Optional<Cache> const& l1_instruction_cache() const { return m_l1_instruction_cache; }
    Optional<Cache> const& l2_cache() const { return m_l2_cache; }
    Optional<Cache> const& l3_cache() const { return m_l3_cache; }

    void set_apic_id(u32 apic_id) { m_apic_id = apic_id; }

    static constexpr StringView s_amd_vendor_id = "AuthenticAMD"sv;
    static constexpr StringView s_intel_vendor_id = "GenuineIntel"sv;

private:
    static NonnullOwnPtr<KString> build_vendor_id_string();
    static NonnullOwnPtr<KString> build_hypervisor_vendor_id_string(Processor const&);
    static NonnullOwnPtr<KString> build_brand_string();
    static NonnullOwnPtr<KString> build_features_string(Processor const&);

    void populate_cache_sizes_amd();
    void populate_cache_sizes_intel();

    NonnullOwnPtr<KString> m_vendor_id_string;
    NonnullOwnPtr<KString> m_hypervisor_vendor_id_string;
    NonnullOwnPtr<KString> m_brand_string;
    NonnullOwnPtr<KString> m_features_string;
    u32 m_display_model { 0 };
    u32 m_display_family { 0 };
    u32 m_stepping { 0 };
    u32 m_type { 0 };
    u32 m_apic_id { 0 };
    Optional<Cache> m_l1_data_cache;
    Optional<Cache> m_l1_instruction_cache;
    Optional<Cache> m_l2_cache;
    Optional<Cache> m_l3_cache;
};
}

119
Kernel/Arch/x86_64/RTC.cpp Normal file
View file

@ -0,0 +1,119 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <AK/Time.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/x86_64/CMOS.h>
#include <Kernel/Arch/x86_64/RTC.h>
namespace Kernel::RTC {
static time_t s_boot_time;
// Latches the current RTC wall-clock time as the boot time.
// Must be called once during early boot.
void initialize()
{
    s_boot_time = now();
}
// Returns the wall-clock time recorded by initialize(), at second resolution.
Time boot_time()
{
    return Time::from_timespec({ s_boot_time, 0 });
}
// Returns true while the RTC is mid-update (CMOS status register A, bit 7);
// reading the time registers during an update yields inconsistent values.
static bool update_in_progress()
{
    return CMOS::read(0x0a) & 0x80;
}
// Converts a packed BCD byte (tens digit in the high nibble, ones digit
// in the low nibble) to its plain binary value, e.g. 0x42 -> 42.
static u8 bcd_to_binary(u8 bcd)
{
    u8 const ones = bcd & 0x0F;
    u8 const tens = bcd >> 4;
    return tens * 10 + ones;
}
// Reads one snapshot of the RTC date/time registers into the out-parameters.
// Waits (up to ~100 ms) for any in-progress RTC update to finish; on timeout
// the outputs are set to the Unix epoch and false is returned. Handles both
// BCD/binary and 12/24-hour CMOS modes based on status register B.
static bool try_to_read_registers(unsigned& year, unsigned& month, unsigned& day, unsigned& hour, unsigned& minute, unsigned& second)
{
    // Note: Let's wait 0.01 seconds until we stop trying to query the RTC CMOS
    size_t time_passed_in_milliseconds = 0;
    bool update_in_progress_ended_successfully = false;
    while (time_passed_in_milliseconds < 100) {
        if (!update_in_progress()) {
            update_in_progress_ended_successfully = true;
            break;
        }
        microseconds_delay(1000);
        time_passed_in_milliseconds++;
    }

    if (!update_in_progress_ended_successfully) {
        // Fall back to a fixed, obviously-wrong epoch date so callers still
        // get defined values even on failure.
        year = 1970;
        month = 1;
        day = 1;
        hour = 0;
        minute = 0;
        second = 0;
        return false;
    }

    u8 status_b = CMOS::read(0x0b);

    second = CMOS::read(0x00);
    minute = CMOS::read(0x02);
    hour = CMOS::read(0x04);
    day = CMOS::read(0x07);
    month = CMOS::read(0x08);
    year = CMOS::read(0x09);

    // In 12-hour mode, bit 7 of the hour register is the PM flag; capture it
    // before any BCD conversion strips it off.
    bool is_pm = hour & 0x80;

    // Status B bit 2 clear => registers are in BCD.
    if (!(status_b & 0x04)) {
        second = bcd_to_binary(second);
        minute = bcd_to_binary(minute);
        hour = bcd_to_binary(hour & 0x7F);
        day = bcd_to_binary(day);
        month = bcd_to_binary(month);
        year = bcd_to_binary(year);
    }

    // Status B bit 1 clear => 12-hour mode.
    if (!(status_b & 0x02)) {
        // In the 12 hour clock, midnight and noon are 12, not 0. Map it to 0.
        hour %= 12;

        if (is_pm)
            hour += 12;
    }

    // NOTE(review): the year register only holds the last two digits; this
    // assumes the 21st century (no century register is consulted).
    year += 2000;
    return true;
}
// Returns the current RTC time as seconds since the Unix epoch.
// Reads the registers up to 5 times, accepting a snapshot only when an
// immediate re-read yields identical values (guards against reading while
// the clock ticks over between registers).
time_t now()
{

    auto check_registers_against_preloaded_values = [](unsigned year, unsigned month, unsigned day, unsigned hour, unsigned minute, unsigned second) {
        unsigned checked_year, checked_month, checked_day, checked_hour, checked_minute, checked_second;
        if (!try_to_read_registers(checked_year, checked_month, checked_day, checked_hour, checked_minute, checked_second))
            return false;
        return checked_year == year && checked_month == month && checked_day == day && checked_hour == hour && checked_minute == minute && checked_second == second;
    };

    unsigned year, month, day, hour, minute, second;
    bool did_read_rtc_successfully = false;
    for (size_t attempt = 0; attempt < 5; attempt++) {
        // On failure try_to_read_registers() fills in epoch fallback values,
        // so the variables below are always defined.
        if (!try_to_read_registers(year, month, day, hour, minute, second))
            break;
        if (check_registers_against_preloaded_values(year, month, day, hour, minute, second)) {
            did_read_rtc_successfully = true;
            break;
        }
    }

    dmesgln("RTC: {} Year: {}, month: {}, day: {}, hour: {}, minute: {}, second: {}", (did_read_rtc_successfully ? "" : "(failed to read)"), year, month, day, hour, minute, second);

    time_t days_since_epoch = years_to_days_since_epoch(year) + day_of_year(year, month, day);
    return ((days_since_epoch * 24 + hour) * 60 + minute) * 60 + second;
}
}

17
Kernel/Arch/x86_64/RTC.h Normal file
View file

@ -0,0 +1,17 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/UnixTypes.h>
// Minimal interface to the CMOS real-time clock.
namespace Kernel::RTC {

// Records the boot time; call once during early boot.
void initialize();
// Current RTC time in seconds since the Unix epoch.
time_t now();
// The wall-clock time captured by initialize().
Time boot_time();

}

View file

@ -0,0 +1,165 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <LibC/sys/arch/regs.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
// CPU register state pushed on the kernel stack on interrupt/exception entry.
// The field order and packing must match the assembly entry stubs exactly
// (see the REGISTER_STATE_SIZE static_assert below).
struct [[gnu::packed]] RegisterState {
    FlatPtr rdi;
    FlatPtr rsi;
    FlatPtr rbp;
    FlatPtr rsp;
    FlatPtr rbx;
    FlatPtr rdx;
    FlatPtr rcx;
    FlatPtr rax;
    FlatPtr r8;
    FlatPtr r9;
    FlatPtr r10;
    FlatPtr r11;
    FlatPtr r12;
    FlatPtr r13;
    FlatPtr r14;
    FlatPtr r15;
    // Exception error code and interrupt vector, pushed by the entry stub.
    u16 exception_code;
    u16 isr_number;
    u32 padding;
    // Frame pushed by the CPU itself on interrupt entry.
    FlatPtr rip;
    FlatPtr cs;
    FlatPtr rflags;
    FlatPtr userspace_rsp;
    FlatPtr userspace_ss;

    FlatPtr userspace_sp() const
    {
        return userspace_rsp;
    }
    void set_userspace_sp(FlatPtr value) { userspace_rsp = value; }
    FlatPtr ip() const { return rip; }
    void set_ip(FlatPtr value) { rip = value; }
    void set_dx(FlatPtr value) { rdx = value; }
    FlatPtr bp() const { return rbp; }
    void set_bp(FlatPtr value) { rbp = value; }
    FlatPtr flags() const { return rflags; }
    void set_flags(FlatPtr value) { rflags = value; }
    void set_return_reg(FlatPtr value) { rax = value; }

    void capture_syscall_params(FlatPtr& function, FlatPtr& arg1, FlatPtr& arg2, FlatPtr& arg3, FlatPtr& arg4) const
    {
        // The syscall instruction clobbers rcx, so we must use a different calling convention to 32-bit.
        function = rax;
        arg1 = rdx;
        arg2 = rdi;
        arg3 = rbx;
        arg4 = rsi;
    }
};
#define REGISTER_STATE_SIZE (22 * 8)
static_assert(AssertSize<RegisterState, REGISTER_STATE_SIZE>());
// Copies a trapped thread's saved register state into the ptrace view.
// Segment registers are reported as 0: flat 64-bit mode does not use them,
// and the kernel does not track per-thread selector values here.
inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, RegisterState const& kernel_regs)
{
    ptrace_regs.rax = kernel_regs.rax;
    ptrace_regs.rcx = kernel_regs.rcx;
    ptrace_regs.rdx = kernel_regs.rdx;
    ptrace_regs.rbx = kernel_regs.rbx;
    // The stack pointer at the time of the trap lives in userspace_rsp,
    // not in the (kernel-side) rsp slot.
    ptrace_regs.rsp = kernel_regs.userspace_rsp;
    ptrace_regs.rbp = kernel_regs.rbp;
    ptrace_regs.rsi = kernel_regs.rsi;
    ptrace_regs.rdi = kernel_regs.rdi;
    ptrace_regs.rip = kernel_regs.rip;
    ptrace_regs.r8 = kernel_regs.r8;
    ptrace_regs.r9 = kernel_regs.r9;
    ptrace_regs.r10 = kernel_regs.r10;
    ptrace_regs.r11 = kernel_regs.r11;
    ptrace_regs.r12 = kernel_regs.r12;
    ptrace_regs.r13 = kernel_regs.r13;
    ptrace_regs.r14 = kernel_regs.r14;
    ptrace_regs.r15 = kernel_regs.r15;
    // NOTE: This statement previously ended with a stray comma operator
    // ("... = kernel_regs.rflags,"), which happened to behave correctly but
    // silently fused it with the next assignment.
    ptrace_regs.rflags = kernel_regs.rflags;
    ptrace_regs.cs = 0;
    ptrace_regs.ss = 0;
    ptrace_regs.ds = 0;
    ptrace_regs.es = 0;
    ptrace_regs.fs = 0;
    ptrace_regs.gs = 0;
}
// Applies a ptrace register set to a thread's saved trap frame. Only the
// flag bits in safe_eflags_mask may be changed by the tracer; all other
// rflags bits keep their kernel-saved values.
inline void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_regs, PtraceRegisters const& ptrace_regs)
{
    kernel_regs.rax = ptrace_regs.rax;
    kernel_regs.rcx = ptrace_regs.rcx;
    kernel_regs.rdx = ptrace_regs.rdx;
    kernel_regs.rbx = ptrace_regs.rbx;
    kernel_regs.rsp = ptrace_regs.rsp;
    kernel_regs.rbp = ptrace_regs.rbp;
    kernel_regs.rsi = ptrace_regs.rsi;
    kernel_regs.rdi = ptrace_regs.rdi;
    kernel_regs.rip = ptrace_regs.rip;
    kernel_regs.r8 = ptrace_regs.r8;
    kernel_regs.r9 = ptrace_regs.r9;
    kernel_regs.r10 = ptrace_regs.r10;
    kernel_regs.r11 = ptrace_regs.r11;
    kernel_regs.r12 = ptrace_regs.r12;
    kernel_regs.r13 = ptrace_regs.r13;
    kernel_regs.r14 = ptrace_regs.r14;
    kernel_regs.r15 = ptrace_regs.r15;
    // FIXME: do we need a separate safe_rflags_mask here?
    kernel_regs.rflags = (kernel_regs.rflags & ~safe_eflags_mask) | (ptrace_regs.rflags & safe_eflags_mask);
}
// Snapshot of the x86 hardware debug registers: DR0-DR3 hold breakpoint
// addresses, DR6 is the status register, DR7 the control register.
// (DR4/DR5 are aliases of DR6/DR7 and are not stored.)
struct [[gnu::packed]] DebugRegisterState {
    FlatPtr dr0;
    FlatPtr dr1;
    FlatPtr dr2;
    FlatPtr dr3;
    FlatPtr dr6;
    FlatPtr dr7;
};
// Captures the current CPU's debug registers into `state`.
inline void read_debug_registers_into(DebugRegisterState& state)
{
    state.dr0 = read_dr0();
    state.dr1 = read_dr1();
    state.dr2 = read_dr2();
    state.dr3 = read_dr3();
    state.dr6 = read_dr6();
    state.dr7 = read_dr7();
}
// Loads the CPU's debug registers from a previously captured `state`.
inline void write_debug_registers_from(DebugRegisterState const& state)
{
    write_dr0(state.dr0);
    write_dr1(state.dr1);
    write_dr2(state.dr2);
    write_dr3(state.dr3);
    write_dr6(state.dr6);
    write_dr7(state.dr7);
}
// Disables all hardware breakpoints by zeroing the breakpoint address
// registers and resetting DR7 to its reserved-bits-only value.
// NOTE(review): DR6 (status) is deliberately left untouched here — confirm
// that stale DR6 bits cannot leak into a later debug-exception handler.
inline void clear_debug_registers()
{
    write_dr0(0);
    write_dr1(0);
    write_dr2(0);
    write_dr3(0);
    write_dr7(1 << 10); // Bit 10 is reserved and must be set to 1.
}
}

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2022, Leon Albrecht <leon.a@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/EnumBits.h>
#include <AK/FPControl.h>
#include <AK/Platform.h>
#include <AK/Types.h>
namespace Kernel::SIMD {
// Intel-Manual Vol 1 Chp 13.4
// XSAVE state-component bitmap bits, as used in XSTATE_BV/XCOMP_BV.
// Intel-Manual Vol 1 Chp 13.4
enum StateComponent : u64 {
    X87 = 1ull << 0ull,
    SSE = 1ull << 1ull, // xmm0-xmm7(15)
    AVX = 1ull << 2ull, // ymm0-ymm7(15) hi
    MPX_BNDREGS = 1ull << 3ull,
    MPX_BNDCSR = 1ull << 4ull,
    AVX512_opmask = 1ull << 5ull, // k0 - k9
    AVX512_ZMM_hi = 1ull << 6ull, // 0 - 15
    AVX512_ZMM = 1ull << 7ull,    // 16 - 31 full
    PT = 1ull << 8ull,
    PKRU = 1ull << 9ull,
    CET_U = 1ull << 11ull,
    CET_S = 1ull << 12ull,
    HDC = 1ull << 13ull,
    LBR = 1ull << 15ull,
    HWP = 1ull << 16ull,

    // Bit 63 selects the compacted XSAVE format in XCOMP_BV.
    XCOMP_ENABLE = 1ull << 63ull
};
AK_ENUM_BITWISE_OPERATORS(StateComponent);
// The 512-byte legacy x87/SSE save area (FXSAVE layout, 64-bit form),
// which is also the first region of an XSAVE area.
struct [[gnu::packed]] LegacyRegion {
    AK::X87ControlWord FCW; // x87 control word.
    u16 FSW;                // x87 status word.
    u8 FTW;                 // Abridged x87 tag word.
    u8 : 8;
    u16 FOP;                // Last x87 opcode.

    // 64-bit version
    u64 FIP_64; // Last x87 instruction pointer.
    u64 FDP_64; // Last x87 data pointer.

    AK::MXCSR MXCSR;
    u32 MXCSR_mask;
    u8 st_mmx[128]; // ST0-ST7 / MM0-MM7, 16 bytes per register.
    u8 xmm[256];    // XMM0-XMM15, 16 bytes per register.
    u8 available[96]; // Extra available space
};
static_assert(sizeof(LegacyRegion) == 512);
static_assert(sizeof(LegacyRegion) == 512);
// The 64-byte XSAVE header that follows the legacy region: xstate_bv records
// which components are present, xcomp_bv selects/describes the compacted format.
struct [[gnu::packed]] Header {
    StateComponent xstate_bv;
    StateComponent xcomp_bv;
    u8 reserved[48];
};
static_assert(sizeof(Header) == 64);
static_assert(sizeof(Header) == 64);
}

View file

@ -0,0 +1,346 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Arch/SafeMem.h>
#define CODE_SECTION(section_name) __attribute__((section(section_name)))
extern "C" u8 start_of_safemem_text[];
extern "C" u8 end_of_safemem_text[];
extern "C" u8 safe_memcpy_ins_1[];
extern "C" u8 safe_memcpy_1_faulted[];
extern "C" u8 safe_memcpy_ins_2[];
extern "C" u8 safe_memcpy_2_faulted[];
extern "C" u8 safe_strnlen_ins[];
extern "C" u8 safe_strnlen_faulted[];
extern "C" u8 safe_memset_ins_1[];
extern "C" u8 safe_memset_1_faulted[];
extern "C" u8 safe_memset_ins_2[];
extern "C" u8 safe_memset_2_faulted[];
extern "C" u8 start_of_safemem_atomic_text[];
extern "C" u8 end_of_safemem_atomic_text[];
extern "C" u8 safe_atomic_fetch_add_relaxed_ins[];
extern "C" u8 safe_atomic_fetch_add_relaxed_faulted[];
extern "C" u8 safe_atomic_exchange_relaxed_ins[];
extern "C" u8 safe_atomic_exchange_relaxed_faulted[];
extern "C" u8 safe_atomic_load_relaxed_ins[];
extern "C" u8 safe_atomic_load_relaxed_faulted[];
extern "C" u8 safe_atomic_store_relaxed_ins[];
extern "C" u8 safe_atomic_store_relaxed_faulted[];
extern "C" u8 safe_atomic_compare_exchange_relaxed_ins[];
extern "C" u8 safe_atomic_compare_exchange_relaxed_faulted[];
namespace Kernel {
// Returns true if `address` is canonical for this CPU's virtual address
// width, i.e. all bits above the implemented width are copies of the top
// implemented bit (all zeros or all ones). Dereferencing a non-canonical
// address raises #GP rather than #PF, so the safe_* fixups below would not
// catch it — hence this explicit pre-check.
ALWAYS_INLINE bool validate_canonical_address(size_t address)
{
    auto most_significant_bits = Processor::current().virtual_address_bit_width() - 1;
    auto insignificant_bits = address >> most_significant_bits;
    return insignificant_bits == 0 || insignificant_bits == (0xffffffffffffffffull >> most_significant_bits);
}
// memcpy that survives page faults on either buffer. On success returns true;
// on fault returns false with fault_at set to the faulting address (written
// into rdx by handle_safe_access_fault(), which redirects execution to the
// *_faulted labels). Placed in .text.safemem so the fault handler can
// recognize faulting instruction pointers by address range.
CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memcpy(void* dest_ptr, void const* src_ptr, size_t n, void*& fault_at)
{
    fault_at = nullptr;
    size_t dest = (size_t)dest_ptr;
    // Non-canonical addresses would raise #GP, not #PF, so reject them up front.
    if (!validate_canonical_address(dest)) {
        fault_at = dest_ptr;
        return false;
    }

    size_t src = (size_t)src_ptr;
    if (!validate_canonical_address(src)) {
        fault_at = const_cast<void*>(src_ptr);
        return false;
    }

    size_t remainder;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        // Bulk qword copy; a non-zero rcx afterwards means we faulted mid-copy.
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            ".globl safe_memcpy_ins_1 \n"
            "safe_memcpy_ins_1: \n"
            "rep movsq \n"
            ".globl safe_memcpy_1_faulted \n"
            "safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
            : "=S"(src),
            "=D"(dest),
            "=c"(remainder),
            [fault_at] "=d"(fault_at)
            : "S"(src),
            "D"(dest),
            "c"(size_ts)
            : "memory");
        if (remainder != 0)
            return false; // fault_at is already set!
        n -= size_ts * sizeof(size_t);
        if (n == 0) {
            fault_at = nullptr;
            return true;
        }
    }

    // Copy the remaining tail (or everything, for small/unaligned buffers) bytewise.
    asm volatile(
        ".globl safe_memcpy_ins_2 \n"
        "safe_memcpy_ins_2: \n"
        "rep movsb \n"
        ".globl safe_memcpy_2_faulted \n"
        "safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        : "=c"(remainder),
        [fault_at] "=d"(fault_at)
        : "S"(src),
        "D"(dest),
        "c"(n)
        : "memory");
    if (remainder != 0)
        return false; // fault_at is already set!
    fault_at = nullptr;
    return true;
}
// strnlen that survives page faults: returns the string length (at most
// max_n), or -1 on fault with fault_at set to the faulting address.
CODE_SECTION(".text.safemem")
NEVER_INLINE ssize_t safe_strnlen(char const* str, size_t max_n, void*& fault_at)
{
    if (!validate_canonical_address((size_t)str)) {
        fault_at = const_cast<char*>(str);
        // Signal a fault with -1, matching the assembly fault path below.
        // (Previously this was `return false`, i.e. 0, which callers would
        // misread as a valid zero-length string.)
        return -1;
    }

    ssize_t count = 0;
    fault_at = nullptr;
    asm volatile(
        "1: \n"
        "test %[max_n], %[max_n] \n"
        "je 2f \n"
        "dec %[max_n] \n"
        ".globl safe_strnlen_ins \n"
        "safe_strnlen_ins: \n"
        "cmpb $0,(%[str], %[count], 1) \n"
        "je 2f \n"
        "inc %[count] \n"
        "jmp 1b \n"
        ".globl safe_strnlen_faulted \n"
        "safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        "xor %[count_on_error], %[count_on_error] \n"
        "dec %[count_on_error] \n" // return -1 on fault
        "2:"
        : [count_on_error] "=c"(count),
        [fault_at] "=d"(fault_at)
        : [str] "b"(str),
        [count] "c"(count),
        [max_n] "d"(max_n));
    if (count >= 0)
        fault_at = nullptr;
    return count;
}
// memset that survives page faults on the destination. Returns true on
// success; on fault returns false with fault_at set to the faulting address
// (written into rdx by handle_safe_access_fault()).
CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
    fault_at = nullptr;
    size_t dest = (size_t)dest_ptr;
    // Non-canonical addresses would raise #GP, not #PF, so reject them up front.
    if (!validate_canonical_address(dest)) {
        fault_at = dest_ptr;
        return false;
    }

    size_t remainder;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        // Replicate the fill byte into all 8 bytes of the pattern:
        // `rep stosq` stores the full 64-bit rax. Previously the pattern was
        // only expanded to 32 bits, so the qword path wrote zero bytes into
        // the upper half of every qword for non-zero `c`.
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        expanded_c |= expanded_c << 32;
        asm volatile(
            ".globl safe_memset_ins_1 \n"
            "safe_memset_ins_1: \n"
            "rep stosq \n"
            ".globl safe_memset_1_faulted \n"
            "safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
            : "=D"(dest),
            "=c"(remainder),
            [fault_at] "=d"(fault_at)
            : "D"(dest),
            "a"(expanded_c),
            "c"(size_ts)
            : "memory");
        if (remainder != 0)
            return false; // fault_at is already set!
        n -= size_ts * sizeof(size_t);
        if (n == 0) {
            fault_at = nullptr;
            return true;
        }
    }

    // Fill the remaining tail (or everything, for small/unaligned buffers) bytewise.
    asm volatile(
        ".globl safe_memset_ins_2 \n"
        "safe_memset_ins_2: \n"
        "rep stosb \n"
        ".globl safe_memset_2_faulted \n"
        "safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        : "=D"(dest),
        "=c"(remainder),
        [fault_at] "=d"(fault_at)
        : "D"(dest),
        "c"(n),
        "a"(c)
        : "memory");
    if (remainder != 0)
        return false; // fault_at is already set!
    fault_at = nullptr;
    return true;
}
// Atomic fetch-add that survives a page fault on *var. Returns the previous
// value, or an empty Optional on fault. The fault handler redirects execution
// past the instruction and sets the error flag in edx/rdx.
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_fetch_add_relaxed_ins \n"
        "safe_atomic_fetch_add_relaxed_ins: \n"
        "lock xadd %[result], %[var] \n"
        ".globl safe_atomic_fetch_add_relaxed_faulted \n"
        "safe_atomic_fetch_add_relaxed_faulted: \n"
        : [error] "=d"(error), [result] "=a"(result), [var] "=m"(*var)
        : [val] "a"(val)
        : "memory");
    if (error)
        return {};
    return result;
}
// Atomic exchange that survives a page fault on *var. Returns the previous
// value, or an empty Optional on fault. (xchg with a memory operand is
// implicitly locked, so no explicit lock prefix is needed.)
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* var, u32 val)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_exchange_relaxed_ins \n"
        "safe_atomic_exchange_relaxed_ins: \n"
        "xchg %[val], %[var] \n"
        ".globl safe_atomic_exchange_relaxed_faulted \n"
        "safe_atomic_exchange_relaxed_faulted: \n"
        : [error] "=d"(error), "=a"(result), [var] "=m"(*var)
        : [val] "a"(val)
        : "memory");
    if (error)
        return {};
    return result;
}
// Atomic 32-bit load that survives a page fault on *var. Returns the loaded
// value, or an empty Optional on fault.
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(u32 volatile* var)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_load_relaxed_ins \n"
        "safe_atomic_load_relaxed_ins: \n"
        "mov (%[var]), %[result] \n"
        ".globl safe_atomic_load_relaxed_faulted \n"
        "safe_atomic_load_relaxed_faulted: \n"
        : [error] "=d"(error), [result] "=c"(result)
        : [var] "b"(var)
        : "memory");
    if (error)
        return {};
    return result;
}
CODE_SECTION(".text.safemem.atomic")
// Atomically stores 'val' to *var (relaxed ordering), tolerating a page
// fault. Returns false if the store faulted. Implemented with xchg, which is
// implicitly locked on x86 (the old value is discarded).
NEVER_INLINE bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
{
    bool error;
    asm volatile(
        "xor %[error], %[error] \n" // Clear the error flag; the fault handler sets rdx to 1
        ".globl safe_atomic_store_relaxed_ins \n"
        "safe_atomic_store_relaxed_ins: \n"
        "xchg %[val], %[var] \n"
        ".globl safe_atomic_store_relaxed_faulted \n"
        "safe_atomic_store_relaxed_faulted: \n"
        : [error] "=d"(error), [var] "=m"(*var)
        : [val] "r"(val)
        : "memory");
    return !error;
}
CODE_SECTION(".text.safemem.atomic")
// Atomically compare-and-swaps *var (relaxed ordering), tolerating a page
// fault on the *var access. Returns an empty Optional if the access faulted,
// otherwise whether the exchange took place. On a failed comparison,
// 'expected' is updated with the current value (cmpxchg leaves it in eax).
NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
{
    // NOTE: accessing expected is NOT protected as it should always point
    // to a valid location in kernel memory!
    bool error;
    bool did_exchange;
    asm volatile(
        "xor %[error], %[error] \n" // Clear the error flag; the fault handler sets rdx to 1
        ".globl safe_atomic_compare_exchange_relaxed_ins \n"
        "safe_atomic_compare_exchange_relaxed_ins: \n"
        "lock cmpxchg %[val], %[var] \n" // ZF is set iff the exchange happened
        ".globl safe_atomic_compare_exchange_relaxed_faulted \n"
        "safe_atomic_compare_exchange_relaxed_faulted: \n"
        : [error] "=d"(error), "=a"(expected), [var] "=m"(*var), "=@ccz"(did_exchange)
        : "a"(expected), [val] "b"(val)
        : "memory");
    if (error)
        return {};
    return did_exchange;
}
// Called from the page fault handler when a fault occurs while executing one
// of the "safe" memory access routines. If the faulting IP is one of the
// labelled instructions, redirect execution to the matching *_faulted label so
// the routine can report failure to its caller instead of panicking the
// kernel. Returns true if the fault was handled.
// (Also removes a stray empty statement that followed the ip initialization.)
bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
{
    FlatPtr ip = regs.ip();
    if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
        // If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
        // or safe_memset() then resume at the appropriate _faulted label
        if (ip == (FlatPtr)safe_memcpy_ins_1)
            ip = (FlatPtr)safe_memcpy_1_faulted;
        else if (ip == (FlatPtr)safe_memcpy_ins_2)
            ip = (FlatPtr)safe_memcpy_2_faulted;
        else if (ip == (FlatPtr)safe_strnlen_ins)
            ip = (FlatPtr)safe_strnlen_faulted;
        else if (ip == (FlatPtr)safe_memset_ins_1)
            ip = (FlatPtr)safe_memset_1_faulted;
        else if (ip == (FlatPtr)safe_memset_ins_2)
            ip = (FlatPtr)safe_memset_2_faulted;
        else
            return false;
        regs.set_ip(ip);
        // These routines expect the fault address in edx/rdx.
        regs.set_dx(fault_address);
        return true;
    }
    if (ip >= (FlatPtr)&start_of_safemem_atomic_text && ip < (FlatPtr)&end_of_safemem_atomic_text) {
        // If we detect that a fault happened in one of the atomic safe_
        // functions, resume at the appropriate _faulted label and set
        // the edx/rdx register to 1 to indicate an error
        if (ip == (FlatPtr)safe_atomic_fetch_add_relaxed_ins)
            ip = (FlatPtr)safe_atomic_fetch_add_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_exchange_relaxed_ins)
            ip = (FlatPtr)safe_atomic_exchange_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_load_relaxed_ins)
            ip = (FlatPtr)safe_atomic_load_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_store_relaxed_ins)
            ip = (FlatPtr)safe_atomic_store_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_compare_exchange_relaxed_ins)
            ip = (FlatPtr)safe_atomic_compare_exchange_relaxed_faulted;
        else
            return false;
        regs.set_ip(ip);
        regs.set_dx(1);
        return true;
    }
    return false;
}
}

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/ScopedCritical.h>
#include <Kernel/Arch/Processor.h>
namespace Kernel {
// RAII guard around the per-processor critical section: construction raises
// the critical depth, destruction lowers it again. Moving transfers the
// responsibility to leave.
ScopedCritical::ScopedCritical()
{
    enter();
}

ScopedCritical::~ScopedCritical()
{
    // Only leave if this instance still owns the critical section
    // (it may have been moved-from).
    if (m_valid)
        leave();
}

ScopedCritical::ScopedCritical(ScopedCritical&& from)
    : m_valid(exchange(from.m_valid, false)) // Steal ownership; source no longer leaves on destruction
{
}

ScopedCritical& ScopedCritical::operator=(ScopedCritical&& from)
{
    // NOTE(review): if *this already holds a critical section (m_valid true),
    // it is overwritten without calling leave() — confirm callers never
    // move-assign onto a live guard.
    if (&from != this) {
        m_valid = exchange(from.m_valid, false);
    }
    return *this;
}

void ScopedCritical::leave()
{
    VERIFY(m_valid);
    m_valid = false;
    Processor::leave_critical();
}

void ScopedCritical::enter()
{
    VERIFY(!m_valid);
    m_valid = true;
    Processor::enter_critical();
}
}

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/Shutdown.h>
namespace Kernel {
// Request a guest poweroff from the emulator via magic I/O port writes.
void qemu_shutdown()
{
    // Note: This will invoke QEMU Shutdown, but for other platforms (or emulators),
    // this has no effect on the system.
    // We also try the Bochs/Old QEMU shutdown method, if the first didn't work.
    IO::out16(0x604, 0x2000);  // Modern QEMU shutdown port
    IO::out16(0xb004, 0x2000); // Bochs / older QEMU shutdown port
}

void virtualbox_shutdown()
{
    // VirtualBox-specific poweroff port/value.
    IO::out16(0x4004, 0x3400);
}
}

View file

@ -0,0 +1,14 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
namespace Kernel {
// Emulator-specific poweroff requests via I/O port writes; each is a no-op on
// hardware or emulators that do not implement the respective ports.
void qemu_shutdown();
void virtualbox_shutdown();
}

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
namespace Kernel {
SmapDisabler::SmapDisabler()
    : m_flags(cpu_flags()) // Save RFLAGS so the destructor can restore the previous AC state
{
    // Set EFLAGS.AC to temporarily allow supervisor access to user-mode pages
    // (stac() is a no-op if the CPU lacks SMAP).
    stac();
}
SmapDisabler::~SmapDisabler()
{
    // EFLAGS.AC is bit 18 (0x40000). Re-enable SMAP protection (clac) only if
    // AC was clear when this disabler was constructed, i.e. restore the saved
    // state rather than unconditionally clearing it (supports nesting).
    constexpr FlatPtr eflags_ac_flag = 0x40000;
    if (!(m_flags & eflags_ac_flag))
        clac();
}
}

View file

@ -0,0 +1,82 @@
/*
* Copyright (c) 2021, Owen Smith <yeeetari@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Arch/x86_64/DescriptorTable.h>
#include <Kernel/Arch/x86_64/Processor.h>
extern "C" void syscall_entry();

// Entry point for the 'syscall' instruction (fast system calls).
// On entry the CPU has placed the user RIP in rcx and the user rflags in r11,
// and we are still on the user stack. This builds a RegisterState + TrapFrame
// on the kernel stack, dispatches to syscall_handler(), then unwinds and
// returns to userspace with sysretq.
extern "C" [[gnu::naked]] void syscall_entry()
{
    // clang-format off
    asm(
        // Store the user stack, then switch to the kernel stack.
        " movq %%rsp, %%gs:%c[user_stack] \n"
        " movq %%gs:%c[kernel_stack], %%rsp \n"
        // Build RegisterState.
        " pushq $0x1b \n" // User ss
        " pushq %%gs:%c[user_stack] \n" // User rsp
        " sti \n" // It's now safe to enable interrupts, but we can't index into gs after this point
        " pushq %%r11 \n" // The CPU preserves the user rflags in r11
        " pushq $0x23 \n" // User cs
        " pushq %%rcx \n" // The CPU preserves the user IP in rcx
        " pushq $0 \n" // Error-code/exception slot — NOTE(review): confirm against RegisterState layout
        " pushq %%r15 \n"
        " pushq %%r14 \n"
        " pushq %%r13 \n"
        " pushq %%r12 \n"
        " pushq %%r11 \n"
        " pushq %%r10 \n"
        " pushq %%r9 \n"
        " pushq %%r8 \n"
        " pushq %%rax \n"
        " pushq %%rcx \n"
        " pushq %%rdx \n"
        " pushq %%rbx \n"
        " pushq %%rsp \n"
        " pushq %%rbp \n"
        " pushq %%rsi \n"
        " pushq %%rdi \n"
        " pushq %%rsp \n" // TrapFrame::regs
        " subq $" __STRINGIFY(TRAP_FRAME_SIZE - 8) ", %%rsp \n"
        " movq %%rsp, %%rdi \n" // Pass the TrapFrame* as the first argument
        " call enter_trap_no_irq \n"
        " movq %%rsp, %%rdi \n"
        " call syscall_handler \n"
        " movq %%rsp, %%rdi \n"
        " call exit_trap \n"
        " addq $" __STRINGIFY(TRAP_FRAME_SIZE) ", %%rsp \n" // Pop TrapFrame
        " popq %%rdi \n"
        " popq %%rsi \n"
        " popq %%rbp \n"
        " addq $8, %%rsp \n" // Skip restoring kernel rsp
        " popq %%rbx \n"
        " popq %%rdx \n"
        " popq %%rcx \n"
        " popq %%rax \n"
        " popq %%r8 \n"
        " popq %%r9 \n"
        " popq %%r10 \n"
        " popq %%r11 \n"
        " popq %%r12 \n"
        " popq %%r13 \n"
        " popq %%r14 \n"
        " popq %%r15 \n"
        " addq $8, %%rsp \n" // Skip the $0 slot pushed above
        " popq %%rcx \n" // User RIP, restored to userspace by sysretq
        " addq $16, %%rsp \n" // Skip cs and rflags (sysretq restores rflags from r11)
        // Disable interrupts before we restore the user stack pointer. sysret will re-enable interrupts when it restores
        // rflags.
        " cli \n"
        " popq %%rsp \n"
        " sysretq \n"
        :: [user_stack] "i"(Kernel::Processor::user_stack_offset()), [kernel_stack] "i"(Kernel::Processor::kernel_stack_offset()));
    // clang-format on
}

67
Kernel/Arch/x86_64/TSS.h Normal file
View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
// Legacy 32-bit Task State Segment layout (kept for reference; the x86_64
// kernel uses TSS64 below).
struct [[gnu::packed]] TSS32 {
    u16 backlink, __blh;
    u32 esp0; // Kernel stack pointer used on ring 0 transitions
    u16 ss0, __ss0h;
    u32 esp1;
    u16 ss1, __ss1h;
    u32 esp2;
    u16 ss2, __ss2h;
    u32 cr3, eip, eflags;
    u32 eax, ecx, edx, ebx, esp, ebp, esi, edi;
    u16 es, __esh;
    u16 cs, __csh;
    u16 ss, __ssh;
    u16 ds, __dsh;
    u16 fs, __fsh;
    u16 gs, __gsh;
    u16 ldt, __ldth;
    u16 trace, iomapbase;
};

// 64-bit TSS: holds the privilege-level stack pointers (RSP0-2) and the
// Interrupt Stack Table entries (IST1-7), each stored as low/high u32 halves
// to keep the struct packed without 64-bit alignment padding.
struct [[gnu::packed]] TSS64 {
    u32 __1; // Link?
    u32 rsp0l; // RSP0: stack used on CPL3 -> CPL0 transitions
    u32 rsp0h;
    u32 rsp1l;
    u32 rsp1h;
    u32 rsp2l;
    u32 rsp2h;
    u64 __2; // probably CR3 and EIP?
    u32 ist1l;
    u32 ist1h;
    u32 ist2l;
    u32 ist2h;
    u32 ist3l;
    u32 ist3h;
    u32 ist4l;
    u32 ist4h;
    u32 ist5l;
    u32 ist5h;
    u32 ist6l;
    u32 ist6h;
    u32 ist7l;
    u32 ist7h;
    u64 __3; // GS and LDTR?
    u16 __4;
    u16 iomapbase; // Offset from TSS base to the I/O permission bitmap
};

// The x86_64 kernel only ever uses the 64-bit layout.
using TSS = TSS64;
}

View file

@ -0,0 +1,179 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Arch/x86_64/Time/APICTimer.h>
#include <Kernel/Panic.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>
namespace Kernel {
#define APIC_TIMER_MEASURE_CPU_CLOCK
// Creates the local APIC timer, registers its interrupt handler, and
// calibrates it against another hardware timer. Returns nullptr if
// calibration fails (the partially-constructed timer is then dropped).
UNMAP_AFTER_INIT APICTimer* APICTimer::initialize(u8 interrupt_number, HardwareTimerBase& calibration_source)
{
    // Construct with a null callback; the real callback is installed later.
    auto timer = adopt_lock_ref(*new APICTimer(interrupt_number, nullptr));
    timer->register_interrupt_handler();
    if (!timer->calibrate(calibration_source)) {
        return nullptr;
    }
    return &timer.leak_ref(); // The kernel keeps this timer alive forever
}
UNMAP_AFTER_INIT APICTimer::APICTimer(u8 interrupt_number, Function<void(RegisterState const&)> callback)
    : HardwareTimer<GenericInterruptHandler>(interrupt_number, move(callback))
{
    // The local APIC timer interrupt must not be remapped by the IOAPIC/PIC.
    disable_remap();
}
// Calibrates the local APIC timer by running it for ~100ms while counting
// ticks of an already-calibrated reference timer. Derives the APIC bus
// frequency and the timer period (and, when TSC is available, reports the
// CPU clock speed). Returns false if the measured frequency is implausibly low.
UNMAP_AFTER_INIT bool APICTimer::calibrate(HardwareTimerBase& calibration_source)
{
    VERIFY_INTERRUPTS_DISABLED();
    dmesgln("APICTimer: Using {} as calibration source", calibration_source.model());
    // Shared measurement state, captured by reference in the callbacks below.
    struct {
#ifdef APIC_TIMER_MEASURE_CPU_CLOCK
        bool supports_tsc { Processor::current().has_feature(CPUFeature::TSC) };
#endif
        APIC& apic { APIC::the() };
        size_t ticks_in_100ms { 0 };
        Atomic<size_t, AK::memory_order_relaxed> calibration_ticks { 0 };
#ifdef APIC_TIMER_MEASURE_CPU_CLOCK
        volatile u64 start_tsc { 0 }, end_tsc { 0 };
#endif
        volatile u64 start_reference { 0 }, end_reference { 0 };
        volatile u32 start_apic_count { 0 }, end_apic_count { 0 };
        bool query_reference { false };
    } state;
    state.ticks_in_100ms = calibration_source.ticks_per_second() / 10;
    state.query_reference = calibration_source.can_query_raw();
    // temporarily replace the timer callbacks
    auto original_source_callback = calibration_source.set_callback([&state, &calibration_source](RegisterState const&) {
        u32 current_timer_count = state.apic.get_timer_current_count();
#ifdef APIC_TIMER_MEASURE_CPU_CLOCK
        u64 current_tsc = state.supports_tsc ? read_tsc() : 0;
#endif
        u64 current_reference = state.query_reference ? calibration_source.current_raw() : 0;
        auto prev_tick = state.calibration_ticks.fetch_add(1);
        if (prev_tick == 0) {
            // First reference tick: snapshot the starting counters.
#ifdef APIC_TIMER_MEASURE_CPU_CLOCK
            state.start_tsc = current_tsc;
#endif
            state.start_apic_count = current_timer_count;
            state.start_reference = current_reference;
        } else if (prev_tick + 1 == state.ticks_in_100ms + 1) {
            // Final tick of the measurement window: snapshot the end counters.
#ifdef APIC_TIMER_MEASURE_CPU_CLOCK
            state.end_tsc = current_tsc;
#endif
            state.end_apic_count = current_timer_count;
            state.end_reference = current_reference;
        }
    });
    // Setup a counter that should be much longer than our calibration time.
    // We don't want the APIC timer to actually fire. We do however want the
    // calbibration_source timer to fire so that we can read the current
    // tick count from the APIC timer
    auto original_callback = set_callback([&](RegisterState const&) {
        // TODO: How should we handle this?
        PANIC("APICTimer: Timer fired during calibration!");
    });
    state.apic.setup_local_timer(0xffffffff, APIC::TimerMode::Periodic, true);
    sti();
    // Loop for about 100 ms
    while (state.calibration_ticks.load() <= state.ticks_in_100ms)
        ;
    cli();
    // Restore timer callbacks
    calibration_source.set_callback(move(original_source_callback));
    set_callback(move(original_callback));
    disable_local_timer();
    if (state.query_reference) {
        // Reference supports raw queries: compute the exact frequency.
        u64 one_tick_ns = calibration_source.raw_to_ns((state.end_reference - state.start_reference) / state.ticks_in_100ms);
        m_frequency = (u32)(1000000000ull / one_tick_ns);
        dmesgln("APICTimer: Ticks per second: {} ({}.{}ms)", m_frequency, one_tick_ns / 1000000, one_tick_ns % 1000000);
    } else {
        // For now, assume the frequency is exactly the same
        m_frequency = calibration_source.ticks_per_second();
        dmesgln("APICTimer: Ticks per second: {} (assume same frequency as reference clock)", m_frequency);
    }
    auto delta_apic_count = state.start_apic_count - state.end_apic_count; // The APIC current count register decrements!
    m_timer_period = (delta_apic_count * state.apic.get_timer_divisor()) / state.ticks_in_100ms;
    u64 apic_freq = delta_apic_count * state.apic.get_timer_divisor() * 10;
    dmesgln("APICTimer: Bus clock speed: {}.{} MHz", apic_freq / 1000000, apic_freq % 1000000);
    if (apic_freq < 1000000) {
        dmesgln("APICTimer: Frequency too slow!");
        return false;
    }
#ifdef APIC_TIMER_MEASURE_CPU_CLOCK
    if (state.supports_tsc) {
        auto delta_tsc = (state.end_tsc - state.start_tsc) * 10;
        dmesgln("APICTimer: CPU clock speed: {}.{} MHz", delta_tsc / 1000000, delta_tsc % 1000000);
    }
#endif
    enable_local_timer();
    return true;
}
// Re-arm the local APIC timer with the calibrated period and current mode.
void APICTimer::enable_local_timer()
{
    APIC::the().setup_local_timer(m_timer_period, m_timer_mode, true);
}

// Stop the local APIC timer (zero count, masked).
void APICTimer::disable_local_timer()
{
    APIC::the().setup_local_timer(0, APIC::TimerMode::OneShot, false);
}

size_t APICTimer::ticks_per_second() const
{
    return m_frequency;
}

void APICTimer::set_periodic()
{
    // FIXME: Implement it...
    VERIFY_NOT_REACHED();
}

void APICTimer::set_non_periodic()
{
    // FIXME: Implement it...
    VERIFY_NOT_REACHED();
}

void APICTimer::reset_to_default_ticks_per_second()
{
    // Nothing to do: the frequency is fixed by calibration.
}

bool APICTimer::try_to_set_frequency([[maybe_unused]] size_t frequency)
{
    // The APIC timer frequency is fixed by calibration; report success
    // without changing anything.
    return true;
}

bool APICTimer::is_capable_of_frequency([[maybe_unused]] size_t frequency) const
{
    return false;
}

size_t APICTimer::calculate_nearest_possible_frequency([[maybe_unused]] size_t frequency) const
{
    return 0;
}
}

View file

@ -0,0 +1,47 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Time/HardwareTimer.h>
namespace Kernel {
// Per-CPU local APIC timer, calibrated at boot against another hardware timer
// (e.g. the PIT or HPET).
class APICTimer final : public HardwareTimer<GenericInterruptHandler> {
public:
    // Creates and calibrates the timer; returns nullptr if calibration fails.
    static APICTimer* initialize(u8, HardwareTimerBase&);
    virtual HardwareTimerType timer_type() const override { return HardwareTimerType::LocalAPICTimer; }
    virtual StringView model() const override { return "LocalAPIC"sv; }
    virtual size_t ticks_per_second() const override;
    virtual bool is_periodic() const override { return m_timer_mode == APIC::TimerMode::Periodic; }
    virtual bool is_periodic_capable() const override { return true; }
    virtual void set_periodic() override;
    virtual void set_non_periodic() override;
    virtual void disable() override { }
    virtual void reset_to_default_ticks_per_second() override;
    virtual bool try_to_set_frequency(size_t frequency) override;
    virtual bool is_capable_of_frequency(size_t frequency) const override;
    virtual size_t calculate_nearest_possible_frequency(size_t frequency) const override;
    void will_be_destroyed() override { HardwareTimer<GenericInterruptHandler>::will_be_destroyed(); }
    // Program / stop the local APIC timer using the calibrated period.
    void enable_local_timer();
    void disable_local_timer();

private:
    explicit APICTimer(u8, Function<void(RegisterState const&)>);
    bool calibrate(HardwareTimerBase&);
    u32 m_timer_period { 0 }; // Calibrated count per tick (timer divisor applied)
    APIC::TimerMode m_timer_mode { APIC::TimerMode::Periodic };
};
}

View file

@ -0,0 +1,462 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StringView.h>
#include <Kernel/Arch/x86_64/Time/HPET.h>
#include <Kernel/Arch/x86_64/Time/HPETComparator.h>
#include <Kernel/Debug.h>
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>
namespace Kernel {
#define ABSOLUTE_MAXIMUM_COUNTER_TICK_PERIOD 0x05F5E100
#define NANOSECOND_PERIOD_TO_HERTZ(x) 1000000000 / x
#define HERTZ_TO_MEGAHERTZ(x) (x / 1000000)
// Register bit definitions per the IA-PC HPET specification.
namespace HPETFlags {
enum class Attributes {
    // Bits of the General Capabilities register.
    Counter64BitCapable = 1 << 13,
    LegacyReplacementRouteCapable = 1 << 15
};

enum class Configuration {
    // Bits of the General Configuration register.
    Enable = 1 << 0,
    LegacyReplacementRoute = 1 << 1
};

enum class TimerConfiguration : u32 {
    // Bits of a comparator's Configuration and Capability register.
    LevelTriggered = 1 << 1,
    InterruptEnable = 1 << 2,
    GeneratePeriodicInterrupt = 1 << 3,
    PeriodicInterruptCapable = 1 << 4,
    Timer64BitsCapable = 1 << 5,
    ValueSet = 1 << 6,
    Force32BitMode = 1 << 8,
    FSBInterruptEnable = 1 << 14,
    FSBInterruptDelivery = 1 << 15
};
};

// A 64-bit MMIO register, also addressable as two 32-bit halves.
struct [[gnu::packed]] HPETRegister {
    union {
        volatile u64 full;
        struct {
            volatile u32 low;
            volatile u32 high;
        };
    };
};

// One comparator's register set (0x20 bytes each, starting at offset 0x100).
struct [[gnu::packed]] TimerStructure {
    volatile u32 capabilities;
    volatile u32 interrupt_routing;
    HPETRegister comparator_value;
    volatile u64 fsb_interrupt_route;
    u64 reserved;
};

struct [[gnu::packed]] HPETCapabilityRegister {
    // Note: We must do a 32 bit access to offsets 0x0, or 0x4 only, according to HPET spec.
    volatile u32 attributes;
    volatile u32 main_counter_tick_period;
    u64 reserved;
};

// Memory layout of the whole HPET MMIO register block.
struct [[gnu::packed]] HPETRegistersBlock {
    HPETCapabilityRegister capabilities;
    HPETRegister configuration;
    u64 reserved1;
    HPETRegister interrupt_status;
    u8 reserved2[0xF0 - 0x28];
    HPETRegister main_counter_value;
    u64 reserved3;
    TimerStructure timers[32];
};

static_assert(__builtin_offsetof(HPETRegistersBlock, main_counter_value) == 0xf0);
static_assert(__builtin_offsetof(HPETRegistersBlock, timers[0]) == 0x100);
static_assert(__builtin_offsetof(HPETRegistersBlock, timers[1]) == 0x120);

// Note: The HPET specification says it reserves the range of byte 0x160 to
// 0x400 for comparators 3-31, but for implementing all 32 comparators the HPET
// MMIO space has to be 1280 bytes and not 1024 bytes.
static_assert(AssertSize<HPETRegistersBlock, 0x500>());

// On x86_64 an aligned 64-bit load is atomic, so reading the register is a
// plain access (presumably named for the former i686 build, which needed a
// tear-free read sequence — the 32-bit variant was removed with i686 support).
static u64 read_register_safe64(HPETRegister const& reg)
{
    return reg.full;
}
static HPET* s_hpet;
static bool hpet_initialized { false };

// True once test_and_initialize() has been attempted (even if no usable HPET
// was found; s_hpet is only set when initialization succeeded).
bool HPET::initialized()
{
    return hpet_initialized;
}

HPET& HPET::the()
{
    VERIFY(HPET::initialized());
    VERIFY(s_hpet != nullptr);
    return *s_hpet;
}
// Looks for an ACPI HPET table and, if a suitable timer block exists,
// constructs the HPET singleton. Returns false when no usable HPET is found.
UNMAP_AFTER_INIT bool HPET::test_and_initialize()
{
    VERIFY(!HPET::initialized());
    // Mark as attempted up front so initialized() is true even on failure.
    hpet_initialized = true;
    auto hpet_table = ACPI::Parser::the()->find_table("HPET"sv);
    if (!hpet_table.has_value())
        return false;
    dmesgln("HPET @ {}", hpet_table.value());
    auto sdt_or_error = Memory::map_typed<ACPI::Structures::HPET>(hpet_table.value());
    if (sdt_or_error.is_error()) {
        dbgln("Failed mapping HPET table");
        return false;
    }
    // Note: HPET is only usable from System Memory
    VERIFY(sdt_or_error.value()->event_timer_block.address_space == (u8)ACPI::GenericAddressStructure::AddressSpace::SystemMemory);
    if (TimeManagement::is_hpet_periodic_mode_allowed()) {
        if (!check_for_exisiting_periodic_timers()) {
            dbgln("HPET: No periodic capable timers");
            return false;
        }
    }
    // The constructor publishes itself through s_hpet.
    new HPET(PhysicalAddress(hpet_table.value()));
    return true;
}

// Pre-construction probe: maps the register block and scans each comparator's
// capability bits for periodic-interrupt support.
UNMAP_AFTER_INIT bool HPET::check_for_exisiting_periodic_timers()
{
    auto hpet_table = ACPI::Parser::the()->find_table("HPET"sv);
    if (!hpet_table.has_value())
        return false;
    auto sdt_or_error = Memory::map_typed<ACPI::Structures::HPET>(hpet_table.value());
    if (sdt_or_error.is_error())
        return false;
    auto sdt = sdt_or_error.release_value();
    VERIFY(sdt->event_timer_block.address_space == 0); // 0 == System Memory
    auto registers_or_error = Memory::map_typed<HPETRegistersBlock>(PhysicalAddress(sdt->event_timer_block.address));
    if (registers_or_error.is_error())
        return false;
    auto registers = registers_or_error.release_value();
    // Timer count = NUM_TIM_CAP field (bits 8..12 of capabilities) + 1.
    size_t timers_count = ((registers->capabilities.attributes >> 8) & 0x1f) + 1;
    for (size_t index = 0; index < timers_count; index++) {
        if (registers->timers[index].capabilities & (u32)HPETFlags::TimerConfiguration::PeriodicInterruptCapable)
            return true;
    }
    return false;
}
// Stop the main counter (clears the Enable bit in the configuration register).
void HPET::global_disable()
{
    auto& regs = registers();
    regs.configuration.low = regs.configuration.low & ~(u32)HPETFlags::Configuration::Enable;
}

// Start the main counter (sets the Enable bit in the configuration register).
void HPET::global_enable()
{
    auto& regs = registers();
    regs.configuration.low = regs.configuration.low | (u32)HPETFlags::Configuration::Enable;
}
// Reprograms every comparator after a periodic frequency change: the HPET
// must be stopped, the main counter reset, and each comparator's value
// rewritten relative to the new zero point.
void HPET::update_periodic_comparator_value()
{
    // According to 2.3.9.2.2 the only safe way to change the periodic timer frequency
    // is to disable all periodic timers, reset the main counter and each timer's comparator value.
    // This introduces time drift, so it should be avoided unless absolutely necessary.
    global_disable();
    auto& regs = registers();
    u64 previous_main_value = (u64)regs.main_counter_value.low | ((u64)regs.main_counter_value.high << 32);
    // Account for the time elapsed since the last read, since we're zeroing the counter.
    m_main_counter_drift += previous_main_value - m_main_counter_last_read;
    m_main_counter_last_read = 0;
    regs.main_counter_value.low = 0;
    if (m_main_counter_64bits)
        regs.main_counter_value.high = 0;
    for (auto& comparator : m_comparators) {
        auto& timer = regs.timers[comparator.comparator_number()];
        if (!comparator.is_enabled())
            continue;
        if (comparator.is_periodic()) {
            // Note that this means we're restarting all periodic timers. There is no
            // way to resume periodic timers properly because we reset the main counter
            // and we can only write the period into the comparator value...
            timer.capabilities = timer.capabilities | (u32)HPETFlags::TimerConfiguration::ValueSet;
            u64 value = ns_to_raw_counter_ticks(1000000000ull / comparator.ticks_per_second());
            dbgln_if(HPET_DEBUG, "HPET: Update periodic comparator {} comparator value to {} main value was: {}",
                comparator.comparator_number(),
                value,
                previous_main_value);
            timer.comparator_value.low = (u32)value;
            if (comparator.is_64bit_capable()) {
                // ValueSet must be re-armed before each half of the comparator write.
                timer.capabilities = timer.capabilities | (u32)HPETFlags::TimerConfiguration::ValueSet;
                timer.comparator_value.high = (u32)(value >> 32);
            }
        } else {
            // Set the new target comparator value to the delta to the remaining ticks
            u64 current_value = (u64)timer.comparator_value.low | ((u64)timer.comparator_value.high << 32);
            u64 value = current_value - previous_main_value;
            dbgln_if(HPET_DEBUG, "HPET: Update non-periodic comparator {} comparator value from {} to {} main value was: {}",
                comparator.comparator_number(),
                current_value,
                value,
                previous_main_value);
            timer.comparator_value.low = (u32)value;
            if (comparator.is_64bit_capable())
                timer.comparator_value.high = (u32)(value >> 32);
        }
    }
    global_enable();
}
// Arms a one-shot comparator to fire one tick-interval from "now".
void HPET::update_non_periodic_comparator_value(HPETComparator const& comparator)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(!comparator.is_periodic());
    VERIFY(comparator.comparator_number() <= m_comparators.size());
    auto& regs = registers();
    auto& timer = regs.timers[comparator.comparator_number()];
    u64 value = frequency() / comparator.ticks_per_second();
    // NOTE: If the main counter passes this new value before we finish writing it, we will never receive an interrupt!
    u64 new_counter_value = read_main_counter() + value;
    timer.comparator_value.high = (u32)(new_counter_value >> 32);
    timer.comparator_value.low = (u32)new_counter_value;
}

// Advances the wall-clock bookkeeping from the main counter, handling counter
// wrap-around; returns the nanoseconds elapsed since the previous call.
u64 HPET::update_time(u64& seconds_since_boot, u32& ticks_this_second, bool query_only)
{
    // Should only be called by the time keeper interrupt handler!
    u64 current_value = read_main_counter();
    u64 delta_ticks = m_main_counter_drift;
    if (current_value >= m_main_counter_last_read) {
        delta_ticks += current_value - m_main_counter_last_read;
    } else {
        // the counter wrapped around
        if (m_main_counter_64bits) {
            delta_ticks += (NumericLimits<u64>::max() - m_main_counter_last_read + 1) + current_value;
        } else {
            delta_ticks += (NumericLimits<u32>::max() - m_main_counter_last_read + 1) + current_value;
            m_32bit_main_counter_wraps++;
        }
    }
    u64 ticks_since_last_second = (u64)ticks_this_second + delta_ticks;
    auto ticks_per_second = frequency();
    seconds_since_boot += ticks_since_last_second / ticks_per_second;
    ticks_this_second = ticks_since_last_second % ticks_per_second;
    if (!query_only) {
        // Commit: the accumulated drift is now folded into the reported time.
        m_main_counter_drift = 0;
        m_main_counter_last_read = current_value;
    }
    // Return the time passed (in ns) since last time update_time was called
    return (delta_ticks * 1000000000ull) / ticks_per_second;
}

// Reads the main counter without compensating for a concurrent 32-bit wrap
// (caller must tolerate a stale wrap count).
u64 HPET::read_main_counter_unsafe() const
{
    auto& main_counter = registers().main_counter_value;
    if (m_main_counter_64bits)
        return ((u64)main_counter.high << 32) | (u64)main_counter.low;
    return ((u64)m_32bit_main_counter_wraps << 32) | (u64)main_counter.low;
}

// Reads the main counter; for 32-bit counters, detects a wrap that happened
// since the last committed read and accounts for it in the returned value.
u64 HPET::read_main_counter() const
{
    if (m_main_counter_64bits)
        return read_register_safe64(registers().main_counter_value);
    auto& main_counter = registers().main_counter_value;
    u32 wraps = m_32bit_main_counter_wraps;
    u32 last_read_value = m_main_counter_last_read & 0xffffffff;
    u32 current_value = main_counter.low;
    if (current_value < last_read_value)
        wraps++;
    return ((u64)wraps << 32) | (u64)current_value;
}
// Puts a comparator into periodic mode (requires hardware periodic
// capability); the comparator is masked while the mode bit is flipped.
void HPET::enable_periodic_interrupt(HPETComparator const& comparator)
{
    dbgln_if(HPET_DEBUG, "HPET: Set comparator {} to be periodic.", comparator.comparator_number());
    disable(comparator);
    VERIFY(comparator.comparator_number() <= m_comparators.size());
    auto& timer = registers().timers[comparator.comparator_number()];
    auto capabilities = timer.capabilities;
    VERIFY(capabilities & (u32)HPETFlags::TimerConfiguration::PeriodicInterruptCapable);
    timer.capabilities = capabilities | (u32)HPETFlags::TimerConfiguration::GeneratePeriodicInterrupt;
    if (comparator.is_enabled())
        enable(comparator);
}

// Returns a comparator to one-shot mode; masked during the change, like above.
void HPET::disable_periodic_interrupt(HPETComparator const& comparator)
{
    dbgln_if(HPET_DEBUG, "HPET: Disable periodic interrupt in comparator {}", comparator.comparator_number());
    disable(comparator);
    VERIFY(comparator.comparator_number() <= m_comparators.size());
    auto& timer = registers().timers[comparator.comparator_number()];
    auto capabilities = timer.capabilities;
    VERIFY(capabilities & (u32)HPETFlags::TimerConfiguration::PeriodicInterruptCapable);
    timer.capabilities = capabilities & ~(u32)HPETFlags::TimerConfiguration::GeneratePeriodicInterrupt;
    if (comparator.is_enabled())
        enable(comparator);
}

// Masks a comparator's interrupt generation.
void HPET::disable(HPETComparator const& comparator)
{
    dbgln_if(HPET_DEBUG, "HPET: Disable comparator {}", comparator.comparator_number());
    VERIFY(comparator.comparator_number() <= m_comparators.size());
    auto& timer = registers().timers[comparator.comparator_number()];
    timer.capabilities = timer.capabilities & ~(u32)HPETFlags::TimerConfiguration::InterruptEnable;
}

// Unmasks a comparator's interrupt generation.
void HPET::enable(HPETComparator const& comparator)
{
    dbgln_if(HPET_DEBUG, "HPET: Enable comparator {}", comparator.comparator_number());
    VERIFY(comparator.comparator_number() <= m_comparators.size());
    auto& timer = registers().timers[comparator.comparator_number()];
    timer.capabilities = timer.capabilities | (u32)HPETFlags::TimerConfiguration::InterruptEnable;
}
// Returns the IRQ lines this comparator can be routed to.
// Delegates to the comparator-number overload: both overloads previously
// duplicated the same interrupt-routing bitfield scan (and the same bounds
// VERIFY), so keep a single implementation.
Vector<unsigned> HPET::capable_interrupt_numbers(HPETComparator const& comparator)
{
    return capable_interrupt_numbers(comparator.comparator_number());
}
// Returns the IRQ lines this comparator can be routed to, by scanning the
// 32-bit interrupt-routing capability bitfield (bit N set => IRQ N allowed).
Vector<unsigned> HPET::capable_interrupt_numbers(u8 comparator_number)
{
    VERIFY(comparator_number <= m_comparators.size());
    auto& comparator_registers = registers().timers[comparator_number];
    u32 const routing_bits = comparator_registers.interrupt_routing;
    Vector<unsigned> capable_interrupts;
    for (size_t bit = 0; bit < 32; bit++) {
        if ((routing_bits >> bit) & 1)
            capable_interrupts.append(bit);
    }
    return capable_interrupts;
}
// Routes a comparator's interrupt to the given IRQ line.
void HPET::set_comparator_irq_vector(u8 comparator_number, u8 irq_vector)
{
    VERIFY(comparator_number <= m_comparators.size());
    auto& comparator_registers = registers().timers[comparator_number];
    // The routing field starts at bit 9 of the configuration register.
    // NOTE(review): this ORs the vector in without clearing the field first —
    // fine while the field starts out zero; confirm before re-routing.
    comparator_registers.capabilities = comparator_registers.capabilities | (irq_vector << 9);
}

bool HPET::is_periodic_capable(u8 comparator_number) const
{
    VERIFY(comparator_number <= m_comparators.size());
    auto& comparator_registers = registers().timers[comparator_number];
    return comparator_registers.capabilities & (u32)HPETFlags::TimerConfiguration::PeriodicInterruptCapable;
}

bool HPET::is_64bit_capable(u8 comparator_number) const
{
    VERIFY(comparator_number <= m_comparators.size());
    auto& comparator_registers = registers().timers[comparator_number];
    return comparator_registers.capabilities & (u32)HPETFlags::TimerConfiguration::Timer64BitsCapable;
}

void HPET::set_comparators_to_optimal_interrupt_state(size_t)
{
    // FIXME: Implement this method for allowing to use HPET timers 2-31...
    VERIFY_NOT_REACHED();
}

// Reads the register block's physical address out of the ACPI HPET table.
PhysicalAddress HPET::find_acpi_hpet_registers_block()
{
    auto sdt = Memory::map_typed<const volatile ACPI::Structures::HPET>(m_physical_acpi_hpet_table).release_value_but_fixme_should_propagate_errors();
    VERIFY(sdt->event_timer_block.address_space == (u8)ACPI::GenericAddressStructure::AddressSpace::SystemMemory);
    return PhysicalAddress(sdt->event_timer_block.address);
}

// The register block lives inside m_hpet_mmio_region at the table's page offset.
HPETRegistersBlock const& HPET::registers() const
{
    return *(HPETRegistersBlock const*)m_hpet_mmio_region->vaddr().offset(m_physical_acpi_hpet_registers.offset_in_page()).as_ptr();
}

HPETRegistersBlock& HPET::registers()
{
    return *(HPETRegistersBlock*)m_hpet_mmio_region->vaddr().offset(m_physical_acpi_hpet_registers.offset_in_page()).as_ptr();
}

// main_counter_tick_period is in femtoseconds per tick.
u64 HPET::raw_counter_ticks_to_ns(u64 raw_ticks) const
{
    // ABSOLUTE_MAXIMUM_COUNTER_TICK_PERIOD == 100 nanoseconds
    return (raw_ticks * (u64)registers().capabilities.main_counter_tick_period * 100ull) / ABSOLUTE_MAXIMUM_COUNTER_TICK_PERIOD;
}

u64 HPET::ns_to_raw_counter_ticks(u64 ns) const
{
    // ns -> femtoseconds, divided by femtoseconds-per-tick.
    return (ns * 1000000ull) / (u64)registers().capabilities.main_counter_tick_period;
}
// Maps the HPET MMIO block, probes its capabilities, resets the main counter,
// and creates the two comparators used by the time management code.
UNMAP_AFTER_INIT HPET::HPET(PhysicalAddress acpi_hpet)
    : m_physical_acpi_hpet_table(acpi_hpet)
    , m_physical_acpi_hpet_registers(find_acpi_hpet_registers_block())
    , m_hpet_mmio_region(MM.allocate_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value())
{
    s_hpet = this; // Make available as soon as possible so that IRQs can use it
    auto sdt = Memory::map_typed<const volatile ACPI::Structures::HPET>(m_physical_acpi_hpet_table).release_value_but_fixme_should_propagate_errors();
    m_vendor_id = sdt->pci_vendor_id;
    m_minimum_tick = sdt->mininum_clock_tick;
    dmesgln("HPET: Minimum clock tick - {}", m_minimum_tick);
    auto& regs = registers();
    // Note: We must do a 32 bit access to offsets 0x0, or 0x4 only.
    size_t timers_count = ((regs.capabilities.attributes >> 8) & 0x1f) + 1;
    m_main_counter_64bits = (regs.capabilities.attributes & (u32)HPETFlags::Attributes::Counter64BitCapable) != 0;
    dmesgln("HPET: Timers count - {}", timers_count);
    dmesgln("HPET: Main counter size: {}", (m_main_counter_64bits ? "64-bit" : "32-bit"));
    for (size_t i = 0; i < timers_count; i++) {
        bool capable_64_bit = regs.timers[i].capabilities & (u32)HPETFlags::TimerConfiguration::Timer64BitsCapable;
        dmesgln("HPET: Timer[{}] comparator size: {}, mode: {}", i,
            (capable_64_bit ? "64-bit" : "32-bit"),
            ((!capable_64_bit || (regs.timers[i].capabilities & (u32)HPETFlags::TimerConfiguration::Force32BitMode)) ? "32-bit" : "64-bit"));
    }
    VERIFY(timers_count >= 2); // The spec guarantees at least 3, we need at least 2
    global_disable();

    m_frequency = NANOSECOND_PERIOD_TO_HERTZ(raw_counter_ticks_to_ns(1));
    dmesgln("HPET: frequency {} Hz ({} MHz) resolution: {} ns", m_frequency, HERTZ_TO_MEGAHERTZ(m_frequency), raw_counter_ticks_to_ns(1));
    VERIFY(regs.capabilities.main_counter_tick_period <= ABSOLUTE_MAXIMUM_COUNTER_TICK_PERIOD);

    // Reset the counter, just in case... (needs to match m_main_counter_last_read)
    regs.main_counter_value.high = 0;
    regs.main_counter_value.low = 0;
    if (regs.capabilities.attributes & (u32)HPETFlags::Attributes::LegacyReplacementRouteCapable)
        regs.configuration.low = regs.configuration.low | (u32)HPETFlags::Configuration::LegacyReplacementRoute;

    // Comparator 0 -> IRQ 0 (timekeeping), comparator 1 -> IRQ 8.
    m_comparators.append(HPETComparator::create(0, 0, is_periodic_capable(0), is_64bit_capable(0)));
    m_comparators.append(HPETComparator::create(1, 8, is_periodic_capable(1), is_64bit_capable(1)));

    global_enable();
}
}

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <Kernel/Library/NonnullLockRefPtrVector.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel {
class HPETComparator;
struct HPETRegistersBlock;
// Driver for the High Precision Event Timer (HPET).
// Singleton (see the()/initialized()); owns the memory-mapped HPET register
// block and the per-comparator HPETComparator objects built on top of it.
class HPET {
public:
    static bool initialized();
    static bool test_and_initialize();
    // NOTE(review): "exisiting" is a typo, but it is part of the public name;
    // callers elsewhere spell it this way, so it cannot be renamed here alone.
    static bool check_for_exisiting_periodic_timers();
    static HPET& the();

    u64 frequency() const { return m_frequency; }

    // Conversions between raw main-counter ticks and nanoseconds.
    u64 raw_counter_ticks_to_ns(u64) const;
    u64 ns_to_raw_counter_ticks(u64) const;

    NonnullLockRefPtrVector<HPETComparator> const& comparators() const { return m_comparators; }
    void disable(HPETComparator const&);
    void enable(HPETComparator const&);

    void update_periodic_comparator_value();
    void update_non_periodic_comparator_value(HPETComparator const& comparator);

    void set_comparator_irq_vector(u8 comparator_number, u8 irq_vector);

    void enable_periodic_interrupt(HPETComparator const& comparator);
    void disable_periodic_interrupt(HPETComparator const& comparator);

    // Advances the bookkeeping for seconds/ticks since boot; with query_only
    // set, presumably reads without committing state — TODO confirm in HPET.cpp.
    u64 update_time(u64& seconds_since_boot, u32& ticks_this_second, bool query_only);
    u64 read_main_counter_unsafe() const; // "unsafe": no locking/wrap handling — see HPET.cpp
    u64 read_main_counter() const;

    // IRQ lines a given comparator can be routed to.
    Vector<unsigned> capable_interrupt_numbers(u8 comparator_number);
    Vector<unsigned> capable_interrupt_numbers(HPETComparator const&);

private:
    HPETRegistersBlock const& registers() const;
    HPETRegistersBlock& registers();

    // Global enable/disable of the main counter and all comparator interrupts.
    void global_disable();
    void global_enable();

    bool is_periodic_capable(u8 comparator_number) const;
    bool is_64bit_capable(u8 comparator_number) const;

    void set_comparators_to_optimal_interrupt_state(size_t timers_count);

    u64 nanoseconds_to_raw_ticks() const;

    PhysicalAddress find_acpi_hpet_registers_block();
    explicit HPET(PhysicalAddress acpi_hpet);
    PhysicalAddress m_physical_acpi_hpet_table;
    PhysicalAddress m_physical_acpi_hpet_registers;
    OwnPtr<Memory::Region> m_hpet_mmio_region;

    u64 m_main_counter_last_read { 0 };
    u64 m_main_counter_drift { 0 };
    u32 m_32bit_main_counter_wraps { 0 }; // wrap count when the main counter is only 32-bit

    // NOTE(review): the members below are assigned during initialization (see
    // HPET.cpp) but have no default initializers; consider adding `{ 0 }`.
    u16 m_vendor_id;
    u16 m_minimum_tick;
    u64 m_frequency;
    u8 m_revision_id;
    bool m_main_counter_64bits : 1;
    // NOTE(review): missing the m_ prefix used by every other member.
    bool legacy_replacement_route_capable : 1;

    NonnullLockRefPtrVector<HPETComparator> m_comparators;
};
}

View file

@ -0,0 +1,136 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/Time/HPETComparator.h>
#include <Kernel/Assertions.h>
#include <Kernel/Debug.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Sections.h>
#include <Kernel/Time/TimeManagement.h>
namespace Kernel {
UNMAP_AFTER_INIT NonnullLockRefPtr<HPETComparator> HPETComparator::create(u8 number, u8 irq, bool periodic_capable, bool is_64bit_capable)
{
auto timer = adopt_lock_ref(*new HPETComparator(number, irq, periodic_capable, is_64bit_capable));
timer->register_interrupt_handler();
return timer;
}
UNMAP_AFTER_INIT HPETComparator::HPETComparator(u8 number, u8 irq, bool periodic_capable, bool is_64bit_capable)
: HardwareTimer(irq)
, m_periodic(false)
, m_periodic_capable(periodic_capable)
, m_enabled(false)
, m_is_64bit_capable(is_64bit_capable)
, m_comparator_number(number)
{
}
void HPETComparator::disable()
{
if (!m_enabled)
return;
m_enabled = false;
HPET::the().disable(*this);
}
void HPETComparator::set_periodic()
{
VERIFY(m_periodic_capable);
m_periodic = true;
m_enabled = true;
HPET::the().enable_periodic_interrupt(*this);
}
void HPETComparator::set_non_periodic()
{
VERIFY(m_periodic_capable);
m_periodic = false;
m_enabled = true;
HPET::the().disable_periodic_interrupt(*this);
}
bool HPETComparator::handle_irq(RegisterState const& regs)
{
auto result = HardwareTimer::handle_irq(regs);
if (!is_periodic())
set_new_countdown();
return result;
}
void HPETComparator::set_new_countdown()
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(m_frequency <= HPET::the().frequency());
HPET::the().update_non_periodic_comparator_value(*this);
}
size_t HPETComparator::ticks_per_second() const
{
return m_frequency;
}
void HPETComparator::reset_to_default_ticks_per_second()
{
dbgln("reset_to_default_ticks_per_second");
m_frequency = OPTIMAL_TICKS_PER_SECOND_RATE;
if (!is_periodic())
set_new_countdown();
else
try_to_set_frequency(m_frequency);
}
bool HPETComparator::try_to_set_frequency(size_t frequency)
{
InterruptDisabler disabler;
if (!is_capable_of_frequency(frequency)) {
dbgln("HPETComparator: not capable of frequency: {}", frequency);
return false;
}
auto hpet_frequency = HPET::the().frequency();
VERIFY(frequency <= hpet_frequency);
m_frequency = frequency;
m_enabled = true;
dbgln_if(HPET_COMPARATOR_DEBUG, "HPET Comparator: Max frequency {} Hz, want to set {} Hz, periodic: {}", hpet_frequency, frequency, is_periodic());
if (is_periodic()) {
HPET::the().update_periodic_comparator_value();
} else {
HPET::the().update_non_periodic_comparator_value(*this);
}
HPET::the().enable(*this);
enable_irq(); // Enable if we haven't already
return true;
}
bool HPETComparator::is_capable_of_frequency(size_t frequency) const
{
if (frequency > HPET::the().frequency())
return false;
// HPET::update_periodic_comparator_value and HPET::update_non_periodic_comparator_value
// calculate the best counter based on the desired frequency.
return true;
}
size_t HPETComparator::calculate_nearest_possible_frequency(size_t frequency) const
{
if (frequency > HPET::the().frequency())
return HPET::the().frequency();
// HPET::update_periodic_comparator_value and HPET::update_non_periodic_comparator_value
// calculate the best counter based on the desired frequency.
return frequency;
}
u64 HPETComparator::current_raw() const
{
return HPET::the().read_main_counter();
}
u64 HPETComparator::raw_to_ns(u64 raw_delta) const
{
return HPET::the().raw_counter_ticks_to_ns(raw_delta);
}
}

View file

@ -0,0 +1,54 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Function.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/Time/HPET.h>
#include <Kernel/Time/HardwareTimer.h>
namespace Kernel {
// One HPET comparator (timer channel) exposed as a HardwareTimer.
// Created by HPET during initialization; all actual register programming is
// delegated back to the HPET singleton.
class HPETComparator final : public HardwareTimer<IRQHandler> {
    friend class HPET;

public:
    static NonnullLockRefPtr<HPETComparator> create(u8 number, u8 irq, bool periodic_capable, bool is_64bit_capable);

    virtual HardwareTimerType timer_type() const override { return HardwareTimerType::HighPrecisionEventTimer; }
    virtual StringView model() const override { return "HPET"sv; }

    u8 comparator_number() const { return m_comparator_number; }
    bool is_enabled() const { return m_enabled; }
    bool is_64bit_capable() const { return m_is_64bit_capable; }

    virtual size_t ticks_per_second() const override;

    virtual bool is_periodic() const override { return m_periodic; }
    virtual bool is_periodic_capable() const override { return m_periodic_capable; }
    virtual void set_periodic() override;
    virtual void set_non_periodic() override;
    virtual void disable() override;

    // The HPET main counter can be read directly, so raw queries are supported.
    virtual bool can_query_raw() const override { return true; }
    virtual u64 current_raw() const override;
    virtual u64 raw_to_ns(u64) const override;

    virtual void reset_to_default_ticks_per_second() override;
    virtual bool try_to_set_frequency(size_t frequency) override;
    virtual bool is_capable_of_frequency(size_t frequency) const override;
    virtual size_t calculate_nearest_possible_frequency(size_t frequency) const override;

private:
    // Re-arms the comparator after a one-shot (non-periodic) tick.
    void set_new_countdown();
    virtual bool handle_irq(RegisterState const&) override;
    HPETComparator(u8 number, u8 irq, bool periodic_capable, bool is_64bit_capable);

    bool m_periodic : 1;
    bool m_periodic_capable : 1;
    bool m_enabled : 1;
    bool m_is_64bit_capable : 1;
    u8 m_comparator_number { 0 };
};
}

View file

@ -0,0 +1,89 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/Time/PIT.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
#include <Kernel/Time/TimeManagement.h>
#define IRQ_TIMER 0
namespace Kernel {
UNMAP_AFTER_INIT NonnullLockRefPtr<PIT> PIT::initialize(Function<void(RegisterState const&)> callback)
{
return adopt_lock_ref(*new PIT(move(callback)));
}
[[maybe_unused]] inline static void reset_countdown(u16 timer_reload)
{
IO::out8(PIT_CTL, TIMER0_SELECT | WRITE_WORD | MODE_COUNTDOWN);
IO::out8(TIMER0_CTL, LSB(timer_reload));
IO::out8(TIMER0_CTL, MSB(timer_reload));
}
PIT::PIT(Function<void(RegisterState const&)> callback)
: HardwareTimer(IRQ_TIMER, move(callback))
, m_periodic(true)
{
IO::out8(PIT_CTL, TIMER0_SELECT | WRITE_WORD | MODE_SQUARE_WAVE);
dmesgln("PIT: {} Hz, square wave ({:#08x})", OPTIMAL_TICKS_PER_SECOND_RATE, BASE_FREQUENCY / OPTIMAL_TICKS_PER_SECOND_RATE);
reset_to_default_ticks_per_second();
enable_irq();
}
size_t PIT::ticks_per_second() const
{
return m_frequency;
}
void PIT::set_periodic()
{
IO::out8(PIT_CTL, TIMER0_CTL | WRITE_WORD | MODE_SQUARE_WAVE);
m_periodic = true;
}
void PIT::set_non_periodic()
{
IO::out8(PIT_CTL, TIMER0_CTL | WRITE_WORD | MODE_ONESHOT);
m_periodic = false;
}
void PIT::reset_to_default_ticks_per_second()
{
InterruptDisabler disabler;
bool success = try_to_set_frequency(OPTIMAL_TICKS_PER_SECOND_RATE);
VERIFY(success);
}
bool PIT::try_to_set_frequency(size_t frequency)
{
InterruptDisabler disabler;
if (!is_capable_of_frequency(frequency))
return false;
disable_irq();
size_t reload_value = BASE_FREQUENCY / frequency;
IO::out8(TIMER0_CTL, LSB(reload_value));
IO::out8(TIMER0_CTL, MSB(reload_value));
m_frequency = frequency;
enable_irq();
return true;
}
bool PIT::is_capable_of_frequency(size_t frequency) const
{
VERIFY(frequency != 0);
return frequency <= BASE_FREQUENCY;
}
size_t PIT::calculate_nearest_possible_frequency(size_t frequency) const
{
VERIFY(frequency != 0);
return frequency;
}
}

View file

@ -0,0 +1,57 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Time/HardwareTimer.h>
namespace Kernel {
/* Timer related ports */
#define TIMER0_CTL 0x40
#define TIMER1_CTL 0x41
#define TIMER2_CTL 0x42
#define PIT_CTL 0x43
/* Building blocks for PIT_CTL */
#define TIMER0_SELECT 0x00
#define TIMER1_SELECT 0x40
#define TIMER2_SELECT 0x80
#define MODE_COUNTDOWN 0x00
#define MODE_ONESHOT 0x02
#define MODE_RATE 0x04
#define MODE_SQUARE_WAVE 0x06
#define WRITE_WORD 0x30
#define BASE_FREQUENCY 1193182
// Driver for the legacy i8253/i8254 Programmable Interval Timer, channel 0
// (the system timer on IRQ 0).
class PIT final : public HardwareTimer<IRQHandler> {
public:
    static NonnullLockRefPtr<PIT> initialize(Function<void(RegisterState const&)>);

    virtual HardwareTimerType timer_type() const override { return HardwareTimerType::i8253; }
    virtual StringView model() const override { return "i8254"sv; }

    virtual size_t ticks_per_second() const override;

    virtual bool is_periodic() const override { return m_periodic; }
    virtual bool is_periodic_capable() const override { return true; }
    virtual void set_periodic() override;
    virtual void set_non_periodic() override;
    // The PIT cannot be disabled from here; it is simply reprogrammed or its
    // IRQ masked elsewhere.
    virtual void disable() override { }

    virtual void reset_to_default_ticks_per_second() override;
    virtual bool try_to_set_frequency(size_t frequency) override;
    virtual bool is_capable_of_frequency(size_t frequency) const override;
    virtual size_t calculate_nearest_possible_frequency(size_t frequency) const override;

private:
    explicit PIT(Function<void(RegisterState const&)>);
    bool m_periodic { true };
};
}

View file

@ -0,0 +1,91 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86_64/CMOS.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/NonMaskableInterruptDisabler.h>
#include <Kernel/Arch/x86_64/Time/RTC.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Time/TimeManagement.h>
namespace Kernel {
#define IRQ_TIMER 8
#define MAX_FREQUENCY 8000
// Creates the RTC hardware timer; the callback is invoked on every RTC tick.
NonnullLockRefPtr<RealTimeClock> RealTimeClock::create(Function<void(RegisterState const&)> callback)
{
    return adopt_lock_ref(*new RealTimeClock(move(callback)));
}
RealTimeClock::RealTimeClock(Function<void(RegisterState const&)> callback)
    : HardwareTimer(IRQ_TIMER, move(callback))
{
    // CMOS/RTC register writes must not be interrupted (index/data port pairs),
    // and NMIs are masked while we poke the RTC.
    InterruptDisabler disabler;
    NonMaskableInterruptDisabler nmi_disabler;
    enable_irq();
    // Set bit 6 of RTC status register B (0x0B) to enable the periodic
    // interrupt; the 0x80 in the index keeps NMI disabled during the access.
    CMOS::write(0x8B, CMOS::read(0xB) | 0x40);
    reset_to_default_ticks_per_second();
}
bool RealTimeClock::handle_irq(RegisterState const& regs)
{
    auto result = HardwareTimer::handle_irq(regs);
    // Reading status register C acknowledges the RTC interrupt; without this
    // the RTC will not raise another IRQ.
    CMOS::read(0x8C);
    return result;
}
// Returns the currently programmed RTC tick rate in Hz.
size_t RealTimeClock::ticks_per_second() const
{
    return m_frequency;
}
void RealTimeClock::reset_to_default_ticks_per_second()
{
    InterruptDisabler disabler;
    // 1024 Hz is a valid RTC rate (32768 is divisible by it), so this must succeed.
    bool success = try_to_set_frequency(1024);
    VERIFY(success);
}
// FIXME: This is a quick & dirty log base 2 with a parameter. Please provide something better in the future.
// Returns floor(log2(number)) for number >= 1, and 0 for number == 0.
static int quick_log2(size_t number)
{
    int result = 0;
    for (; number > 1; number >>= 1)
        ++result;
    return result;
}
// Programs the RTC periodic-interrupt rate. The RTC ticks at
// 32768 >> (rate - 1) Hz, so rate = log2(32768 / frequency) + 1.
// e.g. frequency 1024 -> divider 32 -> rate 6.
bool RealTimeClock::try_to_set_frequency(size_t frequency)
{
    InterruptDisabler disabler;
    if (!is_capable_of_frequency(frequency))
        return false;
    disable_irq();
    // Status register A (0x0A): keep the upper divider bits, replace the
    // low 4 rate-select bits. 0x80 in the index keeps NMI disabled.
    u8 previous_rate = CMOS::read(0x8A);
    u8 rate = quick_log2(32768 / frequency) + 1;
    dbgln("RTC: Set rate to {}", rate);
    CMOS::write(0x8A, (previous_rate & 0xF0) | rate);
    m_frequency = frequency;
    dbgln("RTC: Set frequency to {} Hz", frequency);
    enable_irq();
    return true;
}
// The RTC can only tick at rates that divide its 32.768 kHz base clock, with
// a divider between 4 and 16384 (i.e. roughly 2 Hz up to 8 kHz), further
// capped at MAX_FREQUENCY.
bool RealTimeClock::is_capable_of_frequency(size_t frequency) const
{
    VERIFY(frequency != 0);
    if (frequency > MAX_FREQUENCY)
        return false;
    if (32768 % frequency != 0)
        return false;
    auto divider = 32768 / frequency;
    return divider >= 4 && divider <= 16384;
}
size_t RealTimeClock::calculate_nearest_possible_frequency(size_t frequency) const
{
    VERIFY(frequency != 0);
    // NOTE(review): returns the input unchanged even when the RTC cannot
    // actually produce it; presumably callers only pass capable frequencies.
    return frequency;
}
}

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/x86_64/RTC.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Time/HardwareTimer.h>
namespace Kernel {
// Driver for the CMOS Real Time Clock's periodic interrupt (IRQ 8),
// used as a strictly periodic hardware timer.
class RealTimeClock final : public HardwareTimer<IRQHandler> {
public:
    static NonnullLockRefPtr<RealTimeClock> create(Function<void(RegisterState const&)> callback);

    virtual HardwareTimerType timer_type() const override { return HardwareTimerType::RTC; }
    virtual StringView model() const override { return "Real Time Clock"sv; }

    virtual size_t ticks_per_second() const override;

    // The RTC periodic interrupt is inherently periodic; one-shot mode and
    // disabling are not supported through this interface.
    virtual bool is_periodic() const override { return true; }
    virtual bool is_periodic_capable() const override { return true; }
    virtual void set_periodic() override { }
    virtual void set_non_periodic() override { }
    virtual void disable() override { }

    virtual void reset_to_default_ticks_per_second() override;
    virtual bool try_to_set_frequency(size_t frequency) override;
    virtual bool is_capable_of_frequency(size_t frequency) const override;
    virtual size_t calculate_nearest_possible_frequency(size_t frequency) const override;

private:
    explicit RealTimeClock(Function<void(RegisterState const&)> callback);
    virtual bool handle_irq(RegisterState const&) override;
};
}

View file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/TrapFrame.h>
#include <Kernel/InterruptDisabler.h>
namespace Kernel {

// These are called from assembly (see the trap entry/exit stubs), hence the
// extern "C" linkage. Interrupts are disabled around the Processor bookkeeping
// so the per-CPU trap chain is updated atomically.

// Trap entry for exceptions/traps that should NOT be counted as entering an
// IRQ context (second argument false).
extern "C" void enter_trap_no_irq(TrapFrame* trap)
{
    InterruptDisabler disable;
    Processor::current().enter_trap(*trap, false);
}

// Trap entry for interrupts: raises the processor's IRQ nesting state.
extern "C" void enter_trap(TrapFrame* trap)
{
    InterruptDisabler disable;
    Processor::current().enter_trap(*trap, true);
}

// Unwinds the per-CPU trap state on the way back out of a trap/interrupt.
extern "C" void exit_trap(TrapFrame* trap)
{
    InterruptDisabler disable;
    return Processor::current().exit_trap(*trap);
}

}

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
struct RegisterState;
// Per-trap bookkeeping record, allocated on the stack by the assembly trap
// stubs. Its exact layout is consumed by assembly (see TRAP_FRAME_SIZE and
// the static_assert below), so fields must not be reordered.
struct TrapFrame {
    FlatPtr prev_irq_level; // IRQ nesting level before this trap; presumably restored by exit_trap — TODO confirm
    TrapFrame* next_trap;   // link in the per-CPU chain of nested traps
    RegisterState* regs;    // must be last

    TrapFrame() = delete;
    TrapFrame(TrapFrame const&) = delete;
    TrapFrame(TrapFrame&&) = delete;
    TrapFrame& operator=(TrapFrame const&) = delete;
    TrapFrame& operator=(TrapFrame&&) = delete;
};
#define TRAP_FRAME_SIZE (3 * 8)
static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());
extern "C" void enter_trap_no_irq(TrapFrame* trap) __attribute__((used));
extern "C" void enter_trap(TrapFrame*) __attribute__((used));
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
}

View file

@ -0,0 +1,83 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Try.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/VGA/IOArbiter.h>
namespace Kernel {

// Arbitrates raw VGA I/O port access (sequencer, CRTC, attribute controller)
// between GraphicsManagement users. All register pokes are index/data port
// pairs, so every method holds m_main_vga_lock (a recursive spinlock) for the
// whole sequence.

NonnullOwnPtr<VGAIOArbiter> VGAIOArbiter::must_create(Badge<GraphicsManagement>)
{
    return MUST(adopt_nonnull_own_or_enomem(new (nothrow) VGAIOArbiter()));
}

VGAIOArbiter::~VGAIOArbiter() = default;
VGAIOArbiter::VGAIOArbiter() = default;

// Turns VGA emulation output off for good; afterwards every other method
// becomes a no-op (m_vga_access_is_disabled).
void VGAIOArbiter::disable_vga_emulation_access_permanently(Badge<GraphicsManagement>)
{
    // m_main_vga_lock is recursive, so the nested lock taken by
    // disable_vga_text_mode_console_cursor() below is fine.
    SpinlockLocker locker(m_main_vga_lock);
    disable_vga_text_mode_console_cursor();
    // Sequencer index 1 (clocking mode): setting bit 5 blanks the screen.
    IO::out8(0x3c4, 1);
    u8 sr1 = IO::in8(0x3c5);
    IO::out8(0x3c5, sr1 | 1 << 5);
    microseconds_delay(1000);
    m_vga_access_is_disabled = true;
}

void VGAIOArbiter::enable_vga_text_mode_console_cursor(Badge<GraphicsManagement>)
{
    enable_vga_text_mode_console_cursor();
}

void VGAIOArbiter::enable_vga_text_mode_console_cursor()
{
    SpinlockLocker locker(m_main_vga_lock);
    if (m_vga_access_is_disabled)
        return;
    // CRTC register 0x0A (cursor start): clearing bit 5 shows the cursor.
    IO::out8(0x3D4, 0xA);
    IO::out8(0x3D5, 0);
}

void VGAIOArbiter::disable_vga_text_mode_console_cursor(Badge<GraphicsManagement>)
{
    disable_vga_text_mode_console_cursor();
}

void VGAIOArbiter::disable_vga_text_mode_console_cursor()
{
    SpinlockLocker locker(m_main_vga_lock);
    if (m_vga_access_is_disabled)
        return;
    // CRTC register 0x0A: bit 5 (0x20) disables the cursor.
    IO::out8(0x3D4, 0xA);
    IO::out8(0x3D5, 0x20);
}

void VGAIOArbiter::unblank_screen(Badge<GraphicsManagement>)
{
    SpinlockLocker locker(m_main_vga_lock);
    if (m_vga_access_is_disabled)
        return;
    // Attribute controller: writing index 0x20 re-enables video output.
    IO::out8(0x3c0, 0x20);
}

// Moves the hardware text-mode cursor to (x, y) on a console that is
// `console_width` cells wide.
void VGAIOArbiter::set_vga_text_mode_cursor(Badge<GraphicsManagement>, size_t console_width, size_t x, size_t y)
{
    SpinlockLocker locker(m_main_vga_lock);
    if (m_vga_access_is_disabled)
        return;
    enable_vga_text_mode_console_cursor();
    // CRTC registers 0x0E/0x0F hold the cursor's linear cell offset.
    u16 value = y * console_width + x;
    IO::out8(0x3d4, 0x0e);
    IO::out8(0x3d5, MSB(value));
    IO::out8(0x3d4, 0x0f);
    IO::out8(0x3d5, LSB(value));
}

}

View file

@ -0,0 +1,40 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/NonnullOwnPtr.h>
#include <AK/Platform.h>
#include <AK/Types.h>
#include <Kernel/Locking/Spinlock.h>
namespace Kernel {
class GraphicsManagement;
// Serializes raw VGA port access for GraphicsManagement (the only allowed
// caller, enforced via Badge). Once disable_vga_emulation_access_permanently()
// has run, all other operations silently become no-ops.
class VGAIOArbiter {
public:
    static NonnullOwnPtr<VGAIOArbiter> must_create(Badge<GraphicsManagement>);

    void disable_vga_emulation_access_permanently(Badge<GraphicsManagement>);
    void enable_vga_text_mode_console_cursor(Badge<GraphicsManagement>);
    void disable_vga_text_mode_console_cursor(Badge<GraphicsManagement>);
    void set_vga_text_mode_cursor(Badge<GraphicsManagement>, size_t console_width, size_t x, size_t y);
    void unblank_screen(Badge<GraphicsManagement>);

    ~VGAIOArbiter();

private:
    VGAIOArbiter();

    // Badge-less variants used internally once the lock discipline is handled.
    void disable_vga_text_mode_console_cursor();
    void enable_vga_text_mode_console_cursor();

    // Recursive: public entry points may call the internal helpers above,
    // which re-acquire the same lock.
    RecursiveSpinlock m_main_vga_lock { LockRank::None };
    bool m_vga_access_is_disabled { false };
};
}

415
Kernel/Arch/x86_64/init.cpp Normal file
View file

@ -0,0 +1,415 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Types.h>
#include <Kernel/Arch/InterruptManagement.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/Hypervisor/VMWareBackdoor.h>
#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Arch/x86_64/Interrupts/PIC.h>
#include <Kernel/BootInfo.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Initializer.h>
#include <Kernel/Bus/USB/USBManagement.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Devices/Audio/Management.h>
#include <Kernel/Devices/DeviceControlDevice.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/FullDevice.h>
#include <Kernel/Devices/HID/HIDManagement.h>
#include <Kernel/Devices/KCOVDevice.h>
#include <Kernel/Devices/MemoryDevice.h>
#include <Kernel/Devices/NullDevice.h>
#include <Kernel/Devices/PCISerialDevice.h>
#include <Kernel/Devices/RandomDevice.h>
#include <Kernel/Devices/SelfTTYDevice.h>
#include <Kernel/Devices/SerialDevice.h>
#include <Kernel/Devices/ZeroDevice.h>
#include <Kernel/FileSystem/SysFS/Registry.h>
#include <Kernel/FileSystem/SysFS/Subsystems/Firmware/Directory.h>
#include <Kernel/FileSystem/VirtualFileSystem.h>
#include <Kernel/Firmware/ACPI/Initialize.h>
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/Graphics/Console/BootFramebufferConsole.h>
#include <Kernel/Graphics/Console/VGATextModeConsole.h>
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/JailManagement.h>
#include <Kernel/KSyms.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Multiboot.h>
#include <Kernel/Net/NetworkTask.h>
#include <Kernel/Net/NetworkingManagement.h>
#include <Kernel/Panic.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/Process.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/Random.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/StorageManagement.h>
#include <Kernel/TTY/ConsoleManagement.h>
#include <Kernel/TTY/PTYMultiplexer.h>
#include <Kernel/TTY/VirtualConsole.h>
#include <Kernel/Tasks/FinalizerTask.h>
#include <Kernel/Tasks/SyncTask.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/WorkQueue.h>
#include <Kernel/kstdio.h>
// Defined in the linker script
typedef void (*ctor_func_t)();
extern ctor_func_t start_heap_ctors[];
extern ctor_func_t end_heap_ctors[];
extern ctor_func_t start_ctors[];
extern ctor_func_t end_ctors[];
extern uintptr_t __stack_chk_guard;
READONLY_AFTER_INIT uintptr_t __stack_chk_guard __attribute__((used));
extern "C" u8 start_of_safemem_text[];
extern "C" u8 end_of_safemem_text[];
extern "C" u8 start_of_safemem_atomic_text[];
extern "C" u8 end_of_safemem_atomic_text[];
extern "C" u8 end_of_kernel_image[];
multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
size_t multiboot_copy_boot_modules_count;
READONLY_AFTER_INIT bool g_in_early_boot;
namespace Kernel {
[[noreturn]] static void init_stage2(void*);
static void setup_serial_debug();
// boot.S expects these functions to exactly have the following signatures.
// We declare them here to ensure their signatures don't accidentally change.
extern "C" void init_finished(u32 cpu) __attribute__((used));
extern "C" [[noreturn]] void init_ap(FlatPtr cpu, Processor* processor_info);
extern "C" [[noreturn]] void init(BootInfo const&);
READONLY_AFTER_INIT VirtualConsole* tty0;
ProcessID g_init_pid { 0 };
// Returns the storage for the bootstrap processor's Processor object.
// init() placement-news a Processor into this buffer before global
// constructors run.
ALWAYS_INLINE static Processor& bsp_processor()
{
    // This solves a problem where the bsp Processor instance
    // gets "re"-initialized in init() when we run all global constructors.
    alignas(Processor) static u8 bsp_processor_storage[sizeof(Processor)];
    return (Processor&)bsp_processor_storage;
}
// SerenityOS Kernel C++ entry point :^)
//
// This is where C++ execution begins, after boot.S transfers control here.
//
// The purpose of init() is to start multi-tasking. It does the bare minimum
// amount of work needed to start the scheduler.
//
// Once multi-tasking is ready, we spawn a new thread that starts in the
// init_stage2() function. Initialization continues there.
extern "C" {
READONLY_AFTER_INIT PhysicalAddress start_of_prekernel_image;
READONLY_AFTER_INIT PhysicalAddress end_of_prekernel_image;
READONLY_AFTER_INIT size_t physical_to_virtual_offset;
READONLY_AFTER_INIT FlatPtr kernel_mapping_base;
READONLY_AFTER_INIT FlatPtr kernel_load_base;
READONLY_AFTER_INIT PhysicalAddress boot_pml4t;
READONLY_AFTER_INIT PhysicalAddress boot_pdpt;
READONLY_AFTER_INIT PhysicalAddress boot_pd0;
READONLY_AFTER_INIT PhysicalAddress boot_pd_kernel;
READONLY_AFTER_INIT PageTableEntry* boot_pd_kernel_pt1023;
READONLY_AFTER_INIT char const* kernel_cmdline;
READONLY_AFTER_INIT u32 multiboot_flags;
READONLY_AFTER_INIT multiboot_memory_map_t* multiboot_memory_map;
READONLY_AFTER_INIT size_t multiboot_memory_map_count;
READONLY_AFTER_INIT multiboot_module_entry_t* multiboot_modules;
READONLY_AFTER_INIT size_t multiboot_modules_count;
READONLY_AFTER_INIT PhysicalAddress multiboot_framebuffer_addr;
READONLY_AFTER_INIT u32 multiboot_framebuffer_pitch;
READONLY_AFTER_INIT u32 multiboot_framebuffer_width;
READONLY_AFTER_INIT u32 multiboot_framebuffer_height;
READONLY_AFTER_INIT u8 multiboot_framebuffer_bpp;
READONLY_AFTER_INIT u8 multiboot_framebuffer_type;
}
Atomic<Graphics::Console*> g_boot_console;
// Kernel C++ entry point for the bootstrap processor (see the block comment
// above). Copies BootInfo into globals, brings up the heap, memory manager,
// interrupts and the scheduler, then hands off to init_stage2 and never returns.
extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
{
    g_in_early_boot = true;

    // Mirror the Prekernel-provided BootInfo into kernel globals before
    // anything else can touch them.
    start_of_prekernel_image = PhysicalAddress { boot_info.start_of_prekernel_image };
    end_of_prekernel_image = PhysicalAddress { boot_info.end_of_prekernel_image };
    physical_to_virtual_offset = boot_info.physical_to_virtual_offset;
    kernel_mapping_base = boot_info.kernel_mapping_base;
    kernel_load_base = boot_info.kernel_load_base;
    gdt64ptr = boot_info.gdt64ptr;
    code64_sel = boot_info.code64_sel;
    boot_pml4t = PhysicalAddress { boot_info.boot_pml4t };
    boot_pdpt = PhysicalAddress { boot_info.boot_pdpt };
    boot_pd0 = PhysicalAddress { boot_info.boot_pd0 };
    boot_pd_kernel = PhysicalAddress { boot_info.boot_pd_kernel };
    boot_pd_kernel_pt1023 = (PageTableEntry*)boot_info.boot_pd_kernel_pt1023;
    kernel_cmdline = (char const*)boot_info.kernel_cmdline;
    multiboot_flags = boot_info.multiboot_flags;
    multiboot_memory_map = (multiboot_memory_map_t*)boot_info.multiboot_memory_map;
    multiboot_memory_map_count = boot_info.multiboot_memory_map_count;
    multiboot_modules = (multiboot_module_entry_t*)boot_info.multiboot_modules;
    multiboot_modules_count = boot_info.multiboot_modules_count;
    multiboot_framebuffer_addr = PhysicalAddress { boot_info.multiboot_framebuffer_addr };
    multiboot_framebuffer_pitch = boot_info.multiboot_framebuffer_pitch;
    multiboot_framebuffer_width = boot_info.multiboot_framebuffer_width;
    multiboot_framebuffer_height = boot_info.multiboot_framebuffer_height;
    multiboot_framebuffer_bpp = boot_info.multiboot_framebuffer_bpp;
    multiboot_framebuffer_type = boot_info.multiboot_framebuffer_type;

    setup_serial_debug();

    // We need to copy the command line before kmalloc is initialized,
    // as it may overwrite parts of multiboot!
    CommandLine::early_initialize(kernel_cmdline);
    memcpy(multiboot_copy_boot_modules_array, multiboot_modules, multiboot_modules_count * sizeof(multiboot_module_entry_t));
    multiboot_copy_boot_modules_count = multiboot_modules_count;

    // Construct the BSP's Processor in its static storage (see bsp_processor()).
    new (&bsp_processor()) Processor();
    bsp_processor().early_initialize(0);

    // Invoke the constructors needed for the kernel heap
    for (ctor_func_t* ctor = start_heap_ctors; ctor < end_heap_ctors; ctor++)
        (*ctor)();
    kmalloc_init();

    load_kernel_symbol_table();

    bsp_processor().initialize(0);

    CommandLine::initialize();
    Memory::MemoryManager::initialize(0);

    // NOTE: If the bootloader provided a framebuffer, then set up an initial console.
    // If the bootloader didn't provide a framebuffer, then set up an initial text console.
    // We do so we can see the output on the screen as soon as possible.
    if (!kernel_command_line().is_early_boot_console_disabled()) {
        if (!multiboot_framebuffer_addr.is_null() && multiboot_framebuffer_type == MULTIBOOT_FRAMEBUFFER_TYPE_RGB) {
            g_boot_console = &try_make_lock_ref_counted<Graphics::BootFramebufferConsole>(multiboot_framebuffer_addr, multiboot_framebuffer_width, multiboot_framebuffer_height, multiboot_framebuffer_pitch).value().leak_ref();
        } else {
            g_boot_console = &Graphics::VGATextModeConsole::initialize().leak_ref();
        }
    }
    dmesgln("Starting SerenityOS...");

    DeviceManagement::initialize();
    SysFSComponentRegistry::initialize();
    DeviceManagement::the().attach_null_device(*NullDevice::must_initialize());
    DeviceManagement::the().attach_console_device(*ConsoleDevice::must_create());
    DeviceManagement::the().attach_device_control_device(*DeviceControlDevice::must_create());

    MM.unmap_prekernel();

    // Ensure that the safemem sections are not empty. This could happen if the linker accidentally discards the sections.
    VERIFY(+start_of_safemem_text != +end_of_safemem_text);
    VERIFY(+start_of_safemem_atomic_text != +end_of_safemem_atomic_text);

    // Invoke all static global constructors in the kernel.
    // Note that we want to do this as early as possible.
    for (ctor_func_t* ctor = start_ctors; ctor < end_ctors; ctor++)
        (*ctor)();

    InterruptManagement::initialize();
    ACPI::initialize();

    // Initialize TimeManagement before using randomness!
    TimeManagement::initialize(0);

    __stack_chk_guard = get_fast_random<uintptr_t>();

    ProcFSComponentRegistry::initialize();
    JailManagement::the();

    Process::initialize();
    Scheduler::initialize();

    if (APIC::initialized() && APIC::the().enabled_processor_count() > 1) {
        // We must set up the AP boot environment before switching to a kernel process,
        // as pages below address USER_RANGE_BASE are only accessible through the kernel
        // page directory.
        APIC::the().setup_ap_boot_environment();
    }

    {
        LockRefPtr<Thread> init_stage2_thread;
        (void)Process::create_kernel_process(init_stage2_thread, KString::must_create("init_stage2"sv), init_stage2, nullptr, THREAD_AFFINITY_DEFAULT, Process::RegisterProcess::No);
        // We need to make sure we drop the reference for init_stage2_thread
        // before calling into Scheduler::start, otherwise we will have a
        // dangling Thread that never gets cleaned up
    }

    Scheduler::start();
    VERIFY_NOT_REACHED();
}
//
// This is where C++ execution begins for APs, after boot.S transfers control here.
//
// The purpose of init_ap() is to initialize APs for multi-tasking.
//
// C++ entry point for application processors (APs), reached from boot.S.
// Initializes the AP's Processor state and per-CPU memory management, then
// enters the scheduler on its idle thread. Never returns.
extern "C" [[noreturn]] UNMAP_AFTER_INIT void init_ap(FlatPtr cpu, Processor* processor_info)
{
    processor_info->early_initialize(cpu);

    processor_info->initialize(cpu);
    Memory::MemoryManager::initialize(cpu);

    Scheduler::set_idle_thread(APIC::the().get_idle_thread(cpu));

    Scheduler::start();
    VERIFY_NOT_REACHED();
}
//
// This method is called once a CPU enters the scheduler and its idle thread
// At this point the initial boot stack can be freed
//
// Called once a CPU has entered the scheduler and its idle thread; at this
// point the CPU's initial boot stack is no longer in use.
extern "C" UNMAP_AFTER_INIT void init_finished(u32 cpu)
{
    if (cpu == 0) {
        // TODO: we can reuse the boot stack, maybe for kmalloc()?
    } else {
        // APs additionally report back to the APIC and start their own
        // per-CPU time management.
        APIC::the().init_finished(cpu);
        TimeManagement::initialize(cpu);
    }
}
// Body of the "init_stage2" kernel process: brings up the remaining kernel
// subsystems, mounts the root filesystem, locks down init-time memory, and
// spawns the userspace init process. The initialization order below is
// deliberate — reorder with care.
void init_stage2(void*)
{
    // This is a little bit of a hack. We can't register our process at the time we're
    // creating it, but we need to be registered otherwise finalization won't be happy.
    // The colonel process gets away without having to do this because it never exits.
    Process::register_new(Process::current());
    WorkQueue::initialize();
    if (kernel_command_line().is_smp_enabled() && APIC::initialized() && APIC::the().enabled_processor_count() > 1) {
        // We can't start the APs until we have a scheduler up and running.
        // We need to be able to process ICI messages, otherwise another
        // core may send too many and end up deadlocking once the pool is
        // exhausted
        APIC::the().boot_aps();
    }
    // Initialize the PCI Bus as early as possible, for early boot (PCI based) serial logging
    PCI::initialize();
    if (!PCI::Access::is_disabled()) {
        PCISerialDevice::detect();
    }
    VirtualFileSystem::initialize();
    // If serial debugging is active, COM1 is already being used for dbgln()
    // output, so skip exposing it as serial device 0.
    if (!is_serial_debug_enabled())
        (void)SerialDevice::must_create(0).leak_ref();
    (void)SerialDevice::must_create(1).leak_ref();
    (void)SerialDevice::must_create(2).leak_ref();
    (void)SerialDevice::must_create(3).leak_ref();
    VMWareBackdoor::the(); // don't wait until first mouse packet
    MUST(HIDManagement::initialize());
    GraphicsManagement::the().initialize();
    ConsoleManagement::the().initialize();
    SyncTask::spawn();
    FinalizerTask::spawn();
    // Remember whether boot profiling was requested; it's enabled further
    // down, right before the init process starts running.
    auto boot_profiling = kernel_command_line().is_boot_profiling_enabled();
    if (!PCI::Access::is_disabled()) {
        USB::USBManagement::initialize();
    }
    FirmwareSysFSDirectory::initialize();
    if (!PCI::Access::is_disabled()) {
        VirtIO::detect();
    }
    NetworkingManagement::the().initialize();
    Syscall::initialize();
#ifdef ENABLE_KERNEL_COVERAGE_COLLECTION
    (void)KCOVDevice::must_create().leak_ref();
#endif
    // Core character devices; leaked on purpose so they live forever.
    (void)MemoryDevice::must_create().leak_ref();
    (void)ZeroDevice::must_create().leak_ref();
    (void)FullDevice::must_create().leak_ref();
    (void)RandomDevice::must_create().leak_ref();
    (void)SelfTTYDevice::must_create().leak_ref();
    PTYMultiplexer::initialize();
    AudioManagement::the().initialize();
    // Bring up storage and mount the root filesystem; without a root there
    // is nothing left to do but panic.
    StorageManagement::the().initialize(kernel_command_line().root_device(), kernel_command_line().is_force_pio(), kernel_command_line().is_nvme_polling_enabled());
    if (VirtualFileSystem::the().mount_root(StorageManagement::the().root_filesystem()).is_error()) {
        PANIC("VirtualFileSystem::mount_root failed");
    }
    // Switch out of early boot mode.
    g_in_early_boot = false;
    // NOTE: Everything marked READONLY_AFTER_INIT becomes non-writable after this point.
    MM.protect_readonly_after_init_memory();
    // NOTE: Everything in the .ksyms section becomes read-only after this point.
    MM.protect_ksyms_after_init();
    // NOTE: Everything marked UNMAP_AFTER_INIT becomes inaccessible after this point.
    MM.unmap_text_after_init();
    // Spawn the userspace init process (path and arguments come from the
    // kernel command line) as root, attached to tty0.
    LockRefPtr<Thread> thread;
    auto userspace_init = kernel_command_line().userspace_init();
    auto init_args = kernel_command_line().userspace_init_args();
    auto init_or_error = Process::try_create_user_process(thread, userspace_init, UserID(0), GroupID(0), move(init_args), {}, tty0);
    if (init_or_error.is_error())
        PANIC("init_stage2: Error spawning init process: {}", init_or_error.error());
    g_init_pid = init_or_error.value()->pid();
    thread->set_priority(THREAD_PRIORITY_HIGH);
    if (boot_profiling) {
        dbgln("Starting full system boot profiling");
        MutexLocker mutex_locker(Process::current().big_lock());
        // -1 = profile all processes; enable_all = every event type.
        auto const enable_all = ~(u64)0;
        auto result = Process::current().profiling_enable(-1, enable_all);
        VERIFY(!result.is_error());
    }
    NetworkTask::spawn();
    // init_stage2's job is done; terminate this kernel process.
    Process::current().sys$exit(0);
    VERIFY_NOT_REACHED();
}
// Turn on serial debug output (all dbgln() data goes to COM1 at 8-N-1
// 57600 baud) when "serial_debug" appears on the kernel command line.
// Particularly useful for debugging the boot process on live hardware.
UNMAP_AFTER_INIT void setup_serial_debug()
{
    StringView const cmdline { kernel_cmdline, strlen(kernel_cmdline) };
    if (!cmdline.contains("serial_debug"sv))
        return;
    set_serial_debug_enabled(true);
}
// Define some Itanium C++ ABI methods to stop the linker from complaining.
// If we actually call these something has gone horribly wrong
// __dso_handle identifies this "DSO" for __cxa_atexit-style registration;
// hidden visibility keeps it from being exported.
void* __dso_handle __attribute__((visibility("hidden")));
}

View file

@ -0,0 +1,106 @@
/* Kernel image linker script.
 * Defines the program headers (PHDRS) and section layout, and exports the
 * start_of_* / end_of_* symbols that the kernel uses at runtime to locate
 * its own regions (e.g. for the protect/unmap-after-init passes).
 * NOTE(review): this file appears to be run through the C preprocessor
 * (hence the #include / #define) before being handed to the linker. */
#include <AK/Platform.h>
ENTRY(init)
/* ELF segment permission flags (standard p_flags bits). */
#define PF_X 0x1
#define PF_W 0x2
#define PF_R 0x4
PHDRS
{
    elf_headers PT_LOAD FILEHDR PHDRS FLAGS(PF_R) ;
    text PT_LOAD FLAGS(PF_R | PF_X) ;
    data PT_LOAD FLAGS(PF_R | PF_W) ;
    bss PT_LOAD FLAGS(PF_R | PF_W) ;
    dynamic_segment PT_LOAD FLAGS(PF_R | PF_W) ;
    dynamic PT_DYNAMIC FLAGS(PF_R | PF_W) ;
    ksyms PT_LOAD FLAGS(PF_R) ;
}
SECTIONS
{
    start_of_kernel_image = .;
    .elf_headers (SIZEOF_HEADERS) : AT (ADDR(.elf_headers) + SIZEOF_HEADERS)
    {
        start_of_elf_headers = .;
    } :elf_headers
    .text ALIGN(4K) : AT (ADDR(.text))
    {
        start_of_kernel_text = .;
        /* SafeMem regions are delimited so faults inside them can be
         * recognized; KEEP prevents the linker from discarding them. */
        start_of_safemem_text = .;
        KEEP(*(.text.safemem))
        end_of_safemem_text = .;
        start_of_safemem_atomic_text = .;
        KEEP(*(.text.safemem.atomic))
        end_of_safemem_atomic_text = .;
        *(.text*)
    } :text
    /* Code only needed during boot; unmapped once init completes. */
    .unmap_after_init ALIGN(4K) : AT (ADDR(.unmap_after_init))
    {
        start_of_unmap_after_init = .;
        *(.unmap_after_init*);
        end_of_unmap_after_init = .;
        end_of_kernel_text = .;
    } :text
    .rodata ALIGN(4K) : AT (ADDR(.rodata))
    {
        /* Heap constructors are grouped separately so they can be run
         * before the general constructors. */
        start_heap_ctors = .;
        *libkernel_heap.a:*(.ctors)
        *libkernel_heap.a:*(.init_array)
        end_heap_ctors = .;
        start_ctors = .;
        *(.ctors)
        *(.init_array)
        end_ctors = .;
        *(.rodata*)
    } :data
    .data ALIGN(4K) : AT (ADDR(.data))
    {
        start_of_kernel_data = .;
        *(.data*)
        end_of_kernel_data = .;
    } :data
    /* Data writable during boot, made read-only afterwards. */
    .ro_after_init ALIGN(4K) : AT(ADDR(.ro_after_init))
    {
        start_of_ro_after_init = .;
        *(.ro_after_init);
        end_of_ro_after_init = .;
    } :data
    .bss ALIGN(4K) (NOLOAD) : AT (ADDR(.bss))
    {
        start_of_kernel_bss = .;
        *(page_tables)
        *(COMMON)
        *(.bss*)
        end_of_kernel_bss = .;
        /* Page-align the heap that follows the regular BSS. */
        . = ALIGN(4K);
        *(.heap)
    } :bss
    .dynamic ALIGN(4K) : AT (ADDR(.dynamic))
    {
        *(.dynamic)
    } :dynamic_segment :dynamic
    /* Kernel symbol table, kept in its own read-only segment. */
    .ksyms ALIGN(4K) : AT (ADDR(.ksyms))
    {
        start_of_kernel_ksyms = .;
        *(.kernel_symbols)
        end_of_kernel_ksyms = .;
    } :ksyms
    end_of_kernel_image = .;
}

View file

@ -0,0 +1,57 @@
/*
 * Copyright (c) 2022, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#pragma once
#include <Kernel/API/POSIX/sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Machine context: a snapshot of the CPU's general-purpose register state.
 * Packed so the field layout is exact with no padding — the order and width
 * of these fields are an ABI contract between kernel and userspace.
 * NOTE(review): presumably populated for signal/ucontext delivery — confirm
 * against the kernel's signal dispatch code. */
struct __attribute__((packed)) __mcontext {
#ifdef __i386__
    /* 32-bit x86 register set. */
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t eip;
    uint32_t eflags;
#else
    /* x86-64 register set. */
    uint64_t rax;
    uint64_t rcx;
    uint64_t rdx;
    uint64_t rbx;
    uint64_t rsp;
    uint64_t rbp;
    uint64_t rsi;
    uint64_t rdi;
    uint64_t rip;
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;
    uint64_t rflags;
#endif
    /* Segment selectors are 16-bit in hardware but stored as 32-bit fields
     * here. */
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t es;
    uint32_t fs;
    uint32_t gs;
};
#ifdef __cplusplus
}
#endif