
Kernel: Remove i686 support

Authored by Liav A on 2022-10-04 03:05:54 +03:00; committed by Andreas Kling
parent 32270dcd20
commit 5ff318cf3a
75 changed files with 142 additions and 895 deletions

View file

@@ -16,7 +16,7 @@
 #define LSB(x) ((x)&0xFF)
 #define MSB(x) (((x) >> 8) & 0xFF)
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/CPU.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/CPU.h>
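All of the arch-dispatch headers in the next several files receive the same one-line change. After it, a representative header has this shape (a sketch assembled from the hunks; the #else/#error fallback is an assumption for illustration and is not visible in this view):

#include <AK/Platform.h>

#if ARCH(X86_64)
#    include <Kernel/Arch/x86/CPU.h>
#elif ARCH(AARCH64)
#    include <Kernel/Arch/aarch64/CPU.h>
#else
#    error Unknown architecture
#endif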

View file

@@ -8,7 +8,7 @@
 #include <AK/Platform.h>
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/IRQController.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/IRQController.h>

View file

@@ -8,7 +8,7 @@
 #include <AK/Platform.h>
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/InterruptManagement.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/InterruptManagement.h>

View file

@@ -9,7 +9,7 @@
 #include <AK/Platform.h>
 #include <AK/Types.h>
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/Interrupts.h>
 #endif

View file

@@ -9,7 +9,7 @@
 #include <AK/Platform.h>
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/PageDirectory.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/PageDirectory.h>

View file

@@ -23,7 +23,7 @@ void restore_processor_interrupts_state(InterruptsState);
 }
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/Processor.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/Processor.h>

View file

@@ -8,7 +8,7 @@
 #include <AK/Platform.h>
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/RegisterState.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/RegisterState.h>

View file

@@ -8,7 +8,7 @@
 #include <AK/Platform.h>
-#if ARCH(X86_64) || ARCH(I386)
+#if ARCH(X86_64)
 #    include <Kernel/Arch/x86/TrapFrame.h>
 #elif ARCH(AARCH64)
 #    include <Kernel/Arch/aarch64/TrapFrame.h>

View file

@@ -33,38 +33,6 @@ ALWAYS_INLINE FlatPtr cpu_flags()
     return flags;
 }
-#if ARCH(I386)
-ALWAYS_INLINE void set_fs(u16 segment)
-{
-    asm volatile(
-        "mov %%ax, %%fs" ::"a"(segment)
-        : "memory");
-}
-ALWAYS_INLINE void set_gs(u16 segment)
-{
-    asm volatile(
-        "mov %%ax, %%gs" ::"a"(segment)
-        : "memory");
-}
-ALWAYS_INLINE u16 get_fs()
-{
-    u16 fs;
-    asm("mov %%fs, %%eax"
-        : "=a"(fs));
-    return fs;
-}
-ALWAYS_INLINE u16 get_gs()
-{
-    u16 gs;
-    asm("mov %%gs, %%eax"
-        : "=a"(gs));
-    return gs;
-}
-#endif
 template<typename T>
 ALWAYS_INLINE T read_gs_value(FlatPtr offset)
 {

View file

@@ -14,29 +14,13 @@
 #include <AK/Platform.h>
 VALIDATE_IS_X86()
-#if ARCH(I386)
-#    define GDT_SELECTOR_CODE0 0x08
-#    define GDT_SELECTOR_DATA0 0x10
-#    define GDT_SELECTOR_CODE3 0x18
-#    define GDT_SELECTOR_DATA3 0x20
-#    define GDT_SELECTOR_TLS 0x28
-#    define GDT_SELECTOR_PROC 0x30
-#    define GDT_SELECTOR_TSS 0x38
-// SYSENTER makes certain assumptions on how the GDT is structured:
-static_assert(GDT_SELECTOR_CODE0 + 8 == GDT_SELECTOR_DATA0); // SS0 = CS0 + 8
-// SYSEXIT makes certain assumptions on how the GDT is structured:
-static_assert(GDT_SELECTOR_CODE0 + 16 == GDT_SELECTOR_CODE3); // CS3 = CS0 + 16
-static_assert(GDT_SELECTOR_CODE0 + 24 == GDT_SELECTOR_DATA3); // SS3 = CS0 + 32
-#else
-#    define GDT_SELECTOR_CODE0 0x08
-#    define GDT_SELECTOR_DATA0 0x10
-#    define GDT_SELECTOR_DATA3 0x18
-#    define GDT_SELECTOR_CODE3 0x20
-#    define GDT_SELECTOR_TSS 0x28
-#    define GDT_SELECTOR_TSS_PART2 0x30
-#endif
+// Note: These values are x86-64.
+#define GDT_SELECTOR_CODE0 0x08
+#define GDT_SELECTOR_DATA0 0x10
+#define GDT_SELECTOR_DATA3 0x18
+#define GDT_SELECTOR_CODE3 0x20
+#define GDT_SELECTOR_TSS 0x28
+#define GDT_SELECTOR_TSS_PART2 0x30
 namespace Kernel {
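For orientation, an x86 segment selector is (GDT index << 3) | table indicator | requested privilege level, so the surviving x86-64 values decode as follows (an annotation, not commit text):

// selector = (GDT index << 3) | TI (bit 2; 0 = GDT) | RPL (bits 0-1)
// GDT_SELECTOR_CODE0     0x08 -> index 1, kernel code
// GDT_SELECTOR_DATA0     0x10 -> index 2, kernel data
// GDT_SELECTOR_DATA3     0x18 -> index 3, user data (loaded as 0x1b, RPL 3)
// GDT_SELECTOR_CODE3     0x20 -> index 4, user code (loaded as 0x23, RPL 3)
// GDT_SELECTOR_TSS       0x28 -> index 5, first half of the TSS descriptor
// GDT_SELECTOR_TSS_PART2 0x30 -> index 6, second half (64-bit TSS descriptors
//                                span two GDT slots, hence _PART2)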
@@ -122,14 +106,11 @@ struct [[gnu::packed]] IDTEntry
     u16 offset_1; // offset bits 0..15
     u16 selector; // a code segment selector in GDT or LDT
-#if ARCH(I386)
-    u8 zero; // unused, set to 0
-#else
     struct {
         u8 interrupt_stack_table : 3;
         u8 zero : 5; // unused, set to 0
     };
-#endif
     struct {
         u8 gate_type : 4;
         u8 storage_segment : 1;
@@ -137,18 +118,14 @@ struct [[gnu::packed]] IDTEntry
         u8 present : 1;
     } type_attr; // type and attributes
     u16 offset_2; // offset bits 16..31
-#if !ARCH(I386)
     u32 offset_3;
     u32 zeros;
-#endif
     IDTEntry() = default;
     IDTEntry(FlatPtr callback, u16 selector_, IDTEntryType type, u8 storage_segment, u8 privilege_level)
         : offset_1 { (u16)((FlatPtr)callback & 0xFFFF) }
         , selector { selector_ }
-#if !ARCH(I386)
         , interrupt_stack_table { 0 }
-#endif
         , zero { 0 }
         , type_attr {
             .gate_type = (u8)type,
@@ -157,20 +134,14 @@ struct [[gnu::packed]] IDTEntry
             .present = 1,
         }
         , offset_2 { (u16)((FlatPtr)callback >> 16) }
-#if !ARCH(I386)
         , offset_3 { (u32)(((FlatPtr)callback) >> 32) }
         , zeros { 0 }
-#endif
     {
     }
     FlatPtr off() const
     {
-#if ARCH(I386)
-        return (u32)offset_2 << 16 & (u32)offset_1;
-#else
         return (u64)offset_3 << 32 & (u64)offset_2 << 16 & (u64)offset_1;
-#endif
     }
     IDTEntryType type() const
     {
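One oddity worth flagging in the hunk above: off() combines the three offset fields with bitwise AND. Reassembling a handler address split across offset_1/offset_2/offset_3 is conventionally done with OR; a hedged sketch of that form, using the field names from the struct:

FlatPtr off() const
{
    return (u64)offset_3 << 32 | (u64)offset_2 << 16 | (u64)offset_1;
}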

View file

@@ -19,11 +19,7 @@ class GenericInterruptHandler;
 extern "C" void interrupt_common_asm_entry();
-#if ARCH(I386)
-#    define INTERRUPT_HANDLER_PUSH_PADDING
-#else
-#    define INTERRUPT_HANDLER_PUSH_PADDING "pushw $0\npushw $0\n"
-#endif
+#define INTERRUPT_HANDLER_PUSH_PADDING "pushw $0\npushw $0\n"
 // clang-format off
 #define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \
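Why the two pushw instructions: the interrupt stub must keep the saved frame in sync with RegisterState (shown later in this commit), whose trap metadata occupies one full 8-byte slot on x86_64. An annotation of the macro, not commit text:

// "pushw $0\npushw $0\n" pushes 4 bytes of zeros alongside the 2-byte ISR
// number and 2-byte exception code, filling RegisterState's u32 padding field
// so the rest of the frame stays 8-byte aligned. On i686 the slot was only
// 4 bytes wide, which is why the removed i686 definition expanded to nothing.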

View file

@@ -31,14 +31,12 @@ class ProcessorInfo;
 struct ProcessorMessage;
 struct ProcessorMessageEntry;
-#if ARCH(X86_64)
-#    define MSR_EFER 0xc0000080
-#    define MSR_STAR 0xc0000081
-#    define MSR_LSTAR 0xc0000082
-#    define MSR_SFMASK 0xc0000084
-#    define MSR_FS_BASE 0xc0000100
-#    define MSR_GS_BASE 0xc0000101
-#endif
+#define MSR_EFER 0xc0000080
+#define MSR_STAR 0xc0000081
+#define MSR_LSTAR 0xc0000082
+#define MSR_SFMASK 0xc0000084
+#define MSR_FS_BASE 0xc0000100
+#define MSR_GS_BASE 0xc0000101
 #define MSR_IA32_EFER 0xc0000080
 #define MSR_IA32_PAT 0x277
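These registers are accessed with rdmsr/wrmsr, where ECX selects the MSR and EDX:EAX carries the 64-bit value. A minimal, hedged sketch of the access pattern (ring 0 only; illustrative, not the kernel's MSR class):

// Stand-ins for AK's fixed-width types, to keep the sketch self-contained.
typedef unsigned int u32;
typedef unsigned long long u64;

// rdmsr: ECX selects the MSR; the value is returned in EDX:EAX.
static inline u64 read_msr(u32 msr)
{
    u32 low, high;
    asm volatile("rdmsr"
                 : "=a"(low), "=d"(high)
                 : "c"(msr));
    return ((u64)high << 32) | low;
}

// wrmsr: ECX selects the MSR; EDX:EAX supplies the value.
static inline void write_msr(u32 msr, u64 value)
{
    asm volatile("wrmsr"
                 :
                 : "c"(msr), "a"((u32)value), "d"((u32)(value >> 32)));
}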
@@ -73,10 +71,8 @@ class Processor {
     Processor* m_self;
-#if ARCH(X86_64)
     // Saved user stack for the syscall instruction.
     void* m_user_stack;
-#endif
     DescriptorTablePointer m_gdtr;
     alignas(Descriptor) Descriptor m_gdt[256];
@@ -93,9 +89,7 @@ class Processor {
     static Atomic<u32> g_total_processors;
     u8 m_physical_address_bit_width;
     u8 m_virtual_address_bit_width;
-#if ARCH(X86_64)
     bool m_has_qemu_hvf_quirk;
-#endif
     ProcessorInfo* m_info;
     Thread* m_current_thread;
@@ -240,7 +234,6 @@
     static bool is_smp_enabled();
-#if ARCH(X86_64)
     static constexpr u64 user_stack_offset()
     {
         return __builtin_offsetof(Processor, m_user_stack);
@@ -249,7 +242,6 @@
     {
         return __builtin_offsetof(Processor, m_tss) + __builtin_offsetof(TSS, rsp0l);
     }
-#endif
     ALWAYS_INLINE static Processor& current()
     {
@@ -258,11 +250,7 @@
     ALWAYS_INLINE static bool is_initialized()
     {
-        return
-#if ARCH(I386)
-            get_gs() == GDT_SELECTOR_PROC &&
-#endif
-            read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
+        return read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
     }
     template<typename T>
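How is_initialized() and Processor::current() find the per-CPU object: gdt_init() (later in this commit) points MSR_GS_BASE at the Processor, so a gs-relative load at offsetof(Processor, m_self) yields the self-pointer. A hedged sketch of such a load (illustrative; read_gs_ptr's real signature is not shown in this view):

// gs-relative load; on x86_64 the gs base comes from MSR_GS_BASE rather than
// a GDT descriptor, so the old GDT_SELECTOR_PROC check is no longer needed.
static inline unsigned long read_gs_qword(unsigned long offset)
{
    unsigned long value;
    asm volatile("movq %%gs:(%1), %0"
                 : "=r"(value)
                 : "r"(offset));
    return value;
}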

View file

@@ -18,21 +18,6 @@ VALIDATE_IS_X86()
 namespace Kernel {
 struct [[gnu::packed]] RegisterState {
-#if ARCH(I386)
-    FlatPtr ss;
-    FlatPtr gs;
-    FlatPtr fs;
-    FlatPtr es;
-    FlatPtr ds;
-    FlatPtr edi;
-    FlatPtr esi;
-    FlatPtr ebp;
-    FlatPtr esp;
-    FlatPtr ebx;
-    FlatPtr edx;
-    FlatPtr ecx;
-    FlatPtr eax;
-#else
     FlatPtr rdi;
     FlatPtr rsi;
     FlatPtr rbp;
@@ -49,43 +34,17 @@ struct [[gnu::packed]] RegisterState {
     FlatPtr r13;
     FlatPtr r14;
     FlatPtr r15;
-#endif
     u16 exception_code;
     u16 isr_number;
-#if ARCH(X86_64)
     u32 padding;
-#endif
-#if ARCH(I386)
-    FlatPtr eip;
-#else
     FlatPtr rip;
-#endif
     FlatPtr cs;
-#if ARCH(I386)
-    FlatPtr eflags;
-    FlatPtr userspace_esp;
-    FlatPtr userspace_ss;
-#else
     FlatPtr rflags;
     FlatPtr userspace_rsp;
    FlatPtr userspace_ss;
-#endif
-#if ARCH(I386)
-    FlatPtr userspace_sp() const
-    {
-        return userspace_esp;
-    }
-    void set_userspace_sp(FlatPtr value) { userspace_esp = value; }
-    FlatPtr ip() const { return eip; }
-    void set_ip(FlatPtr value) { eip = value; }
-    void set_dx(FlatPtr value) { edx = value; }
-    FlatPtr bp() const { return ebp; }
-    void set_bp(FlatPtr value) { ebp = value; }
-    FlatPtr flags() const { return eflags; }
-    void set_flags(FlatPtr value) { eflags = value; }
-    void set_return_reg(FlatPtr value) { eax = value; }
-#elif ARCH(X86_64)
     FlatPtr userspace_sp() const
     {
         return userspace_rsp;
@@ -99,49 +58,23 @@ struct [[gnu::packed]] RegisterState {
     FlatPtr flags() const { return rflags; }
     void set_flags(FlatPtr value) { rflags = value; }
     void set_return_reg(FlatPtr value) { rax = value; }
-#endif
     void capture_syscall_params(FlatPtr& function, FlatPtr& arg1, FlatPtr& arg2, FlatPtr& arg3, FlatPtr& arg4) const
     {
-#if ARCH(I386)
-        function = eax;
-        arg1 = edx;
-        arg2 = ecx;
-        arg3 = ebx;
-        arg4 = esi;
-#else
         // The syscall instruction clobbers rcx, so we must use a different calling convention to 32-bit.
         function = rax;
         arg1 = rdx;
         arg2 = rdi;
         arg3 = rbx;
         arg4 = rsi;
-#endif
     }
 };
-#if ARCH(I386)
-#    define REGISTER_STATE_SIZE (19 * 4)
+#define REGISTER_STATE_SIZE (22 * 8)
 static_assert(AssertSize<RegisterState, REGISTER_STATE_SIZE>());
-#elif ARCH(X86_64)
-#    define REGISTER_STATE_SIZE (22 * 8)
-static_assert(AssertSize<RegisterState, REGISTER_STATE_SIZE>());
-#endif
 inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, RegisterState const& kernel_regs)
 {
-#if ARCH(I386)
-    ptrace_regs.eax = kernel_regs.eax;
-    ptrace_regs.ecx = kernel_regs.ecx;
-    ptrace_regs.edx = kernel_regs.edx;
-    ptrace_regs.ebx = kernel_regs.ebx;
-    ptrace_regs.esp = kernel_regs.userspace_esp;
-    ptrace_regs.ebp = kernel_regs.ebp;
-    ptrace_regs.esi = kernel_regs.esi;
-    ptrace_regs.edi = kernel_regs.edi;
-    ptrace_regs.eip = kernel_regs.eip;
-    ptrace_regs.eflags = kernel_regs.eflags;
-#else
     ptrace_regs.rax = kernel_regs.rax;
     ptrace_regs.rcx = kernel_regs.rcx;
     ptrace_regs.rdx = kernel_regs.rdx;
@@ -160,7 +93,6 @@ inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, RegisterState const& kernel_regs)
     ptrace_regs.r14 = kernel_regs.r14;
     ptrace_regs.r15 = kernel_regs.r15;
     ptrace_regs.rflags = kernel_regs.rflags,
-#endif
     ptrace_regs.cs = 0;
     ptrace_regs.ss = 0;
     ptrace_regs.ds = 0;
@@ -171,18 +103,6 @@ inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, RegisterState const& kernel_regs)
 inline void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_regs, PtraceRegisters const& ptrace_regs)
 {
-#if ARCH(I386)
-    kernel_regs.eax = ptrace_regs.eax;
-    kernel_regs.ecx = ptrace_regs.ecx;
-    kernel_regs.edx = ptrace_regs.edx;
-    kernel_regs.ebx = ptrace_regs.ebx;
-    kernel_regs.esp = ptrace_regs.esp;
-    kernel_regs.ebp = ptrace_regs.ebp;
-    kernel_regs.esi = ptrace_regs.esi;
-    kernel_regs.edi = ptrace_regs.edi;
-    kernel_regs.eip = ptrace_regs.eip;
-    kernel_regs.eflags = (kernel_regs.eflags & ~safe_eflags_mask) | (ptrace_regs.eflags & safe_eflags_mask);
-#else
     kernel_regs.rax = ptrace_regs.rax;
     kernel_regs.rcx = ptrace_regs.rcx;
     kernel_regs.rdx = ptrace_regs.rdx;
@@ -202,7 +122,6 @@ inline void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_regs, PtraceRegisters const& ptrace_regs)
     kernel_regs.r15 = ptrace_regs.r15;
     // FIXME: do we need a separate safe_rflags_mask here?
     kernel_regs.rflags = (kernel_regs.rflags & ~safe_eflags_mask) | (ptrace_regs.rflags & safe_eflags_mask);
-#endif
 }
 struct [[gnu::packed]] DebugRegisterState {
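As a sanity check on the single remaining REGISTER_STATE_SIZE, the x86_64 layout tallies up as follows (a hedged count; the GPR fields elided by this view are assumed to match the upstream struct):

// 16 GPRs: rdi rsi rbp rsp rbx rdx rcx rax r8-r15   -> 16 * 8 = 128 bytes
// u16 exception_code + u16 isr_number + u32 padding ->  1 * 8 =   8 bytes
// rip, cs, rflags, userspace_rsp, userspace_ss      ->  5 * 8 =  40 bytes
// total                                                22 * 8 = 176 bytes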

View file

@@ -43,19 +43,11 @@ struct [[gnu::packed]] LegacyRegion {
     u8 FTW;
     u8 : 8;
     u16 FOP;
-#if ARCH(I386)
-    // 32-bit version
-    u32 FIP_32;
-    u16 FCS;
-    u16 : 16;
-    u32 FPD_32;
-    u16 FDS;
-    u16 : 16;
-#elif ARCH(X86_64)
     // 64-bit version
     u64 FIP_64;
     u64 FDP_64;
-#endif
     AK::MXCSR MXCSR;
     u32 MXCSR_mask;
     u8 st_mmx[128];

View file

@@ -62,10 +62,6 @@ struct [[gnu::packed]] TSS64 {
     u16 iomapbase;
 };
-#if ARCH(I386)
-using TSS = TSS32;
-#elif ARCH(X86_64)
 using TSS = TSS64;
-#endif
 }

View file

@@ -91,23 +91,7 @@ static_assert(AssertSize<HPETRegistersBlock, 0x500>());
 static u64 read_register_safe64(HPETRegister const& reg)
 {
-#if ARCH(X86_64)
     return reg.full;
-#elif ARCH(I386)
-    // As per 2.4.7 this reads the 64 bit value in a consistent manner
-    // using only 32 bit reads
-    u32 low, high = reg.high;
-    for (;;) {
-        low = reg.low;
-        u32 new_high = reg.high;
-        if (new_high == high)
-            break;
-        high = new_high;
-    }
-    return ((u64)high << 32) | (u64)low;
-#else
-#    error Unknown architecture
-#endif
 }
 static HPET* s_hpet;
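The deleted i686 branch deserves a note: a 64-bit HPET register read performed as two 32-bit loads can tear if the counter carries from the low word into the high word in between. The removed code guarded against that by re-reading the high word until it was stable; the same pattern in isolation (a sketch of the removed technique, not new kernel code):

// Stand-ins for AK's fixed-width types, to keep the sketch self-contained.
typedef unsigned int u32;
typedef unsigned long long u64;

// Read a 64-bit MMIO value using only 32-bit loads: if the high word is the
// same before and after the low word is read, no carry crossed the 32-bit
// boundary in between, so (high << 32) | low is a consistent snapshot.
static u64 read_u64_torn_free(u32 volatile const& low_reg, u32 volatile const& high_reg)
{
    u32 high = high_reg;
    for (;;) {
        u32 low = low_reg;
        u32 new_high = high_reg;
        if (new_high == high)
            return ((u64)high << 32) | low;
        high = new_high;
    }
}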

View file

@@ -27,11 +27,7 @@ struct TrapFrame {
     TrapFrame& operator=(TrapFrame&&) = delete;
 };
-#if ARCH(I386)
-#    define TRAP_FRAME_SIZE (3 * 4)
-#else
-#    define TRAP_FRAME_SIZE (3 * 8)
-#endif
+#define TRAP_FRAME_SIZE (3 * 8)
 static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());

View file

@@ -52,61 +52,6 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interrupts };
 // clang-format off
-#if ARCH(I386)
-#define EH_ENTRY(ec, title) \
-extern "C" void title##_asm_entry(); \
-extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
-NAKED void title##_asm_entry() { \
-    asm( \
-        " pusha\n" \
-        " pushl %ds\n" \
-        " pushl %es\n" \
-        " pushl %fs\n" \
-        " pushl %gs\n" \
-        " pushl %ss\n" \
-        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
-        " mov %ax, %ds\n" \
-        " mov %ax, %es\n" \
-        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
-        " mov %ax, %gs\n" \
-        " pushl %esp \n" /* set TrapFrame::regs */ \
-        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
-        " pushl %esp \n" \
-        " cld\n" \
-        " call enter_trap_no_irq \n" \
-        " call " #title "_handler\n" \
-        " jmp common_trap_exit \n" \
-    ); \
-}
-#define EH_ENTRY_NO_CODE(ec, title) \
-extern "C" void title##_asm_entry(); \
-extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
-NAKED void title##_asm_entry() { \
-    asm( \
-        " pushl $0x0\n" \
-        " pusha\n" \
-        " pushl %ds\n" \
-        " pushl %es\n" \
-        " pushl %fs\n" \
-        " pushl %gs\n" \
-        " pushl %ss\n" \
-        " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
-        " mov %ax, %ds\n" \
-        " mov %ax, %es\n" \
-        " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
-        " mov %ax, %gs\n" \
-        " pushl %esp \n" /* set TrapFrame::regs */ \
-        " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
-        " pushl %esp \n" \
-        " cld\n" \
-        " call enter_trap_no_irq \n" \
-        " call " #title "_handler\n" \
-        " jmp common_trap_exit \n" \
-    ); \
-}
-#elif ARCH(X86_64)
 #define EH_ENTRY(ec, title) \
 extern "C" void title##_asm_entry(); \
 extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
@@ -173,41 +118,19 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interrupts };
         " jmp common_trap_exit \n" \
     ); \
 }
-#endif
 // clang-format on
 void dump_registers(RegisterState const& regs)
 {
-#if ARCH(I386)
-    u16 ss;
-    u32 esp;
-    if (!(regs.cs & 3)) {
-        ss = regs.ss;
-        esp = regs.esp;
-    } else {
-        ss = regs.userspace_ss;
-        esp = regs.userspace_esp;
-    }
-#else
     u64 rsp;
     if (!(regs.cs & 3))
         rsp = regs.rsp;
     else
         rsp = regs.userspace_rsp;
-#endif
     dbgln("Exception code: {:04x} (isr: {:04x})", regs.exception_code, regs.isr_number);
-#if ARCH(I386)
-    dbgln(" pc={:#04x}:{:p} eflags={:p}", (u16)regs.cs, regs.eip, regs.eflags);
-    dbgln(" stack={:#04x}:{:p}", ss, esp);
-    dbgln(" ds={:#04x} es={:#04x} fs={:#04x} gs={:#04x}", (u16)regs.ds, (u16)regs.es, (u16)regs.fs, (u16)regs.gs);
-    dbgln(" eax={:p} ebx={:p} ecx={:p} edx={:p}", regs.eax, regs.ebx, regs.ecx, regs.edx);
-    dbgln(" ebp={:p} esp={:p} esi={:p} edi={:p}", regs.ebp, regs.esp, regs.esi, regs.edi);
-    dbgln(" cr0={:p} cr2={:p} cr3={:p} cr4={:p}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
-#else
     dbgln(" pc={:#04x}:{:p} rflags={:p}", (u16)regs.cs, regs.rip, regs.rflags);
     dbgln(" stack={:p}", rsp);
     // FIXME: Add fs_base and gs_base here
@@ -216,7 +139,6 @@ void dump_registers(RegisterState const& regs)
     dbgln(" r8={:p} r9={:p} r10={:p} r11={:p}", regs.r8, regs.r9, regs.r10, regs.r11);
     dbgln(" r12={:p} r13={:p} r14={:p} r15={:p}", regs.r12, regs.r13, regs.r14, regs.r15);
     dbgln(" cr0={:p} cr2={:p} cr3={:p} cr4={:p}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
-#endif
 }
 EH_ENTRY_NO_CODE(6, illegal_instruction);
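A note on the (regs.cs & 3) test in dump_registers(): the low two bits of the saved code-segment selector record the privilege level the trap came from, and the CPU pushes a separate SS:RSP only on a ring transition. An annotation, not commit text:

// (regs.cs & 3) == 0: trapped in ring 0, no stack switch; use the in-frame rsp.
// (regs.cs & 3) == 3: trapped in ring 3; the CPU pushed the user SS:RSP, which
// the entry stub saved as userspace_ss/userspace_rsp.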

View file

@@ -218,10 +218,8 @@ extern "C" FlatPtr ap_cpu_init_cr3;
 extern "C" u32 ap_cpu_init_cr4;
 extern "C" FlatPtr ap_cpu_gdtr;
 extern "C" FlatPtr ap_cpu_idtr;
-#if ARCH(X86_64)
 extern "C" FlatPtr ap_cpu_kernel_map_base;
 extern "C" FlatPtr ap_cpu_kernel_entry_function;
-#endif
 extern "C" [[noreturn]] void init_ap(FlatPtr, Processor*);
@@ -377,11 +375,8 @@ UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
     auto const& idtr = get_idtr();
     *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_idtr) = FlatPtr(&idtr);
-#if ARCH(X86_64)
-    // TODO: Use these also in i686 builds
     *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_map_base) = FlatPtr(kernel_mapping_base);
     *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_entry_function) = FlatPtr(&init_ap);
-#endif
     // Store the BSP's CR0 and CR4 values for the APs to use
     *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr0) = read_cr0();

View file

@@ -464,9 +464,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
         }
     }
-#if ARCH(X86_64)
     m_has_qemu_hvf_quirk = false;
-#endif
     if (max_extended_leaf >= 0x80000008) {
         // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
@@ -479,7 +477,6 @@
         m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
         // Processors that do not support CPUID function 80000008H, support a linear-address width of 32.
         m_virtual_address_bit_width = 32;
-#if ARCH(X86_64)
         // Workaround QEMU hypervisor.framework bug
         // https://gitlab.com/qemu-project/qemu/-/issues/664
         //
@@ -494,7 +491,6 @@
             m_virtual_address_bit_width = 48;
         }
         }
-#endif
     }
 }
@@ -565,7 +561,6 @@ UNMAP_AFTER_INIT void Processor::cpu_setup()
         }
     }
-#if ARCH(X86_64)
     // x86_64 processors must support the syscall feature.
     VERIFY(has_feature(CPUFeature::SYSCALL));
     MSR efer_msr(MSR_EFER);
@@ -595,7 +590,6 @@
         // the RDGSBASE instruction until we implement proper GS swapping at the userspace/kernel boundaries
        write_cr4(read_cr4() & ~0x10000);
     }
-#endif
     // Query OS-enabled CPUID features again, and set the flags if needed.
     CPUID processor_info(0x1);
@@ -652,10 +646,8 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
         dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
     dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
     dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);
-#if ARCH(X86_64)
     if (m_has_qemu_hvf_quirk)
         dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id());
-#endif
     if (cpu == 0)
         initialize_interrupts();
@@ -1459,42 +1451,10 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
     m_gdtr.limit = 0;
     write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
-#if ARCH(I386)
-    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
-    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
-    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
-    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3
-#else
     write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00af9a00); // code0
     write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00af9200); // data0
     write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x008ff200); // data3
     write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00affa00); // code3
-#endif
-#if ARCH(I386)
-    Descriptor tls_descriptor {};
-    tls_descriptor.low = tls_descriptor.high = 0;
-    tls_descriptor.dpl = 3;
-    tls_descriptor.segment_present = 1;
-    tls_descriptor.granularity = 0;
-    tls_descriptor.operation_size64 = 0;
-    tls_descriptor.operation_size32 = 1;
-    tls_descriptor.descriptor_type = 1;
-    tls_descriptor.type = 2;
-    write_gdt_entry(GDT_SELECTOR_TLS, tls_descriptor); // tls3
-    Descriptor gs_descriptor {};
-    gs_descriptor.set_base(VirtualAddress { this });
-    gs_descriptor.set_limit(sizeof(Processor) - 1);
-    gs_descriptor.dpl = 0;
-    gs_descriptor.segment_present = 1;
-    gs_descriptor.granularity = 0;
-    gs_descriptor.operation_size64 = 0;
-    gs_descriptor.operation_size32 = 1;
-    gs_descriptor.descriptor_type = 1;
-    gs_descriptor.type = 2;
-    write_gdt_entry(GDT_SELECTOR_PROC, gs_descriptor); // gs0
-#endif
     Descriptor tss_descriptor {};
     tss_descriptor.set_base(VirtualAddress { (size_t)&m_tss & 0xffffffff });
@@ -1508,36 +1468,15 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
     tss_descriptor.type = Descriptor::SystemType::AvailableTSS;
     write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss
-#if ARCH(X86_64)
     Descriptor tss_descriptor_part2 {};
     tss_descriptor_part2.low = (size_t)&m_tss >> 32;
     write_gdt_entry(GDT_SELECTOR_TSS_PART2, tss_descriptor_part2);
-#endif
     flush_gdt();
     load_task_register(GDT_SELECTOR_TSS);
-#if ARCH(X86_64)
     MSR gs_base(MSR_GS_BASE);
     gs_base.set((u64)this);
-#else
-    asm volatile(
-        "mov %%ax, %%ds\n"
-        "mov %%ax, %%es\n"
-        "mov %%ax, %%fs\n"
-        "mov %%ax, %%ss\n" ::"a"(GDT_SELECTOR_DATA0)
-        : "memory");
-    set_gs(GDT_SELECTOR_PROC);
-#endif
-#if ARCH(I386)
-    // Make sure CS points to the kernel code descriptor.
-    // clang-format off
-    asm volatile(
-        "ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
-        "sanity:\n");
-    // clang-format on
-#endif
 }
 extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
@@ -1594,13 +1533,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
             : "=m"(from_thread->fpu_state()));
     }
-#if ARCH(I386)
-    from_regs.fs = get_fs();
-    from_regs.gs = get_gs();
-    set_fs(to_regs.fs);
-    set_gs(to_regs.gs);
-#endif
     if (from_thread->process().is_traced())
         read_debug_registers_into(from_thread->debug_register_state());
@@ -1611,14 +1543,8 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     }
     auto& processor = Processor::current();
-#if ARCH(I386)
-    auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
-    tls_descriptor.set_base(to_thread->thread_specific_data());
-    tls_descriptor.set_limit(to_thread->thread_specific_region_size());
-#else
     MSR fs_base_msr(MSR_FS_BASE);
     fs_base_msr.set(to_thread->thread_specific_data().get());
-#endif
     if (from_regs.cr3 != to_regs.cr3)
         write_cr3(to_regs.cr3);

View file

@@ -42,14 +42,9 @@ namespace Kernel {
 ALWAYS_INLINE bool validate_canonical_address(size_t address)
 {
-#if ARCH(X86_64)
     auto most_significant_bits = Processor::current().virtual_address_bit_width() - 1;
     auto insignificant_bits = address >> most_significant_bits;
     return insignificant_bits == 0 || insignificant_bits == (0xffffffffffffffffull >> most_significant_bits);
-#else
-    (void)address;
-    return true;
-#endif
 }
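A worked example of the canonical-address check above, assuming a 48-bit virtual address width (so most_significant_bits is 47): bits 47..63 must be all zeros or all ones.

// 0xffffffffffffffffull >> 47 == 0x1ffff (17 one-bits)
// 0x00007fffffffffff >> 47 == 0       -> canonical (low half)
// 0xffff800000000000 >> 47 == 0x1ffff -> canonical (high half)
// 0x0000800000000000 >> 47 == 1       -> non-canonical, rejected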
 CODE_SECTION(".text.safemem")
@@ -73,11 +68,7 @@ NEVER_INLINE bool safe_memcpy(void* dest_ptr, void const* src_ptr, size_t n, void*& fault_at)
     asm volatile(
         ".globl safe_memcpy_ins_1 \n"
         "safe_memcpy_ins_1: \n"
-#if ARCH(I386)
-        "rep movsl \n"
-#else
         "rep movsq \n"
-#endif
         ".globl safe_memcpy_1_faulted \n"
         "safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
         : "=S"(src),
@@ -168,11 +159,7 @@ NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
     asm volatile(
         ".globl safe_memset_ins_1 \n"
         "safe_memset_ins_1: \n"
-#if ARCH(I386)
-        "rep stosl \n"
-#else
         "rep stosq \n"
-#endif
         ".globl safe_memset_1_faulted \n"
         "safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
         : "=D"(dest),

View file

@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2022, Idan Horowitz <idan.horowitz@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-#include <AK/Types.h>
-extern "C" {
-#if defined(AK_COMPILER_GCC) // FIXME: Remove this file once GCC supports 8-byte atomics on i686
-u64 kernel__atomic_compare_exchange_8(u64 volatile*, u64*, u64, int, int);
-#    pragma redefine_extname kernel__atomic_compare_exchange_8 __atomic_compare_exchange_8
-u64 kernel__atomic_compare_exchange_8(u64 volatile* memory, u64* expected, u64 desired, int, int)
-{
-    u64 previous;
-    asm volatile("lock; cmpxchg8b %1"
-                 : "=A"(previous), "+m"(*memory)
-                 : "b"((u32)desired), "c"((u32)(desired >> 32)), "0"(*expected));
-    return previous;
-}
-u64 kernel__atomic_load_8(u64 volatile*, int);
-#    pragma redefine_extname kernel__atomic_load_8 __atomic_load_8
-u64 kernel__atomic_load_8(u64 volatile* memory, int)
-{
-    u64 previous;
-    asm volatile("movl %%ebx, %%eax\n"
-                 "movl %%ecx, %%edx\n"
-                 "lock; cmpxchg8b %1"
-                 : "=A"(previous), "+m"(*memory));
-    return previous;
-}
-void kernel__atomic_store_8(u64 volatile*, u64, int);
-#    pragma redefine_extname kernel__atomic_store_8 __atomic_store_8
-void kernel__atomic_store_8(u64 volatile* memory, u64 value, int)
-{
-    u64 expected = *memory;
-    asm volatile("1: lock; cmpxchg8b %0\n"
-                 "   jne 1b"
-                 : "=m"(*memory)
-                 : "b"((u32)value), "c"((u32)(value >> 32)), "A"(expected));
-}
-u64 kernel__atomic_fetch_add_8(u64 volatile*, u64, int);
-#    pragma redefine_extname kernel__atomic_fetch_add_8 __atomic_fetch_add_8
-u64 kernel__atomic_fetch_add_8(u64 volatile* memory, u64 value, int memory_order)
-{
-    u64 previous = *memory;
-    while (kernel__atomic_compare_exchange_8(memory, &previous, previous + value, memory_order, memory_order) != previous)
-        ;
-    return previous;
-}
-#endif
-}
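Background on why this deleted file existed at all, and why it can go with i686: GCC lowers 64-bit __atomic builtins on i686 to out-of-line __atomic_*_8 library calls, which the kernel had to supply itself since it does not link libatomic. A hedged sketch of the kind of caller that used to need these shims:

typedef unsigned long long u64;

// On i686/GCC, this load compiles to a call to __atomic_load_8, which the
// shim above implemented with lock; cmpxchg8b. On x86_64 an aligned 64-bit
// load is a single mov, so no shim is needed once i686 support is gone.
u64 load_counter(u64 volatile* counter)
{
    return __atomic_load_n(counter, __ATOMIC_SEQ_CST);
}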

View file

@@ -122,9 +122,7 @@ READONLY_AFTER_INIT PhysicalAddress end_of_prekernel_image;
 READONLY_AFTER_INIT size_t physical_to_virtual_offset;
 READONLY_AFTER_INIT FlatPtr kernel_mapping_base;
 READONLY_AFTER_INIT FlatPtr kernel_load_base;
-#if ARCH(X86_64)
 READONLY_AFTER_INIT PhysicalAddress boot_pml4t;
-#endif
 READONLY_AFTER_INIT PhysicalAddress boot_pdpt;
 READONLY_AFTER_INIT PhysicalAddress boot_pd0;
 READONLY_AFTER_INIT PhysicalAddress boot_pd_kernel;
@@ -154,11 +152,9 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     physical_to_virtual_offset = boot_info.physical_to_virtual_offset;
     kernel_mapping_base = boot_info.kernel_mapping_base;
     kernel_load_base = boot_info.kernel_load_base;
-#if ARCH(X86_64)
     gdt64ptr = boot_info.gdt64ptr;
     code64_sel = boot_info.code64_sel;
     boot_pml4t = PhysicalAddress { boot_info.boot_pml4t };
-#endif
     boot_pdpt = PhysicalAddress { boot_info.boot_pdpt };
     boot_pd0 = PhysicalAddress { boot_info.boot_pd0 };
     boot_pd_kernel = PhysicalAddress { boot_info.boot_pd_kernel };

View file

@@ -4,11 +4,7 @@
 .global gdt64ptr
 gdt64ptr:
-#if ARCH(X86_64)
     .quad 0
-#else
-    .int 0
-#endif
 .global code64_sel
 code64_sel: