1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 09:07:45 +00:00

Kernel: Take some baby steps towards x86_64

Make more of the kernel compile in 64-bit mode, and make some things
pointer-size-agnostic (by using FlatPtr.)

There's a lot of work to do here before the kernel will even compile.
This commit is contained in:
Andreas Kling 2021-02-25 16:18:36 +01:00
parent eb08a0edd5
commit 8f70528f30
12 changed files with 187 additions and 118 deletions

View file

@ -210,9 +210,7 @@ void page_fault_handler(TrapFrame* trap)
clac();
auto& regs = *trap->regs;
u32 fault_address;
asm("movl %%cr2, %%eax"
: "=a"(fault_address));
auto fault_address = read_cr2();
if constexpr (PAGE_FAULT_DEBUG) {
u32 fault_page_directory = read_cr3();
@ -717,14 +715,22 @@ void exit_trap(TrapFrame* trap)
return Processor::current().exit_trap(*trap);
}
// Writes `value` into the CR0 control register.
// Uses the pointer-sized FlatPtr so the same code serves 32-bit and 64-bit builds;
// the asm mnemonic operand differs per architecture (eax vs. rax).
UNMAP_AFTER_INIT void write_cr0(FlatPtr value)
{
#if ARCH(I386)
    asm volatile("mov %%eax, %%cr0" ::"a"(value));
#else
    asm volatile("mov %%rax, %%cr0" ::"a"(value));
#endif
}
// Writes `value` into the CR4 control register (feature-enable flags, e.g. SSE bits).
UNMAP_AFTER_INIT void write_cr4(FlatPtr value)
{
#if ARCH(I386)
    asm volatile("mov %%eax, %%cr4" ::"a"(value));
#else
    asm volatile("mov %%rax, %%cr4" ::"a"(value));
#endif
}
UNMAP_AFTER_INIT static void sse_init()
@ -733,50 +739,80 @@ UNMAP_AFTER_INIT static void sse_init()
write_cr4(read_cr4() | 0x600);
}
u32 read_cr0()
FlatPtr read_cr0()
{
u32 cr0;
asm("movl %%cr0, %%eax"
FlatPtr cr0;
#if ARCH(I386)
asm("mov %%cr0, %%eax"
: "=a"(cr0));
#else
asm("mov %%cr0, %%rax"
: "=a"(cr0));
#endif
return cr0;
}
u32 read_cr2()
FlatPtr read_cr2()
{
u32 cr2;
asm("movl %%cr2, %%eax"
FlatPtr cr2;
#if ARCH(I386)
asm("mov %%cr2, %%eax"
: "=a"(cr2));
#else
asm("mov %%cr2, %%rax"
: "=a"(cr2));
#endif
return cr2;
}
u32 read_cr3()
FlatPtr read_cr3()
{
u32 cr3;
asm("movl %%cr3, %%eax"
FlatPtr cr3;
#if ARCH(I386)
asm("mov %%cr3, %%eax"
: "=a"(cr3));
#else
asm("mov %%cr3, %%rax"
: "=a"(cr3));
#endif
return cr3;
}
void write_cr3(u32 cr3)
void write_cr3(FlatPtr cr3)
{
// NOTE: If you're here from a GPF crash, it's very likely that a PDPT entry is incorrect, not this!
asm volatile("movl %%eax, %%cr3" ::"a"(cr3)
#if ARCH(I386)
asm volatile("mov %%eax, %%cr3" ::"a"(cr3)
: "memory");
#else
asm volatile("mov %%rax, %%cr3" ::"a"(cr3)
: "memory");
#endif
}
u32 read_cr4()
FlatPtr read_cr4()
{
u32 cr4;
asm("movl %%cr4, %%eax"
FlatPtr cr4;
#if ARCH(I386)
asm("mov %%cr4, %%eax"
: "=a"(cr4));
#else
asm("mov %%cr4, %%rax"
: "=a"(cr4));
#endif
return cr4;
}
u32 read_dr6()
FlatPtr read_dr6()
{
u32 dr6;
asm("movl %%dr6, %%eax"
FlatPtr dr6;
#if ARCH(I386)
asm("mov %%dr6, %%eax"
: "=a"(dr6));
#else
asm("mov %%dr6, %%rax"
: "=a"(dr6));
#endif
return dr6;
}
@ -1291,6 +1327,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
from_thread->save_critical(m_in_critical);
#if ARCH(I386)
// clang-format off
// Switch to new thread context, passing from_thread and to_thread
// through to the new context using registers edx and eax
@ -1333,6 +1370,9 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
: "memory"
);
// clang-format on
#else
PANIC("Context switching not implemented.");
#endif
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
@ -1576,6 +1616,7 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
m_scheduler_initialized = true;
#if ARCH(I386)
// clang-format off
asm volatile(
"movl %[new_esp], %%esp \n" // switch to new stack
@ -1601,6 +1642,7 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
[cpu] "c" (id())
);
// clang-format on
#endif
VERIFY_NOT_REACHED();
}

View file

@ -296,9 +296,9 @@ void load_task_register(u16 selector);
#define sti() asm volatile("sti" :: \
: "memory")
inline u32 cpu_flags()
inline FlatPtr cpu_flags()
{
u32 flags;
FlatPtr flags;
asm volatile(
"pushf\n"
"pop %0\n"
@ -441,29 +441,36 @@ private:
};
// Snapshot of the CPU registers pushed on interrupt/exception entry.
// Member order matches the push sequence in the assembly interrupt stubs,
// so this layout must not be changed without updating those stubs.
// All register-sized members use FlatPtr so the struct is pointer-size-agnostic.
struct [[gnu::packed]] RegisterState {
    FlatPtr ss;
    FlatPtr gs;
    FlatPtr fs;
    FlatPtr es;
    FlatPtr ds;
    FlatPtr edi;
    FlatPtr esi;
    FlatPtr ebp;
    FlatPtr esp;
    FlatPtr ebx;
    FlatPtr edx;
    FlatPtr ecx;
    FlatPtr eax;
    u16 exception_code;
    u16 isr_number;
#if ARCH(X86_64)
    // Keeps the following FlatPtr members 8-byte aligned on x86_64
    // (exception_code + isr_number + padding = 8 bytes).
    u32 padding;
#endif
    FlatPtr eip;
    FlatPtr cs;
    FlatPtr eflags;
    FlatPtr userspace_esp;
    FlatPtr userspace_ss;
};
// Size of RegisterState as seen by the assembly interrupt stubs:
// 19 register-sized slots (the u16 pair, plus x86_64 padding, together
// occupy one slot). The static_assert keeps asm and C++ in sync.
#if ARCH(I386)
#    define REGISTER_STATE_SIZE (19 * 4)
#else
#    define REGISTER_STATE_SIZE (19 * 8)
#endif
static_assert(REGISTER_STATE_SIZE == sizeof(RegisterState));
void copy_kernel_registers_into_ptrace_registers(PtraceRegisters&, const RegisterState&);
@ -494,16 +501,16 @@ inline FlatPtr offset_in_page(const void* address)
return offset_in_page((FlatPtr)address);
}
// Accessors for x86 control/debug registers; FlatPtr keeps the
// declarations pointer-size-agnostic across i386 and x86_64 builds.
FlatPtr read_cr0();
FlatPtr read_cr2();
FlatPtr read_cr3();
FlatPtr read_cr4();

void write_cr0(FlatPtr);
void write_cr3(FlatPtr);
void write_cr4(FlatPtr);

FlatPtr read_dr6();
static inline bool is_kernel_mode()
{
@ -1071,7 +1078,12 @@ struct TrapFrame {
TrapFrame& operator=(TrapFrame&&) = delete;
};
// Size of TrapFrame (three pointer-sized members) for use from assembly;
// spelled out per-architecture because asm cannot evaluate sizeof.
// The static_assert keeps asm and C++ in sync.
#if ARCH(I386)
#    define TRAP_FRAME_SIZE (3 * 4)
#else
#    define TRAP_FRAME_SIZE (3 * 8)
#endif
static_assert(TRAP_FRAME_SIZE == sizeof(TrapFrame));
extern "C" void enter_trap_no_irq(TrapFrame*);