
Kernel: Add stubs for missing x86_64 functionality

This adds just enough stubs to make the kernel compile on x86_64. Obviously
it won't do anything useful - in fact it won't even attempt to boot because
Multiboot doesn't support ELF64 binaries - but it gets those compiler errors
out of the way so more progress can be made getting all the missing
functionality in place.
Gunnar Beutner 2021-06-23 21:54:41 +02:00 committed by Andreas Kling
parent f2eb759901
commit 38fca26f54
21 changed files with 295 additions and 40 deletions

View file

@@ -289,7 +289,7 @@ public:
     ALWAYS_INLINE static Thread* idle_thread()
     {
         // See comment in Processor::current_thread
-        return (Thread*)read_fs_u32(__builtin_offsetof(Processor, m_idle_thread));
+        return (Thread*)read_fs_ptr(__builtin_offsetof(Processor, m_idle_thread));
     }
 
     ALWAYS_INLINE u32 get_id() const
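The one-line change swaps a 32-bit read for a pointer-width one: m_idle_thread is a Thread*, so on x86_64 read_fs_u32() would hand back only the low 32 bits of the pointer. The definition of read_fs_ptr() is not part of this diff; a hypothetical sketch of its likely shape:

// Hypothetical sketch, not the actual definition: an FS-relative load that
// is pointer-width on either architecture, so a Thread* is never truncated.
ALWAYS_INLINE static FlatPtr read_fs_ptr(u32 offset)
{
    FlatPtr val;
    asm volatile("mov %%fs:%a[off], %[val]"
                 : [val] "=r"(val)
                 : [off] "ir"(offset));
    return val;
}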

View file

@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Assertions.h>
+#include <AK/Types.h>
+#include <Kernel/Arch/x86/CPU.h>
+#include <Kernel/Arch/x86/Processor.h>
+#include <Kernel/Arch/x86/TrapFrame.h>
+#include <Kernel/KSyms.h>
+#include <Kernel/Process.h>
+
+void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
+{
+    asm volatile("cli");
+    critical_dmesgln("ASSERTION FAILED: {}", msg);
+    critical_dmesgln("{}:{} in {}", file, line, func);
+
+    abort();
+}
+
+[[noreturn]] void abort()
+{
+    // Switch back to the current process's page tables if there are any.
+    // Otherwise stack walking will be a disaster.
+    auto process = Process::current();
+    if (process)
+        MM.enter_process_paging_scope(*process);
+
+    Kernel::dump_backtrace();
+    Processor::halt();
+
+    abort();
+}
+
+[[noreturn]] void _abort()
+{
+    asm volatile("ud2");
+    __builtin_unreachable();
+}
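This new file is the common back end for AK's assertion machinery: every VERIFY(), VERIFY_NOT_REACHED() and TODO() in the kernel ends up in __assertion_failed() and then abort(). A simplified sketch of that chain, assuming the usual AK/Assertions.h shape rather than quoting it verbatim:

// Simplified sketch of the macros that funnel into __assertion_failed().
#define VERIFY(expr)                                                            \
    do {                                                                        \
        if (!static_cast<bool>(expr))                                           \
            __assertion_failed(#expr, __FILE__, __LINE__, __PRETTY_FUNCTION__); \
    } while (0)
#define VERIFY_NOT_REACHED() VERIFY(false)
#define TODO() VERIFY_NOT_REACHED() // all of this commit's stubs panic via this path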

View file

@@ -485,7 +485,13 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         // to be ebp.
         ProcessPagingScope paging_scope(thread.process());
         auto& tss = thread.tss();
-        u32* stack_top = reinterpret_cast<u32*>(tss.esp);
+        u32* stack_top;
+#if ARCH(I386)
+        stack_top = reinterpret_cast<u32*>(tss.esp);
+#else
+        (void)tss;
+        TODO();
+#endif
         if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
             if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
                 frame_ptr = 0;
@@ -494,7 +500,11 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
             if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
                 frame_ptr = 0;
         }
+#if ARCH(I386)
         eip = tss.eip;
+#else
+        TODO();
+#endif
         // TODO: We need to leave the scheduler lock here, but we also
         // need to prevent the target thread from being run while
         // we walk the stack
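The TODO()s are unavoidable here rather than mere placeholders: tss.esp and tss.eip have no x86_64 counterpart, because the 64-bit TSS no longer stores an instruction pointer or general-purpose registers at all. A sketch of the two layouts (abbreviated, per the Intel SDM; these are not SerenityOS declarations):

// 32-bit TSS: the CPU saves full register state here on a hardware task
// switch, which is why tss.esp / tss.eip are meaningful on i386.
struct [[gnu::packed]] TSS32 {
    u32 link, esp0, ss0, esp1, ss1, esp2, ss2;
    u32 cr3, eip, eflags;
    u32 eax, ecx, edx, ebx, esp, ebp, esi, edi;
    // ... segment selectors, ldtr, I/O map base
};

// 64-bit TSS: only per-privilege stack pointers and the interrupt stack
// table. An x86_64 backtrace has to come from the thread's software-saved
// register state instead.
struct [[gnu::packed]] TSS64 {
    u32 reserved0;
    u64 rsp0, rsp1, rsp2;
    u64 reserved1;
    u64 ist[7];
    u64 reserved2;
    u16 reserved3;
    u16 iomap_base;
};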

View file

@@ -101,32 +101,3 @@ extern "C" u32 do_init_context(Thread* thread, u32 flags)
     }
 }
-
-void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
-{
-    asm volatile("cli");
-    critical_dmesgln("ASSERTION FAILED: {}", msg);
-    critical_dmesgln("{}:{} in {}", file, line, func);
-
-    abort();
-}
-
-[[noreturn]] void abort()
-{
-    // Switch back to the current process's page tables if there are any.
-    // Otherwise stack walking will be a disaster.
-    auto process = Process::current();
-    if (process)
-        MM.enter_process_paging_scope(*process);
-
-    Kernel::dump_backtrace();
-    Processor::halt();
-
-    abort();
-}
-
-[[noreturn]] void _abort()
-{
-    asm volatile("ud2");
-    __builtin_unreachable();
-}

View file

@@ -6,6 +6,7 @@
#include <Kernel/Arch/x86/DescriptorTable.h>
#include <Kernel/Arch/x86/TrapFrame.h>
// clang-format off
asm(
".globl interrupt_common_asm_entry\n"

View file

@@ -37,7 +37,6 @@ asm(
 );
 // clang-format on
 
-#if ARCH(I386)
 // clang-format off
 asm(
     ".global do_assume_context \n"
@@ -59,7 +58,6 @@ asm(
     "    jmp enter_thread_context \n"
 );
 // clang-format on
-#endif
 
 String Processor::platform_string() const
 {

View file

@@ -317,7 +317,8 @@ apic_ap_start32:
     movl $0x80000001, %eax
     cpuid
     testl $0x100000, %edx
-    je (1f - apic_ap_start + 0x8000)
+    // TODO: Uncomment this
+    //je (1f - apic_ap_start + 0x8000)
     /* turn on IA32_EFER.NXE */
     movl $0xc0000080, %ecx
     rdmsr
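For context on the disabled branch: CPUID leaf 0x80000001 reports NX (No-eXecute) support in EDX bit 20, and the code that follows sets bit 11 (NXE) of the IA32_EFER MSR, 0xC0000080. With the je commented out, the AP boot path simply assumes NX is present. The same probe in C++, as a sketch (the raw inline asm stands in for whatever cpuid/rdmsr/wrmsr helpers the kernel provides):

// Sketch: check for NX via CPUID, then set IA32_EFER.NXE if available.
static bool cpu_supports_nx()
{
    u32 eax, ebx, ecx, edx;
    asm volatile("cpuid"
                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                 : "a"(0x80000001u));
    return (edx & (1u << 20)) != 0; // EDX bit 20 = NX
}

static void enable_nx_if_supported()
{
    if (!cpu_supports_nx())
        return;
    constexpr u32 msr_efer = 0xc0000080;
    u32 lo, hi;
    asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr_efer));
    lo |= 1u << 11; // IA32_EFER.NXE
    asm volatile("wrmsr" : : "a"(lo), "d"(hi), "c"(msr_efer));
}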

View file

@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Assertions.h>
+#include <AK/Types.h>
+#include <Kernel/Arch/x86/CPU.h>
+#include <Kernel/Arch/x86/Processor.h>
+#include <Kernel/Arch/x86/TrapFrame.h>
+#include <Kernel/KSyms.h>
+#include <Kernel/Process.h>
+#include <Kernel/Thread.h>
+
+namespace Kernel {
+
+// The compiler can't see the calls to these functions inside assembly.
+// Declare them, to avoid dead code warnings.
+extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
+extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
+extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
+
+extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
+{
+    (void)from_thread;
+    (void)to_thread;
+    TODO();
+}
+
+extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
+{
+    TODO();
+}
+
+extern "C" u32 do_init_context(Thread* thread, u32 flags)
+{
+    (void)thread;
+    (void)flags;
+    TODO();
+}
+
+}
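The __attribute__((used)) annotations matter because, once the real context-switch assembly lands, these functions will only ever be called from asm() blocks the optimizer cannot see into. A minimal illustration of the pattern, with hypothetical names:

// called_only_from_asm() has no visible C++ caller; __attribute__((used))
// keeps the definition around (and silences dead-code diagnostics) so the
// call from the assembly below still links.
extern "C" void called_only_from_asm() __attribute__((used));

extern "C" void called_only_from_asm()
{
}

asm(
    ".globl asm_entry_point \n"
    "asm_entry_point: \n"
    "    call called_only_from_asm \n"
    "    ret \n"
);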

View file

@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/x86/DescriptorTable.h>
+#include <Kernel/Arch/x86/TrapFrame.h>
+
+// clang-format off
+asm(
+    ".globl interrupt_common_asm_entry\n"
+    "interrupt_common_asm_entry: \n"
+    "    int3 \n" // FIXME
+    ".globl common_trap_exit \n"
+    "common_trap_exit: \n"
+    // another thread may have handled this trap at this point, so don't
+    // make assumptions about the stack other than there's a TrapFrame
+    // and a pointer to it.
+    "    call exit_trap \n"
+    "    int3 \n" // FIXME
+);
+// clang-format on

View file

@@ -88,6 +88,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
 
+#if 0
     auto& tss = thread.tss();
     bool return_to_user = (tss.cs & 3) != 0;
 
@@ -116,7 +117,6 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // However, the first step is to always start in kernel mode with thread_context_first_enter
     RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
     // FIXME: copy state to be recovered through TSS
-    TODO();
 
     // make space for a trap frame
     stack_top -= sizeof(TrapFrame);
@@ -161,6 +161,9 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     tss.gs = GDT_SELECTOR_DATA0;
     tss.ss = GDT_SELECTOR_DATA0;
     tss.fs = GDT_SELECTOR_PROC;
+#else
+    TODO();
+#endif
 
     return stack_top;
 }
@@ -202,11 +205,15 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
 
     auto& tss = initial_thread.tss();
     m_tss = tss;
 
+#if 0
     m_tss.esp0 = tss.esp0;
     m_tss.ss0 = GDT_SELECTOR_DATA0;
     // user mode needs to be able to switch to kernel mode:
     m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
     m_tss.fs = GDT_SELECTOR_PROC | 3;
+#else
+    TODO();
+#endif
 
     m_scheduler_initialized = true;
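A note on the | 3 in the #if 0 block: the low two bits of an x86 segment selector are the requested privilege level, so OR-ing in 3 is what makes a selector loadable from ring 3. Sketch of the standard encoding (not a SerenityOS API):

// selector = (descriptor index << 3) | (table indicator << 2) | RPL.
// GDT_SELECTOR_CODE0 | 3 therefore means "GDT entry for ring-0 code,
// requested from privilege level 3".
constexpr u16 make_selector(u16 index, bool is_ldt, u8 rpl)
{
    return static_cast<u16>((index << 3) | (is_ldt ? 1 << 2 : 0) | (rpl & 3));
}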

View file

@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Arch/x86/SafeMem.h>
+
+#define CODE_SECTION(section_name) __attribute__((section(section_name)))
+
+namespace Kernel {
+
+CODE_SECTION(".text.safemem")
+NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
+{
+    (void)dest_ptr;
+    (void)src_ptr;
+    (void)n;
+    (void)fault_at;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem")
+NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
+{
+    (void)str;
+    (void)max_n;
+    (void)fault_at;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem")
+NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
+{
+    (void)dest_ptr;
+    (void)c;
+    (void)n;
+    (void)fault_at;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
+{
+    (void)var;
+    (void)val;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
+{
+    (void)var;
+    (void)val;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
+{
+    (void)var;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
+{
+    (void)var;
+    (void)val;
+    TODO();
+}
+
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
+{
+    (void)var;
+    (void)expected;
+    (void)val;
+    TODO();
+}
+
+bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
+{
+    (void)regs;
+    (void)fault_address;
+    TODO();
+}
+
+}
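Even as stubs, the signatures pin down the contract: each routine returns failure instead of taking the kernel down on a bad access, reporting the faulting address through the out-parameter, and handle_safe_access_fault() is the hook the page-fault handler uses to recover when a fault lands in one of the .text.safemem sections. Usage sketch, mirroring the safe_memcpy() call in capture_stack_trace above (untrusted_ptr is a hypothetical address being probed):

// Probe a possibly-bad pointer; on failure, fault_at holds the address
// whose access faulted instead of the kernel crashing.
void* fault_at = nullptr;
FlatPtr value = 0;
if (!safe_memcpy(&value, reinterpret_cast<const void*>(untrusted_ptr), sizeof(FlatPtr), fault_at))
    dbgln("safe_memcpy faulted at {}", fault_at);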