From ebdb899d3d3ca4c47d2d05b2db74e61b18d18ce2 Mon Sep 17 00:00:00 2001 From: Timon Kruiper Date: Fri, 6 Jan 2023 16:01:47 +0100 Subject: [PATCH] Kernel/aarch64: Add pre_init function that sets up the CPU and MMU This is a separate file that behaves similarly to the Prekernel for x86_64, and makes sure the CPU is dropped to EL1, the MMU is enabled, and makes sure the CPU is running in high virtual memory. This code then jumps to the usual init function of the kernel. --- Kernel/Arch/aarch64/Processor.cpp | 2 - Kernel/Arch/aarch64/boot.S | 2 +- Kernel/Arch/aarch64/init.cpp | 3 -- Kernel/Arch/aarch64/pre_init.cpp | 61 +++++++++++++++++++++++++++++++ Kernel/CMakeLists.txt | 1 + 5 files changed, 63 insertions(+), 6 deletions(-) create mode 100644 Kernel/Arch/aarch64/pre_init.cpp diff --git a/Kernel/Arch/aarch64/Processor.cpp b/Kernel/Arch/aarch64/Processor.cpp index 2e43f89af2..418894800a 100644 --- a/Kernel/Arch/aarch64/Processor.cpp +++ b/Kernel/Arch/aarch64/Processor.cpp @@ -36,8 +36,6 @@ void Processor::install(u32 cpu) m_physical_address_bit_width = detect_physical_address_bit_width(); m_virtual_address_bit_width = detect_virtual_address_bit_width(); - initialize_exceptions(); - g_current_processor = this; } diff --git a/Kernel/Arch/aarch64/boot.S b/Kernel/Arch/aarch64/boot.S index ac4796f616..d5610ba0ac 100644 --- a/Kernel/Arch/aarch64/boot.S +++ b/Kernel/Arch/aarch64/boot.S @@ -32,7 +32,7 @@ Lbss_clear_loop: subs x15, x15, #1 bne Lbss_clear_loop - b init + b pre_init halt: msr daifset, #2 diff --git a/Kernel/Arch/aarch64/init.cpp b/Kernel/Arch/aarch64/init.cpp index 3709baec45..3ecd599bad 100644 --- a/Kernel/Arch/aarch64/init.cpp +++ b/Kernel/Arch/aarch64/init.cpp @@ -156,9 +156,6 @@ extern "C" [[noreturn]] void init() new (&bootstrap_processor()) Processor(); bootstrap_processor().install(0); - // We want to enable the MMU as fast as possible to make the boot faster. 
- init_page_tables(); - // We call the constructors of kmalloc.cpp separately, because other constructors in the Kernel // might rely on being able to call new/kmalloc in the constructor. We do have to run the // kmalloc constructors, because kmalloc_init relies on that. diff --git a/Kernel/Arch/aarch64/pre_init.cpp b/Kernel/Arch/aarch64/pre_init.cpp new file mode 100644 index 0000000000..0528c065f3 --- /dev/null +++ b/Kernel/Arch/aarch64/pre_init.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2023, Timon Kruiper + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include +#include +#include +#include + +// We arrive here from boot.S with the MMU disabled and in an unknown exception level (EL). +// The kernel is linked at the virtual address, so we have to be really careful when accessing +// global variables, as the MMU is not yet enabled. + +// FIXME: This should probably be shared with the Prekernel. + +namespace Kernel { + +extern "C" [[noreturn]] void init(); + +extern "C" [[noreturn]] void pre_init(); +extern "C" [[noreturn]] void pre_init() +{ + // We want to drop to EL1 as soon as possible, because that is the + // exception level the kernel should run at. + initialize_exceptions(); + + // Next step is to set up page tables and enable the MMU. + init_page_tables(); + + // At this point the MMU is enabled, physical memory is identity mapped, + // and the kernel is also mapped into higher virtual memory. However we are still executing + // from the physical memory address, so we have to jump to the kernel in high memory. We also need to + // switch the stack pointer to high memory, such that we can unmap the identity mapping. + + // Continue execution at high virtual address, by using an absolute jump. + asm volatile( + "ldr x0, =1f \n" + "br x0 \n" + "1: \n" :: + : "x0"); + + // Add kernel_mapping_base to the stack pointer, such that it is also using the mapping + // in high virtual memory. 
+ asm volatile( + "mov x0, %[base] \n" + "add sp, sp, x0 \n" ::[base] "r"(kernel_mapping_base) + : "x0"); + + // Clear the frame pointer (x29) and link register (x30) to make sure the kernel cannot backtrace + // into this code, and jump to the actual init function in the kernel. + asm volatile( + "mov x29, xzr \n" + "mov x30, xzr \n" + "b init \n"); + + VERIFY_NOT_REACHED(); +} + +} diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt index 0f8e39a79a..a6f70f5656 100644 --- a/Kernel/CMakeLists.txt +++ b/Kernel/CMakeLists.txt @@ -462,6 +462,7 @@ elseif("${SERENITY_ARCH}" STREQUAL "aarch64") Arch/aarch64/MMU.cpp Arch/aarch64/PageDirectory.cpp Arch/aarch64/Panic.cpp + Arch/aarch64/pre_init.cpp Arch/aarch64/Processor.cpp Arch/aarch64/SafeMem.cpp Arch/aarch64/SmapDisabler.cpp