From d850e483f7b23b848ed6f04befc3fa9f3da98893 Mon Sep 17 00:00:00 2001 From: Idan Horowitz Date: Tue, 22 Mar 2022 13:34:40 +0200 Subject: [PATCH] Kernel: Keep kernel base load address 2 MiB aligned This requirement comes from the fact that the Prekernel mapping logic only uses 2 MiB pages. This unfortunately reduces the bits of entropy in kernel addresses from 16 bits to 7, but it could be further improved in the future by making the Prekernel mapping logic a bit more dynamic. --- Kernel/Prekernel/init.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Kernel/Prekernel/init.cpp b/Kernel/Prekernel/init.cpp index ef55299346..9fced041a2 100644 --- a/Kernel/Prekernel/init.cpp +++ b/Kernel/Prekernel/init.cpp @@ -91,17 +91,15 @@ extern "C" [[noreturn]] void init() FlatPtr kernel_physical_base = 0x200000; #if ARCH(I386) - FlatPtr kernel_load_base = 0xc0200000; + FlatPtr default_kernel_load_base = 0xc0200000; #else - FlatPtr kernel_load_base = 0x2000200000; + FlatPtr default_kernel_load_base = 0x2000200000; #endif -#if ARCH(X86_64) // KASLR static constexpr auto maximum_offset = 256 * MiB; - kernel_load_base = kernel_load_base + (generate_secure_seed() % maximum_offset); - kernel_load_base = kernel_load_base & ~(PAGE_SIZE - 1); -#endif + FlatPtr kernel_load_base = default_kernel_load_base + (generate_secure_seed() % maximum_offset); + kernel_load_base &= ~(2 * MiB - 1); FlatPtr kernel_load_end = 0; for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {