1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 10:28:10 +00:00

Kernel: Support loading the kernel at almost arbitrary virtual addresses

This enables further work on implementing KASLR by adding relocation
support to the pre-kernel and updating the kernel to be less dependent
on specific virtual memory layouts.
This commit is contained in:
Gunnar Beutner 2021-07-26 15:10:51 +02:00 committed by Andreas Kling
parent e3d2ca6bd2
commit 57417a3d6e
20 changed files with 123 additions and 87 deletions

View file

@@ -12,6 +12,7 @@
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/VirtualAddress.h>
#include <LibC/elf.h>
#include <LibELF/Relocation.h>
// Defined in the linker script
extern size_t __stack_chk_guard;
@@ -28,7 +29,8 @@ extern "C" u64 boot_pdpt[512];
extern "C" u64 boot_pd0[512];
extern "C" u64 boot_pd0_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel[512];
extern "C" u64 boot_pd_kernel_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel_pt0[512];
extern "C" u64 boot_pd_kernel_image_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel_pt1023[512];
extern "C" char const kernel_cmdline[4096];
@@ -38,10 +40,20 @@ extern "C" {
multiboot_info_t* multiboot_info_ptr;
}
[[noreturn]] static void halt()
{
asm volatile("hlt");
__builtin_unreachable();
}
void __stack_chk_fail()
{
asm("ud2");
__builtin_unreachable();
halt();
}
void __assertion_failed(char const*, char const*, unsigned int, char const*)
{
halt();
}
namespace Kernel {
@@ -50,11 +62,6 @@ namespace Kernel {
// We declare them here to ensure their signatures don't accidentally change.
extern "C" [[noreturn]] void init();
static void halt()
{
asm volatile("hlt");
}
// SerenityOS Pre-Kernel Environment C++ entry point :^)
//
// This is where C++ execution begins, after boot.S transfers control here.
@@ -75,50 +82,63 @@ extern "C" [[noreturn]] void init()
halt();
__builtin_memcpy(kernel_program_headers, kernel_image + kernel_elf_header.e_phoff, sizeof(ElfW(Phdr)) * kernel_elf_header.e_phnum);
FlatPtr kernel_load_base = 0;
FlatPtr kernel_physical_base = 0x200000;
#if ARCH(I386)
FlatPtr kernel_load_base = 0xc0200000;
#else
FlatPtr kernel_load_base = 0x2000200000;
#endif
FlatPtr kernel_load_end = 0;
for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
auto start = kernel_program_header.p_vaddr;
auto start = kernel_load_base + kernel_program_header.p_vaddr;
auto end = start + kernel_program_header.p_memsz;
if (start < (FlatPtr)end_of_prekernel_image)
halt();
if (kernel_program_header.p_paddr < (FlatPtr)end_of_prekernel_image)
if (kernel_physical_base + kernel_program_header.p_paddr < (FlatPtr)end_of_prekernel_image)
halt();
if (kernel_load_base == 0 || start < kernel_load_base)
kernel_load_base = start;
if (end > kernel_load_end)
kernel_load_end = end;
}
// align to 1GB
kernel_load_base &= ~(FlatPtr)0x3fffffff;
FlatPtr kernel_mapping_base = kernel_load_base & ~(FlatPtr)0x3fffffff;
VERIFY(kernel_load_base % 0x1000 == 0);
VERIFY(kernel_load_base >= kernel_mapping_base + 0x200000);
#if ARCH(I386)
int pdpt_flags = 0x1;
#else
int pdpt_flags = 0x3;
#endif
boot_pdpt[(kernel_load_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;
boot_pdpt[(kernel_mapping_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;
for (size_t i = 0; i <= (kernel_load_end - kernel_load_base) >> 21; i++)
boot_pd_kernel[i] = (FlatPtr)&boot_pd_kernel_pts[i * 512] | 0x3;
boot_pd_kernel[0] = (FlatPtr)boot_pd_kernel_pt0 | 0x3;
__builtin_memset(boot_pd_kernel_pts, 0, sizeof(boot_pd_kernel_pts));
for (FlatPtr vaddr = kernel_load_base; vaddr <= kernel_load_end; vaddr += PAGE_SIZE * 512)
boot_pd_kernel[(vaddr - kernel_mapping_base) >> 21] = (FlatPtr)(&boot_pd_kernel_image_pts[(vaddr - kernel_load_base) >> 12]) | 0x3;
__builtin_memset(boot_pd_kernel_pt0, 0, sizeof(boot_pd_kernel_pt0));
VERIFY((size_t)end_of_prekernel_image < array_size(boot_pd_kernel_pt0) * PAGE_SIZE);
/* pseudo-identity map 0M - end_of_prekernel_image */
for (size_t i = 0; i < (FlatPtr)end_of_prekernel_image / PAGE_SIZE; i++)
boot_pd_kernel_pts[i] = i * PAGE_SIZE | 0x3;
boot_pd_kernel_pt0[i] = i * PAGE_SIZE | 0x3;
__builtin_memset(boot_pd_kernel_image_pts, 0, sizeof(boot_pd_kernel_image_pts));
for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
for (FlatPtr offset = 0; offset < kernel_program_header.p_memsz; offset += PAGE_SIZE) {
auto pte_index = (kernel_program_header.p_vaddr + offset - kernel_load_base) >> 12;
boot_pd_kernel_pts[pte_index] = (kernel_program_header.p_paddr + offset) | 0x3;
auto pte_index = ((kernel_load_base & 0x1fffff) + kernel_program_header.p_vaddr + offset) >> 12;
boot_pd_kernel_image_pts[pte_index] = (kernel_physical_base + kernel_program_header.p_paddr + offset) | 0x3;
}
}
@@ -130,28 +150,29 @@ extern "C" [[noreturn]] void init()
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
__builtin_memmove((u8*)kernel_program_header.p_vaddr, kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
__builtin_memmove((u8*)kernel_load_base + kernel_program_header.p_vaddr, kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
}
for (ssize_t i = kernel_elf_header.e_phnum - 1; i >= 0; i--) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
__builtin_memset((u8*)kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
__builtin_memset((u8*)kernel_load_base + kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
}
multiboot_info_ptr->mods_count--;
multiboot_info_ptr->mods_addr += sizeof(multiboot_module_entry_t);
auto adjust_by_load_base = [kernel_load_base](auto ptr) {
return (decltype(ptr))((FlatPtr)ptr + kernel_load_base);
auto adjust_by_mapping_base = [kernel_mapping_base](auto ptr) {
return (decltype(ptr))((FlatPtr)ptr + kernel_mapping_base);
};
BootInfo info;
info.start_of_prekernel_image = (PhysicalPtr)start_of_prekernel_image;
info.end_of_prekernel_image = (PhysicalPtr)end_of_prekernel_image;
info.physical_to_virtual_offset = kernel_load_base;
info.kernel_base = kernel_load_base;
info.physical_to_virtual_offset = kernel_load_base - kernel_physical_base;
info.kernel_mapping_base = kernel_mapping_base;
info.kernel_load_base = kernel_load_base;
#if ARCH(X86_64)
info.gdt64ptr = (PhysicalPtr)gdt64ptr;
info.code64_sel = code64_sel;
@@ -160,12 +181,12 @@ extern "C" [[noreturn]] void init()
info.boot_pdpt = (PhysicalPtr)boot_pdpt;
info.boot_pd0 = (PhysicalPtr)boot_pd0;
info.boot_pd_kernel = (PhysicalPtr)boot_pd_kernel;
info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_load_base(boot_pd_kernel_pt1023);
info.kernel_cmdline = (FlatPtr)adjust_by_load_base(kernel_cmdline);
info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_mapping_base(boot_pd_kernel_pt1023);
info.kernel_cmdline = (FlatPtr)adjust_by_mapping_base(kernel_cmdline);
info.multiboot_flags = multiboot_info_ptr->flags;
info.multiboot_memory_map = adjust_by_load_base((FlatPtr)multiboot_info_ptr->mmap_addr);
info.multiboot_memory_map = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mmap_addr);
info.multiboot_memory_map_count = multiboot_info_ptr->mmap_length / sizeof(multiboot_memory_map_t);
info.multiboot_modules = adjust_by_load_base((FlatPtr)multiboot_info_ptr->mods_addr);
info.multiboot_modules = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mods_addr);
info.multiboot_modules_count = multiboot_info_ptr->mods_count;
info.multiboot_framebuffer_addr = multiboot_info_ptr->framebuffer_addr;
info.multiboot_framebuffer_pitch = multiboot_info_ptr->framebuffer_pitch;
@@ -178,9 +199,11 @@ extern "C" [[noreturn]] void init()
#if ARCH(I386)
"add %0, %%esp"
#else
"add %0, %%rsp"
"movabs %0, %%rax\n"
"add %%rax, %%rsp"
#endif
::"g"(kernel_load_base));
::"g"(kernel_mapping_base)
: "ax");
// unmap the 0-1MB region
for (size_t i = 0; i < 256; i++)
@@ -192,8 +215,10 @@ extern "C" [[noreturn]] void init()
reload_cr3();
void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))kernel_elf_header.e_entry;
entry(*adjust_by_load_base(&info));
ELF::perform_relative_relocations(kernel_load_base);
void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))(kernel_load_base + kernel_elf_header.e_entry);
entry(*adjust_by_mapping_base(&info));
__builtin_unreachable();
}