From a850a89c1b62bea35117ba8c10064949809c48bc Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Fri, 17 Jan 2020 23:05:37 +0100
Subject: [PATCH] Kernel: Add a random offset to the base of the per-process
 VM allocator

This is not ASLR, but it does de-trivialize exploiting the ELF loader
which would previously always parse executables at 0x01001000 in every
single exec(). I've taken advantage of this multiple times in my own
toy exploits and it's starting to feel cheesy. :^)
---
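The core of the change is the else-branch in the per-process PageDirectory
constructor: pick a page-aligned offset in [0, 32 MB) and bump the userspace
allocator base by it, so the usable range starts at a slightly different
address in each process. Below is a minimal standalone sketch of that
arithmetic; get_random_u32(), MB and PAGE_MASK are local stand-ins for the
kernel's get_good_random() and its constants, so treat it as an illustration
of the idea rather than kernel code.

// Standalone sketch of the base-randomization arithmetic in this patch.
// MB, PAGE_MASK and get_random_u32() are stand-ins for kernel-side
// definitions; the kernel itself uses get_good_random().
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <random>

static const uint32_t MB = 1024 * 1024;
static const uint32_t PAGE_MASK = 0xfffff000; // 4 KiB pages

static const uint32_t userspace_range_base = 0x00800000;
static const uint32_t userspace_range_ceiling = 0xbe000000;

// Stand-in entropy source; any CSPRNG would do for the sketch.
static uint32_t get_random_u32()
{
    std::random_device rd;
    return rd();
}

int main()
{
    // The offset is a whole number of megabytes in [0, 32 MB), so it is
    // already page-aligned; the & PAGE_MASK mirrors the patch and is
    // effectively a no-op here.
    uint32_t random_offset = (get_random_u32() % 32 * MB) & PAGE_MASK;
    uint32_t base = userspace_range_base + random_offset;
    uint32_t size = userspace_range_ceiling - base;
    std::printf("userspace range: base=0x%08" PRIx32 " size=0x%08" PRIx32 "\n", base, size);
    return 0;
}
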
 Kernel/VM/PageDirectory.cpp  | 16 +++++++++++++---
 Kernel/VM/RangeAllocator.cpp | 11 ++++++++---
 Kernel/VM/RangeAllocator.h   |  6 ++++--
 3 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index 9d3f98e9ae..f808748635 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -1,9 +1,11 @@
 #include
+#include
 #include
 #include
 #include
 
-static const u32 userspace_range_base = 0x01000000;
+static const u32 userspace_range_base = 0x00800000;
+static const u32 userspace_range_ceiling = 0xbe000000;
 static const u32 kernelspace_range_base = 0xc0800000;
 
 static HashMap& cr3_map()
@@ -26,8 +28,9 @@ extern "C" PageDirectoryEntry boot_pd0[1024];
 extern "C" PageDirectoryEntry boot_pd3[1024];
 
 PageDirectory::PageDirectory()
-    : m_range_allocator(VirtualAddress(0xc0c00000), 0x3f000000)
 {
+    m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000);
+
     // Adopt the page tables already set up by boot.S
     PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((u32)boot_pdpt));
     PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((u32)boot_pd0));
@@ -42,8 +45,15 @@ PageDirectory::PageDirectory()
 
 PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_range_allocator)
     : m_process(&process)
-    , m_range_allocator(parent_range_allocator ? RangeAllocator(*parent_range_allocator) : RangeAllocator(VirtualAddress(userspace_range_base), kernelspace_range_base - userspace_range_base))
 {
+    if (parent_range_allocator) {
+        m_range_allocator.initialize_from_parent(*parent_range_allocator);
+    } else {
+        size_t random_offset = (get_good_random() % 32 * MB) & PAGE_MASK;
+        u32 base = userspace_range_base + random_offset;
+        m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
+    }
+
     // Set up a userspace page directory
     m_directory_table = MM.allocate_user_physical_page();
     m_directory_pages[0] = MM.allocate_user_physical_page();
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index 2273befaaf..6f168ccf38 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -1,11 +1,16 @@
 #include
+#include
 #include
 #include
 
 //#define VRA_DEBUG
 #define VM_GUARD_PAGES
 
-RangeAllocator::RangeAllocator(VirtualAddress base, size_t size)
+RangeAllocator::RangeAllocator()
+{
+}
+
+void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
 {
     m_available_ranges.append({ base, size });
 #ifdef VRA_DEBUG
@@ -13,9 +18,9 @@ RangeAllocator::RangeAllocator(VirtualAddress base, size_t size)
 #endif
 }
 
-RangeAllocator::RangeAllocator(const RangeAllocator& parent_allocator)
-    : m_available_ranges(parent_allocator.m_available_ranges)
+void RangeAllocator::initialize_from_parent(const RangeAllocator& parent_allocator)
 {
+    m_available_ranges = parent_allocator.m_available_ranges;
 }
 
 RangeAllocator::~RangeAllocator()
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
index 90f043bba9..5b8e50a728 100644
--- a/Kernel/VM/RangeAllocator.h
+++ b/Kernel/VM/RangeAllocator.h
@@ -47,10 +47,12 @@ private:
 
 class RangeAllocator {
 public:
-    RangeAllocator(VirtualAddress, size_t);
-    RangeAllocator(const RangeAllocator&);
+    RangeAllocator();
     ~RangeAllocator();
 
+    void initialize_with_range(VirtualAddress, size_t);
+    void initialize_from_parent(const RangeAllocator&);
+
     Range allocate_anywhere(size_t);
     Range allocate_specific(VirtualAddress, size_t);
     void deallocate(Range);