From 9c549c178a35934425e4e46be353f6e5e4f60b4f Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Sun, 22 Sep 2019 15:12:29 +0200
Subject: [PATCH] Kernel: Pad virtual address space allocations with guard
 pages

Put one unused page on each side of VM allocations to make invalid
accesses more likely to generate crashes.

Note that we will not add this guard padding for mmap() at a specific
memory address, only to "mmap it anywhere" requests.
---
 Kernel/VM/RangeAllocator.cpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index f4fb7ab3c1..8330a798be 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -59,12 +59,14 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
 
 Range RangeAllocator::allocate_anywhere(size_t size)
 {
+    // NOTE: We pad VM allocations with a guard page on each side.
+    size_t padded_size = size + PAGE_SIZE * 2;
     for (int i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
-        if (available_range.size() < size)
+        if (available_range.size() < padded_size)
             continue;
-        Range allocated_range(available_range.base(), size);
-        if (available_range.size() == size) {
+        Range allocated_range(available_range.base().offset(PAGE_SIZE), size);
+        if (available_range.size() == padded_size) {
 #ifdef VRA_DEBUG
             dbgprintf("VRA: Allocated perfect-fit anywhere(%u): %x\n", size, allocated_range.base().get());
 #endif
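
For readers outside the kernel tree, below is a minimal userspace sketch of
the guard-page padding scheme the hunk above introduces. This is an
illustration under assumptions, not the kernel's actual RangeAllocator:
Range, PAGE_SIZE, the std::vector free list, and the front-carving logic are
simplified stand-ins that merely mirror the names in the diff.

    // Hypothetical standalone model of guard-page padding in a range
    // allocator. Names mirror the patch; the data structures do not.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static constexpr size_t PAGE_SIZE = 4096;

    struct Range {
        size_t base { 0 };
        size_t size { 0 };
    };

    class RangeAllocator {
    public:
        explicit RangeAllocator(Range initial) { m_available_ranges.push_back(initial); }

        // Allocate `size` bytes anywhere, carving out one extra page on each
        // side. The caller never sees the guard pages, so an access just
        // before or just past the returned range lands in unmapped memory.
        Range allocate_anywhere(size_t size)
        {
            size_t padded_size = size + PAGE_SIZE * 2;
            for (size_t i = 0; i < m_available_ranges.size(); ++i) {
                auto& available_range = m_available_ranges[i];
                if (available_range.size < padded_size)
                    continue;
                // The caller's range starts one page into the carved region.
                Range allocated_range { available_range.base + PAGE_SIZE, size };
                if (available_range.size == padded_size) {
                    // Perfect fit: the whole available range is consumed.
                    m_available_ranges.erase(m_available_ranges.begin() + i);
                } else {
                    // Shrink the available range from the front by padded_size.
                    available_range.base += padded_size;
                    available_range.size -= padded_size;
                }
                return allocated_range;
            }
            return {}; // Allocation failed.
        }

    private:
        std::vector<Range> m_available_ranges;
    };

    int main()
    {
        RangeAllocator allocator({ 0x10000000, 64 * PAGE_SIZE });
        Range a = allocator.allocate_anywhere(4 * PAGE_SIZE);
        Range b = allocator.allocate_anywhere(4 * PAGE_SIZE);
        // b.base == a.base + a.size + 2 * PAGE_SIZE: the guard page after
        // `a` and the guard page before `b` sit between the allocations.
        printf("a: base=%#zx size=%#zx\n", a.base, a.size);
        printf("b: base=%#zx size=%#zx\n", b.base, b.size);
    }

Note how the perfect-fit comparison must use padded_size, not size: the
guard pages are part of what gets carved out of the free list, even though
the Range handed back to the caller excludes them. This also explains why
the patch leaves fixed-address mmap() unpadded; the caller picked the exact
base, so shifting it by a page is not an option there.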