From 0ca668f59c7ba71a98cd817adbcc73931a31e917 Mon Sep 17 00:00:00 2001
From: Brian Gianforcaro
Date: Thu, 29 Apr 2021 02:20:28 -0700
Subject: [PATCH] Kernel: Harden sys$munmap Vector usage against OOM.

Theoretically the append should never fail as we have in-line storage
of 2, which should be enough. However I'm planning on removing the
non-try variants of AK::Vector when compiling in kernel mode in the
future, so this will need to go eventually. I suppose it also protects
against some unforeseen bug where we can append more than 2 items.
---
 Kernel/Syscalls/mmap.cpp | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 07d2b66247..44a11d4400 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -505,7 +505,7 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
         // slow: without caching
         const auto& regions = space().find_regions_intersecting(range_to_unmap);
 
-        // check if any of the regions is not mmapped, to not accientally
+        // Check if any of the regions is not mmapped, to not accidentally
         // error-out with just half a region map left
         for (auto* region : regions) {
             if (!region->is_mmap())
@@ -530,8 +530,9 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
             // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
             region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
 
-            // otherwise just split the regions and collect them for future mapping
-            new_regions.append(space().split_region_around_range(*region, range_to_unmap));
+            // Otherwise just split the regions and collect them for future mapping
+            if (!new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
+                return ENOMEM;
         }
 
         // Instead we give back the unwanted VM manually at the end.
         space().page_directory().range_allocator().deallocate(range_to_unmap);