From 95c2166ca985b4fad16eec070c45d7b36088029e Mon Sep 17 00:00:00 2001
From: Gunnar Beutner
Date: Fri, 28 May 2021 11:03:21 +0200
Subject: [PATCH] Kernel: Move sys$munmap functionality into a helper method

---
 Kernel/Syscalls/mmap.cpp | 97 ++-------------------------------------
 Kernel/VM/Range.cpp      | 18 ++++++++
 Kernel/VM/Range.h        |  2 +
 Kernel/VM/Space.cpp      | 98 ++++++++++++++++++++++++++++++++++++++++
 Kernel/VM/Space.h        |  2 +
 5 files changed, 123 insertions(+), 94 deletions(-)

diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 72f0ef3f6c..2282ec02b3 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -507,100 +507,9 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
 {
     REQUIRE_PROMISE(stdio);

-    if (!size)
-        return EINVAL;
-
-    auto range_or_error = expand_range_to_page_boundaries(addr, size);
-    if (range_or_error.is_error())
-        return range_or_error.error();
-
-    auto range_to_unmap = range_or_error.value();
-
-    if (!is_user_range(range_to_unmap))
-        return EFAULT;
-
-    if (auto* whole_region = space().find_region_from_range(range_to_unmap)) {
-        if (!whole_region->is_mmap())
-            return EPERM;
-
-        PerformanceManager::add_unmap_perf_event(*this, whole_region->range());
-
-        bool success = space().deallocate_region(*whole_region);
-        VERIFY(success);
-        return 0;
-    }
-
-    if (auto* old_region = space().find_region_containing(range_to_unmap)) {
-        if (!old_region->is_mmap())
-            return EPERM;
-
-        // Remove the old region from our regions tree, since were going to add another region
-        // with the exact same start address, but dont deallocate it yet
-        auto region = space().take_region(*old_region);
-        VERIFY(region);
-
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-
-        auto new_regions = space().split_region_around_range(*region, range_to_unmap);
-
-        // Instead we give back the unwanted VM manually.
-        space().page_directory().range_allocator().deallocate(range_to_unmap);
-
-        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
-        for (auto* new_region : new_regions) {
-            new_region->map(space().page_directory());
-        }
-
-        if (auto* event_buffer = current_perf_events_buffer()) {
-            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, range_to_unmap.base().get(), range_to_unmap.size(), nullptr);
-        }
-
-        return 0;
-    }
-
-    // Try again while checkin multiple regions at a time
-    // slow: without caching
-    const auto& regions = space().find_regions_intersecting(range_to_unmap);
-
-    // Check if any of the regions is not mmapped, to not accidentally
-    // error-out with just half a region map left
-    for (auto* region : regions) {
-        if (!region->is_mmap())
-            return EPERM;
-    }
-
-    Vector<Region*, 2> new_regions;
-
-    for (auto* old_region : regions) {
-        // if it's a full match we can delete the complete old region
-        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
-            bool res = space().deallocate_region(*old_region);
-            VERIFY(res);
-            continue;
-        }
-
-        // Remove the old region from our regions tree, since were going to add another region
-        // with the exact same start address, but dont deallocate it yet
-        auto region = space().take_region(*old_region);
-        VERIFY(region);
-
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-
-        // Otherwise just split the regions and collect them for future mapping
-        if (new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
-            return ENOMEM;
-    }
-    // Instead we give back the unwanted VM manually at the end.
-    space().page_directory().range_allocator().deallocate(range_to_unmap);
-    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
-    for (auto* new_region : new_regions) {
-        new_region->map(space().page_directory());
-    }
-
-    PerformanceManager::add_unmap_perf_event(*this, range_to_unmap);
-
+    auto result = space().unmap_mmap_range(VirtualAddress { addr }, size);
+    if (result.is_error())
+        return result;
     return 0;
 }

diff --git a/Kernel/VM/Range.cpp b/Kernel/VM/Range.cpp
index 7cb305c489..6b846daaea 100644
--- a/Kernel/VM/Range.cpp
+++ b/Kernel/VM/Range.cpp
@@ -7,6 +7,7 @@

 #include
 #include
+#include
 #include

 namespace Kernel {
@@ -35,4 +36,21 @@ Range Range::intersect(const Range& other) const
     return Range(new_base, (new_end - new_base).get());
 }

+KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
+{
+    if (page_round_up_would_wrap(size))
+        return EINVAL;
+
+    if ((address + size) < address)
+        return EINVAL;
+
+    if (page_round_up_would_wrap(address + size))
+        return EINVAL;
+
+    auto base = VirtualAddress { address }.page_base();
+    auto end = page_round_up(address + size);
+
+    return Range { base, end - base.get() };
+}
+
 }
diff --git a/Kernel/VM/Range.h b/Kernel/VM/Range.h
index 1af5b30782..8976e424c9 100644
--- a/Kernel/VM/Range.h
+++ b/Kernel/VM/Range.h
@@ -50,6 +50,8 @@ public:
     Vector<Range, 2> carve(const Range&) const;
     Range intersect(const Range&) const;

+    static KResultOr<Range> expand_to_page_boundaries(FlatPtr address, size_t size);
+
 private:
     VirtualAddress m_base;
     size_t m_size { 0 };
diff --git a/Kernel/VM/Space.cpp b/Kernel/VM/Space.cpp
index ea89fdbb61..3d28e37502 100644
--- a/Kernel/VM/Space.cpp
+++ b/Kernel/VM/Space.cpp
@@ -6,6 +6,7 @@
  */

 #include
+#include
 #include
 #include
 #include
@@ -37,6 +38,103 @@ Space::~Space()
 {
 }

+KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
+{
+    if (!size)
+        return EINVAL;
+
+    auto range_or_error = Range::expand_to_page_boundaries(addr.get(), size);
+    if (range_or_error.is_error())
+        return range_or_error.error();
+
+    auto range_to_unmap = range_or_error.value();
+
+    if (!is_user_range(range_to_unmap))
+        return EFAULT;
+
+    if (auto* whole_region = find_region_from_range(range_to_unmap)) {
+        if (!whole_region->is_mmap())
+            return EPERM;
+
+        PerformanceManager::add_unmap_perf_event(*Process::current(), whole_region->range());
+
+        bool success = deallocate_region(*whole_region);
+        VERIFY(success);
+        return KSuccess;
+    }
+
+    if (auto* old_region = find_region_containing(range_to_unmap)) {
+        if (!old_region->is_mmap())
+            return EPERM;
+
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = take_region(*old_region);
+        VERIFY(region);
+
+        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
+        auto new_regions = split_region_around_range(*region, range_to_unmap);
+
+        // Instead we give back the unwanted VM manually.
+        page_directory().range_allocator().deallocate(range_to_unmap);
+
+        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
+        for (auto* new_region : new_regions) {
+            new_region->map(page_directory());
+        }
+
+        PerformanceManager::add_unmap_perf_event(*Process::current(), range_to_unmap);
+
+        return KSuccess;
+    }
+
+    // Try again while checking multiple regions at a time
+    // slow: without caching
+    const auto& regions = find_regions_intersecting(range_to_unmap);
+
+    // Check if any of the regions is not mmapped, to not accidentally
+    // error-out with just half a region map left
+    for (auto* region : regions) {
+        if (!region->is_mmap())
+            return EPERM;
+    }
+
+    Vector<Region*, 2> new_regions;
+
+    for (auto* old_region : regions) {
+        // if it's a full match we can delete the complete old region
+        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
+            bool res = deallocate_region(*old_region);
+            VERIFY(res);
+            continue;
+        }
+
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = take_region(*old_region);
+        VERIFY(region);
+
+        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
+        // Otherwise just split the regions and collect them for future mapping
+        if (new_regions.try_append(split_region_around_range(*region, range_to_unmap)))
+            return ENOMEM;
+    }
+    // Instead we give back the unwanted VM manually at the end.
+    page_directory().range_allocator().deallocate(range_to_unmap);
+    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
+    for (auto* new_region : new_regions) {
+        new_region->map(page_directory());
+    }
+
+    PerformanceManager::add_unmap_perf_event(*Process::current(), range_to_unmap);
+
+    return KSuccess;
+}
+
 Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
diff --git a/Kernel/VM/Space.h b/Kernel/VM/Space.h
index a83746fad5..d45d3e7326 100644
--- a/Kernel/VM/Space.h
+++ b/Kernel/VM/Space.h
@@ -33,6 +33,8 @@ public:

     void dump_regions();

+    KResult unmap_mmap_range(VirtualAddress, size_t);
+
     Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);

     KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);