
Kernel: Move sys$munmap functionality into a helper method

Gunnar Beutner 2021-05-28 11:03:21 +02:00 committed by Andreas Kling
parent b9d693665b
commit 95c2166ca9
5 changed files with 123 additions and 94 deletions

@@ -507,100 +507,9 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
 {
     REQUIRE_PROMISE(stdio);
     if (!size)
         return EINVAL;
-    auto range_or_error = expand_range_to_page_boundaries(addr, size);
-    if (range_or_error.is_error())
-        return range_or_error.error();
-    auto range_to_unmap = range_or_error.value();
-    if (!is_user_range(range_to_unmap))
-        return EFAULT;
-    if (auto* whole_region = space().find_region_from_range(range_to_unmap)) {
-        if (!whole_region->is_mmap())
-            return EPERM;
-        PerformanceManager::add_unmap_perf_event(*this, whole_region->range());
-        bool success = space().deallocate_region(*whole_region);
-        VERIFY(success);
-        return 0;
-    }
-    if (auto* old_region = space().find_region_containing(range_to_unmap)) {
-        if (!old_region->is_mmap())
-            return EPERM;
-        // Remove the old region from our regions tree, since we're going to add another region
-        // with the exact same start address, but don't deallocate it yet
-        auto region = space().take_region(*old_region);
-        VERIFY(region);
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        auto new_regions = space().split_region_around_range(*region, range_to_unmap);
-        // Instead we give back the unwanted VM manually.
-        space().page_directory().range_allocator().deallocate(range_to_unmap);
-        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
-        for (auto* new_region : new_regions) {
-            new_region->map(space().page_directory());
-        }
-        if (auto* event_buffer = current_perf_events_buffer()) {
-            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, range_to_unmap.base().get(), range_to_unmap.size(), nullptr);
-        }
-        return 0;
-    }
-    // Try again while checking multiple regions at a time
-    // slow: without caching
-    const auto& regions = space().find_regions_intersecting(range_to_unmap);
-    // Check if any of the regions is not mmapped, to not accidentally
-    // error-out with just half a region map left
-    for (auto* region : regions) {
-        if (!region->is_mmap())
-            return EPERM;
-    }
-    Vector<Region*, 2> new_regions;
-    for (auto* old_region : regions) {
-        // if it's a full match we can delete the complete old region
-        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
-            bool res = space().deallocate_region(*old_region);
-            VERIFY(res);
-            continue;
-        }
-        // Remove the old region from our regions tree, since we're going to add another region
-        // with the exact same start address, but don't deallocate it yet
-        auto region = space().take_region(*old_region);
-        VERIFY(region);
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        // Otherwise just split the regions and collect them for future mapping
-        if (new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
-            return ENOMEM;
-    }
-    // Instead we give back the unwanted VM manually at the end.
-    space().page_directory().range_allocator().deallocate(range_to_unmap);
-    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
-    for (auto* new_region : new_regions) {
-        new_region->map(space().page_directory());
-    }
-    PerformanceManager::add_unmap_perf_event(*this, range_to_unmap);
+    auto result = space().unmap_mmap_range(VirtualAddress { addr }, size);
+    if (result.is_error())
+        return result;
     return 0;
 }
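
Only the sys$munmap side of the change is shown in this hunk. Based on the commit title and the new call site, space().unmap_mmap_range(VirtualAddress { addr }, size), the removed logic presumably lands in a member function on Space. Below is a minimal sketch of what such a helper's fast paths could look like; the signature, file placement, and everything not visible in the diff above are assumptions, not the actual contents of the other changed files.

KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
{
    // Sketch only: shape inferred from the call site above. Helpers such as
    // expand_range_to_page_boundaries() and is_user_range() are taken from the
    // removed code and assumed reachable here; perf-event reporting is omitted.
    auto range_or_error = expand_range_to_page_boundaries(addr, size);
    if (range_or_error.is_error())
        return range_or_error.error();
    auto range_to_unmap = range_or_error.value();

    if (!is_user_range(range_to_unmap))
        return EFAULT;

    // Fast path: the requested range exactly matches one existing mmap'd region.
    if (auto* whole_region = find_region_from_range(range_to_unmap)) {
        if (!whole_region->is_mmap())
            return EPERM;
        bool success = deallocate_region(*whole_region);
        VERIFY(success);
        return KSuccess;
    }

    // Partial overlap with a single region: take it out of the tree, unmap it
    // without releasing the VM range, split it around the hole, hand the hole
    // back to the range allocator, and re-map the surviving pieces.
    if (auto* old_region = find_region_containing(range_to_unmap)) {
        if (!old_region->is_mmap())
            return EPERM;
        auto region = take_region(*old_region);
        VERIFY(region);
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
        auto new_regions = split_region_around_range(*region, range_to_unmap);
        page_directory().range_allocator().deallocate(range_to_unmap);
        for (auto* new_region : new_regions)
            new_region->map(page_directory());
        return KSuccess;
    }

    // A full implementation would also handle a range spanning multiple regions,
    // following the find_regions_intersecting() loop removed from sys$munmap
    // above; this simplified sketch just refuses that case.
    return EINVAL;
}

Keeping sys$munmap as a thin wrapper around a Space-level helper would let other kernel paths reuse the same unmap logic without going through the syscall entry point, which is presumably the point of the refactor.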