diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 0be99ca724..82c59f190f 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -334,7 +334,6 @@ KResultOr<int> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
         auto region = space().take_region(*old_region);
-        VERIFY(region);
 
         // Unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@@ -398,7 +397,6 @@ KResultOr<int> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
         auto region = space().take_region(*old_region);
-        VERIFY(region);
 
         // Unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@@ -564,8 +562,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_params)
         old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        bool success = space().deallocate_region(*old_region);
-        VERIFY(success);
+        space().deallocate_region(*old_region);
 
         auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
         if (new_region_or_error.is_error())
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index fc1fec3bdb..40abdba261 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -404,9 +404,7 @@ void Thread::exit(void* exit_value)
     [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
     if (m_thread_specific_range.has_value()) {
         auto* region = process().space().find_region_from_range(m_thread_specific_range.value());
-        VERIFY(region);
-        if (!process().space().deallocate_region(*region))
-            dbgln("Failed to unmap TLS range, exiting thread anyway.");
+        process().space().deallocate_region(*region);
     }
     die_if_needed();
 }
diff --git a/Kernel/VM/Space.cpp b/Kernel/VM/Space.cpp
index 211a14a2e4..25df2a2093 100644
--- a/Kernel/VM/Space.cpp
+++ b/Kernel/VM/Space.cpp
@@ -57,8 +57,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
 
         PerformanceManager::add_unmap_perf_event(*Process::current(), whole_region->range());
 
-        bool success = deallocate_region(*whole_region);
-        VERIFY(success);
+        deallocate_region(*whole_region);
         return KSuccess;
     }
 
@@ -69,7 +68,6 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
         auto region = take_region(*old_region);
-        VERIFY(region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@@ -108,15 +106,13 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     for (auto* old_region : regions) {
         // if it's a full match we can delete the complete old region
         if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
-            bool res = deallocate_region(*old_region);
-            VERIFY(res);
+            deallocate_region(*old_region);
             continue;
         }
 
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
         auto region = take_region(*old_region);
-        VERIFY(region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@@ -219,25 +215,21 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, Nonn
     return added_region;
 }
 
-bool Space::deallocate_region(Region& region)
+void Space::deallocate_region(Region& region)
 {
-    return take_region(region);
+    take_region(region);
 }
 
-OwnPtr<Region> Space::take_region(Region& region)
+NonnullOwnPtr<Region> Space::take_region(Region& region)
 {
     ScopedSpinLock lock(m_lock);
 
     if (m_region_lookup_cache.region.unsafe_ptr() == &region)
         m_region_lookup_cache.region = nullptr;
-    // FIXME: currently we traverse the RBTree twice, once to check if the region in the tree starting at region.vaddr()
-    // is the same region and once to actually remove it, maybe we can add some kind of remove_if()?
-    auto found_region = m_regions.find(region.vaddr().get());
-    if (!found_region)
-        return {};
-    if (found_region->ptr() != &region)
-        return {};
-    return m_regions.unsafe_remove(region.vaddr().get());
+
+    auto found_region = m_regions.unsafe_remove(region.vaddr().get());
+    VERIFY(found_region.ptr() == &region);
+    return found_region;
 }
 
 Region* Space::find_region_from_range(const Range& range)
diff --git a/Kernel/VM/Space.h b/Kernel/VM/Space.h
index 97171c84b6..5cd458edcc 100644
--- a/Kernel/VM/Space.h
+++ b/Kernel/VM/Space.h
@@ -39,8 +39,8 @@ public:
     KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
     KResultOr<Region*> allocate_region(const Range&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
 
-    bool deallocate_region(Region& region);
-    OwnPtr<Region> take_region(Region& region);
+    void deallocate_region(Region& region);
+    NonnullOwnPtr<Region> take_region(Region& region);
 
     KResultOr<Region*> try_allocate_split_region(Region const& source_region, Range const&, size_t offset_in_vmobject);
     KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, Range const&);
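For readers of this patch, a minimal standalone sketch of the new take_region()/deallocate_region() contract. This is not SerenityOS code: std::map, std::unique_ptr and assert() stand in for the kernel's region tree, OwnPtr/NonnullOwnPtr and VERIFY(), and the Region/Space shapes below are simplified assumptions. It shows why callers no longer need VERIFY(region) or a success flag: the lookup is verified once inside take_region(), which can then hand back an owner that is never null.

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <memory>

    struct Region {
        std::uintptr_t vaddr;
    };

    struct Space {
        // Stand-in for the kernel's region tree, keyed by base address.
        std::map<std::uintptr_t, std::unique_ptr<Region>> regions;

        // New contract: `region` must be in the tree, so the lookup is
        // verified here once instead of every call site re-checking a
        // nullable return value.
        std::unique_ptr<Region> take_region(Region& region)
        {
            auto it = regions.find(region.vaddr);
            assert(it != regions.end() && it->second.get() == &region);
            auto owned = std::move(it->second);
            regions.erase(it);
            return owned; // conceptually NonnullOwnPtr<Region>: never null here
        }

        // deallocate_region() no longer reports success; the taken region is
        // simply dropped (destroyed) when the returned owner goes out of scope.
        void deallocate_region(Region& region) { take_region(region); }
    };

    int main()
    {
        Space space;
        auto* raw = new Region { 0x1000 };
        space.regions.emplace(raw->vaddr, std::unique_ptr<Region>(raw));

        // Call sites no longer need VERIFY(region) / VERIFY(success):
        auto region = space.take_region(*raw);
        assert(region != nullptr);
        return 0;
    }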