Kernel: Remove double RedBlackTree lookup in VM/Space region removal
We should never request a region's removal that we don't currently own. All callers currently assert this at every call site. Instead, let's push the assert down into the RedBlackTree removal and assume that we will always successfully remove the region.
Parent: d879709ec7
Commit: c0987453e6
4 changed files with 13 additions and 26 deletions
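For context, here is a minimal before/after sketch of the Space::take_region() pattern this commit changes. It uses std::map and std::unique_ptr as stand-ins for the kernel's RedBlackTree and OwnPtr types; none of this is the actual SerenityOS code, only an illustration of why the old path walks the tree twice while the new path removes in one pass and asserts ownership.

// Stand-in sketch only: std::map plays the role of the kernel's RedBlackTree,
// and Region is reduced to a base address. Not the real SerenityOS code.
#include <cassert>
#include <map>
#include <memory>

struct Region {
    unsigned long base { 0 };
};

std::map<unsigned long, std::unique_ptr<Region>> regions;

// Before: two traversals -- one find() to validate, one erase() by key to remove.
std::unique_ptr<Region> take_region_before(Region& region)
{
    auto it = regions.find(region.base);
    if (it == regions.end())
        return {};                        // caller had to VERIFY the result
    if (it->second.get() != &region)
        return {};
    auto owned = std::move(it->second);
    regions.erase(region.base);           // second lookup of the same key
    return owned;
}

// After: a single removal; an ownership mistake trips the assert here
// instead of being checked by every caller.
std::unique_ptr<Region> take_region_after(Region& region)
{
    auto node = regions.extract(region.base); // one traversal, detaches the node
    assert(!node.empty() && node.mapped().get() == &region);
    return std::move(node.mapped());
}

int main()
{
    auto region = std::make_unique<Region>(Region { 0x1000 });
    auto* raw = region.get();
    regions.emplace(raw->base, std::move(region));
    auto taken = take_region_after(*raw);
    assert(taken.get() == raw);
}

The same reasoning is what lets deallocate_region() drop its bool return and every caller drop its VERIFY, as the hunks below show.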
@@ -57,8 +57,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         PerformanceManager::add_unmap_perf_event(*Process::current(), whole_region->range());
 
-        bool success = deallocate_region(*whole_region);
-        VERIFY(success);
+        deallocate_region(*whole_region);
 
         return KSuccess;
     }
 
@@ -69,7 +68,6 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
         auto region = take_region(*old_region);
-        VERIFY(region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@@ -108,15 +106,13 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     for (auto* old_region : regions) {
         // if it's a full match we can delete the complete old region
         if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
-            bool res = deallocate_region(*old_region);
-            VERIFY(res);
+            deallocate_region(*old_region);
             continue;
         }
 
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address, but dont deallocate it yet
         auto region = take_region(*old_region);
-        VERIFY(region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@@ -219,25 +215,21 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, Nonn
     return added_region;
 }
 
-bool Space::deallocate_region(Region& region)
+void Space::deallocate_region(Region& region)
 {
-    return take_region(region);
+    take_region(region);
 }
 
-OwnPtr<Region> Space::take_region(Region& region)
+NonnullOwnPtr<Region> Space::take_region(Region& region)
 {
     ScopedSpinLock lock(m_lock);
 
     if (m_region_lookup_cache.region.unsafe_ptr() == &region)
         m_region_lookup_cache.region = nullptr;
-    // FIXME: currently we traverse the RBTree twice, once to check if the region in the tree starting at region.vaddr()
-    // is the same region and once to actually remove it, maybe we can add some kind of remove_if()?
-    auto found_region = m_regions.find(region.vaddr().get());
-    if (!found_region)
-        return {};
-    if (found_region->ptr() != &region)
-        return {};
-    return m_regions.unsafe_remove(region.vaddr().get());
+
+    auto found_region = m_regions.unsafe_remove(region.vaddr().get());
+    VERIFY(found_region.ptr() == &region);
+    return found_region;
 }
 
 Region* Space::find_region_from_range(const Range& range)
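The FIXME deleted above asked for "some kind of remove_if()". The commit takes a different route (one unconditional unsafe_remove() followed by a VERIFY), but as a hypothetical alternative, a predicate-based removal helper could also have collapsed the check and the removal into a single traversal. The sketch below is an illustrative assumption built on std::map; it is not AK::RedBlackTree API.

// Hypothetical remove_if(): remove and return the value stored under `key`
// only when `pred` approves it, touching the tree a single time.
#include <map>
#include <memory>
#include <optional>

template<typename K, typename V, typename Pred>
std::optional<V> remove_if(std::map<K, V>& tree, K const& key, Pred pred)
{
    auto it = tree.find(key);             // single traversal
    if (it == tree.end() || !pred(it->second))
        return std::nullopt;
    auto node = tree.extract(it);         // detach by iterator, no second lookup
    return std::move(node.mapped());
}

int main()
{
    std::map<int, std::unique_ptr<int>> tree;
    tree.emplace(1, std::make_unique<int>(42));
    auto* raw = tree.at(1).get();
    auto removed = remove_if(tree, 1, [&](auto& v) { return v.get() == raw; });
    return removed && *removed ? 0 : 1;
}

A take_region() built on such a helper would keep the single-traversal property while still letting the caller decide how to handle a mismatch, instead of asserting unconditionally.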