
Kernel: Remove unused ShouldDeallocateVirtualRange parameters

Since there is no separate virtual range allocator anymore, this is
no longer used for anything.
Author: Andreas Kling
Date: 2022-04-04 23:36:09 +02:00
Parent: b36c3a68d8
Commit: e3e1d79a7d
6 changed files with 21 additions and 38 deletions
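The net effect on the API is small but tidy: the ShouldDeallocateVirtualRange enum disappears entirely, and ShouldFlushTLB becomes the sole (defaulted) parameter of unmap(). A minimal standalone sketch of the resulting call shape, using simplified stand-ins rather than the real SerenityOS types:

// Hypothetical stand-ins, not SerenityOS code: a dead enum parameter is
// dropped, and only the TLB-flush policy remains, defaulted to Yes.
#include <cstdio>

enum class ShouldFlushTLB { No, Yes };

struct Region {
    // Before: void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
    // After: the unused enum is gone.
    void unmap(ShouldFlushTLB should_flush_tlb = ShouldFlushTLB::Yes)
    {
        std::printf("unmap (flush TLB: %s)\n",
            should_flush_tlb == ShouldFlushTLB::Yes ? "yes" : "no");
    }
};

int main()
{
    Region region;
    region.unmap();                   // common case: flush the TLB
    region.unmap(ShouldFlushTLB::No); // teardown path, as in remove_all_regions()
    return 0;
}

The defaulted parameter keeps ordinary call sites as a bare region->unmap(), while the teardown path in remove_all_regions() can still skip per-region TLB flushes.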

Kernel/Memory/AddressSpace.cpp

@@ -71,11 +71,9 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
             return EPERM;
         // Remove the old region from our regions tree, since we're going to add another region
-        // with the exact same start address, but don't deallocate it yet.
+        // with the exact same start address.
         auto region = take_region(*old_region);
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualRange::No);
+        region->unmap();
         auto new_regions = TRY(try_split_region_around_range(*region, range_to_unmap));
@@ -113,11 +111,9 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
         }
         // Remove the old region from our regions tree, since we're going to add another region
-        // with the exact same start address, but don't deallocate it yet.
+        // with the exact same start address.
         auto region = take_region(*old_region);
-        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualRange::No);
+        region->unmap();
         // Otherwise, split the regions and collect them for future mapping.
         auto split_regions = TRY(try_split_region_around_range(*region, range_to_unmap));
@@ -339,7 +335,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
         SpinlockLocker pd_locker(m_page_directory->get_lock());
         SpinlockLocker mm_locker(s_mm_lock);
         for (auto& region : m_region_tree.regions())
-            region.unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
+            region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
     }
     m_region_tree.delete_all_regions_assuming_they_are_unmapped();

Kernel/Memory/Region.cpp

@@ -73,7 +73,7 @@ Region::~Region()
             // If the region is "PROT_NONE", we didn't map it in the first place.
         } else {
             SpinlockLocker mm_locker(s_mm_lock);
-            unmap_with_locks_held(ShouldDeallocateVirtualRange::Yes, ShouldFlushTLB::Yes, pd_locker, mm_locker);
+            unmap_with_locks_held(ShouldFlushTLB::Yes, pd_locker, mm_locker);
             VERIFY(!m_page_directory);
         }
     }
@@ -266,16 +266,16 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
-void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldFlushTLB should_flush_tlb)
+void Region::unmap(ShouldFlushTLB should_flush_tlb)
 {
     if (!m_page_directory)
         return;
     SpinlockLocker pd_locker(m_page_directory->get_lock());
     SpinlockLocker mm_locker(s_mm_lock);
-    unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
+    unmap_with_locks_held(should_flush_tlb, pd_locker, mm_locker);
 }
-void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
+void Region::unmap_with_locks_held(ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;

Kernel/Memory/Region.h

@@ -182,12 +182,8 @@ public:
     void set_page_directory(PageDirectory&);
     ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
-    enum class ShouldDeallocateVirtualRange {
-        No,
-        Yes,
-    };
-    void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
-    void unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
+    void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes);
+    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
     void remap();