mirror of
https://github.com/RGBCube/serenity
synced 2025-07-25 17:27:35 +00:00
Kernel: Don't release/relock spinlocks repeatedly during space teardown
Grab the page directory and MM locks once at the start of address space teardown, then hold onto them across all the region unmapping work.
This commit is contained in:
parent
2323cdd914
commit
d8206c1059
3 changed files with 15 additions and 5 deletions
|
@@ -321,9 +321,11 @@ void AddressSpace::dump_regions()
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
     VERIFY(Thread::current() == g_finalizer);
-    SpinlockLocker lock(m_lock);
+    SpinlockLocker locker(m_lock);
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
     for (auto& region : m_regions)
-        (*region).unmap(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No);
+        (*region).unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
     m_regions.clear();
 }
|
@@ -234,12 +234,19 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
 
-void Region::unmap(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb)
+void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldFlushTLB should_flush_tlb)
+{
+    if (!m_page_directory)
+        return;
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
+    unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
+}
+
+void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
-    SpinlockLocker page_lock(m_page_directory->get_lock());
-    SpinlockLocker lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
|
@@ -179,6 +179,7 @@ public:
         Yes,
     };
     void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
+    void unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);
 
     void remap();
|
|
Loading…
Add table
Add a link
Reference in a new issue