From d8206c105947d0dd12e7431b9d35509f8e62735a Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Wed, 12 Jan 2022 14:32:21 +0100
Subject: [PATCH] Kernel: Don't release/relock spinlocks repeatedly during
 space teardown

Grab the page directory and MM locks once at the start of address space
teardown, then hold onto them across all the region unmapping work.
---
 Kernel/Memory/AddressSpace.cpp |  6 ++++--
 Kernel/Memory/Region.cpp       | 13 ++++++++++---
 Kernel/Memory/Region.h         |  1 +
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index dbda1324e1..9be6e46914 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -321,9 +321,11 @@ void AddressSpace::dump_regions()
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
     VERIFY(Thread::current() == g_finalizer);
-    SpinlockLocker lock(m_lock);
+    SpinlockLocker locker(m_lock);
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
     for (auto& region : m_regions)
-        (*region).unmap(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No);
+        (*region).unmap_with_locks_held(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No, pd_locker, mm_locker);
     m_regions.clear();
 }
 
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 3c3a2fd83f..76e31d7406 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -234,12 +234,19 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
 
-void Region::unmap(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb)
+void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldFlushTLB should_flush_tlb)
+{
+    if (!m_page_directory)
+        return;
+    SpinlockLocker pd_locker(m_page_directory->get_lock());
+    SpinlockLocker mm_locker(s_mm_lock);
+    unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
+}
+
+void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
-    SpinlockLocker page_lock(m_page_directory->get_lock());
-    SpinlockLocker lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index 5ca89bfd53..b66300c241 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -179,6 +179,7 @@ public:
         Yes,
     };
     void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
+    void unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock>& pd_locker, SpinlockLocker<RecursiveSpinlock>& mm_locker);

     void remap();