From 1415b2cfc300db1128fce1fa98e3e10a49810610 Mon Sep 17 00:00:00 2001
From: Brian Gianforcaro
Date: Tue, 18 May 2021 01:45:05 -0700
Subject: [PATCH] Kernel: Do not allocate AnonymousVMObjects under spin lock

Spinlocks are meant to guard short critical sections, with hopefully
no other locks being taken in the process. Violating these constraints
usually has detrimental effects on platform stability as well as
performance and scalability. Allocating memory takes its own locks,
and can in some cases even allocate new regions, and thus violates
these tenets.

Move the AnonymousVMObject creation outside of the spinlock, as
creation does not modify any shared state.
---
 Kernel/VM/MemoryManager.cpp | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 00dfdae1ae..d278cc8159 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -470,40 +470,40 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
+    auto vm_object = AnonymousVMObject::create_with_size(size, strategy);
+    if (!vm_object)
+        return {};
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
-    if (!vmobject)
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), move(name), access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vm_object.release_nonnull(), move(name), access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
+    auto vm_object = AnonymousVMObject::create_for_physical_range(paddr, size);
+    if (!vm_object)
+        return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
-    if (!vmobject)
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, move(name), access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
+    auto vm_object = AnonymousVMObject::create_for_physical_range(paddr, size);
+    if (!vm_object)
+        return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
-    if (!vmobject)
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, move(name), access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, String name, Region::Access access, Region::Cacheable cacheable)
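
The change above follows a general rule for spinlock-protected critical
sections: hoist any operation that may itself acquire locks (here, memory
allocation) out of the critical section, so that only the shared-state
update runs under the lock. Below is a minimal standalone sketch of that
pattern, not kernel code: it uses std::mutex in place of a spinlock, and
the names Object, make_object, g_lock, and g_registered are hypothetical
stand-ins for the kernel's actual types.

// Sketch of the "allocate before locking" pattern; all names here are
// hypothetical, not SerenityOS kernel types.
#include <memory>
#include <mutex>
#include <utility>

struct Object { };

std::mutex g_lock;                    // stands in for a spinlock such as s_mm_lock
std::unique_ptr<Object> g_registered; // shared state guarded by g_lock

// May take locks of its own internally, as
// AnonymousVMObject::create_with_size() does.
std::unique_ptr<Object> make_object()
{
    return std::make_unique<Object>();
}

bool register_new_object()
{
    // Allocate first: this may block or take other locks, so it must
    // not happen while g_lock is held.
    auto object = make_object();
    if (!object)
        return false;

    // Only the shared-state update happens inside the critical section.
    std::scoped_lock lock(g_lock);
    g_registered = std::move(object);
    return true;
}

The reordering is only valid because, as the commit message notes, creating
the object neither reads nor writes any state guarded by the lock; an
allocation whose result depended on lock-protected state could not be
hoisted this way.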