From a930877f31fb117ce4b38d4782dd288093700d8a Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Sun, 22 Aug 2021 12:57:58 +0200
Subject: [PATCH] Kernel: Make quickmap functions VERIFY that MM lock is held

The quickmap_page() and unquickmap_page() functions are used to map a
single physical page at a kernel virtual address for temporary access.

These use the per-CPU quickmap buffer in the page tables, and access to
this is guarded by the MM lock. To prevent bugs, quickmap_page() should
not *take* the MM lock, but rather verify that it is already held!

This exposed two situations where we were using quickmap without
holding the MM lock during page fault handling. This patch is forced
to fix these issues (which is great!) :^)
---
 Kernel/Memory/AnonymousVMObject.cpp | 5 +++--
 Kernel/Memory/MemoryManager.cpp     | 4 ++--
 Kernel/Memory/Region.cpp            | 9 ++++++---
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp
index a882671f6e..8cd4915432 100644
--- a/Kernel/Memory/AnonymousVMObject.cpp
+++ b/Kernel/Memory/AnonymousVMObject.cpp
@@ -346,9 +346,10 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         }
     }
 
-    u8* dest_ptr = MM.quickmap_page(*page);
     dbgln_if(PAGE_FAULT_DEBUG, "      >> COW {} <- {}", page->paddr(), page_slot->paddr());
     {
+        SpinlockLocker mm_locker(s_mm_lock);
+        u8* dest_ptr = MM.quickmap_page(*page);
         SmapDisabler disabler;
         void* fault_at;
         if (!safe_memcpy(dest_ptr, vaddr.as_ptr(), PAGE_SIZE, fault_at)) {
@@ -361,9 +362,9 @@
             else
                 VERIFY_NOT_REACHED();
         }
+        MM.unquickmap_page();
     }
     page_slot = move(page);
-    MM.unquickmap_page();
     set_should_cow(page_index, false);
     return PageFaultResponse::Continue;
 }
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 237e60e6d7..324f9cb083 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -1004,9 +1004,9 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
-    SpinlockLocker lock(s_mm_lock);
 
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 void MemoryManager::unquickmap_page()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    SpinlockLocker lock(s_mm_lock);
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index ad55d520df..c232e1bb7d 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -451,9 +451,12 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         return PageFaultResponse::OutOfMemory;
     }
 
-    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
-    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
-    MM.unquickmap_page();
+    {
+        SpinlockLocker mm_locker(s_mm_lock);
+        u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
+        memcpy(dest_ptr, page_buffer, PAGE_SIZE);
+        MM.unquickmap_page();
+    }
 
     remap_vmobject_page(page_index_in_vmobject);
     return PageFaultResponse::Continue;
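
Usage note: after this change, every quickmap_page()/unquickmap_page()
pair must sit inside its own s_mm_lock scope, as both fixed call sites
above now do. A minimal sketch of the resulting convention follows; the
helper name copy_into_physical_page() and its framing are illustrative
assumptions, not part of the patch:

    // Illustrative helper, not kernel code. It shows the convention the
    // new VERIFYs enforce: the caller takes s_mm_lock before calling
    // quickmap_page() and keeps holding it until after unquickmap_page().
    void copy_into_physical_page(PhysicalPage& page, u8 const* src)
    {
        // Taking the lock here satisfies VERIFY(s_mm_lock.own_lock())
        // inside both quickmap_page() and unquickmap_page().
        SpinlockLocker mm_locker(s_mm_lock);
        u8* dest_ptr = MM.quickmap_page(page);
        memcpy(dest_ptr, src, PAGE_SIZE);
        MM.unquickmap_page(); // still holding s_mm_lock at this point
    }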