Mirror of https://github.com/RGBCube/serenity (synced 2025-10-31 18:22:45 +00:00)
Kernel: Make quickmap functions VERIFY that MM lock is held
The quickmap_page() and unquickmap_page() functions are used to map a single physical page at a kernel virtual address for temporary access. These use the per-CPU quickmap buffer in the page tables, and access to this is guarded by the MM lock.

To prevent bugs, quickmap_page() should not *take* the MM lock, but rather verify that it is already held!

This exposed two situations where we were using quickmap without holding the MM lock during page fault handling. This patch is forced to fix these issues (which is great!) :^)
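For illustration (this block is not part of the patch), a minimal sketch of the caller-side discipline the change enforces. SpinlockLocker, s_mm_lock, MM.quickmap_page(), MM.unquickmap_page(), and PAGE_SIZE are names taken from the diff below; the helper function itself is hypothetical:

    // Hypothetical caller following the new rule: take s_mm_lock yourself,
    // then quickmap. quickmap_page()/unquickmap_page() only VERIFY the lock.
    void copy_to_physical_page(PhysicalAddress paddr, u8 const* src)
    {
        SpinlockLocker mm_locker(s_mm_lock);    // holding the spinlock also keeps
                                                // interrupts disabled, satisfying
                                                // VERIFY_INTERRUPTS_DISABLED()
        u8* dest_ptr = MM.quickmap_page(paddr); // VERIFY(s_mm_lock.own_lock()) passes
        memcpy(dest_ptr, src, PAGE_SIZE);       // per-CPU temporary mapping is live here
        MM.unquickmap_page();                   // VERIFYs the lock, then unmaps
    }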
This commit is contained in:
parent 1b9916439f
commit a930877f31

3 changed files with 11 additions and 7 deletions
Kernel/Memory/AnonymousVMObject.cpp:

@@ -346,9 +346,10 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         }
     }
 
-    u8* dest_ptr = MM.quickmap_page(*page);
     dbgln_if(PAGE_FAULT_DEBUG, "      >> COW {} <- {}", page->paddr(), page_slot->paddr());
     {
+        SpinlockLocker mm_locker(s_mm_lock);
+        u8* dest_ptr = MM.quickmap_page(*page);
         SmapDisabler disabler;
         void* fault_at;
         if (!safe_memcpy(dest_ptr, vaddr.as_ptr(), PAGE_SIZE, fault_at)) {
@@ -361,9 +362,9 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
             else
                 VERIFY_NOT_REACHED();
         }
+        MM.unquickmap_page();
     }
     page_slot = move(page);
-    MM.unquickmap_page();
     set_should_cow(page_index, false);
     return PageFaultResponse::Continue;
 }
Kernel/Memory/MemoryManager.cpp:

@@ -1004,9 +1004,9 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
-    SpinlockLocker lock(s_mm_lock);
 
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 void MemoryManager::unquickmap_page()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    SpinlockLocker lock(s_mm_lock);
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
Kernel/Memory/Region.cpp:

@@ -451,9 +451,12 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         return PageFaultResponse::OutOfMemory;
     }
 
-    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
-    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
-    MM.unquickmap_page();
+    {
+        SpinlockLocker mm_locker(s_mm_lock);
+        u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
+        memcpy(dest_ptr, page_buffer, PAGE_SIZE);
+        MM.unquickmap_page();
+    }
 
     remap_vmobject_page(page_index_in_vmobject);
     return PageFaultResponse::Continue;
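A note on the design choice (editorial, not from the commit): with the VERIFY form, a caller that forgets the lock fails loudly at the call site instead of being silently papered over. A hypothetical sketch:

    // Hypothetical buggy caller that never took s_mm_lock:
    u8* dest_ptr = MM.quickmap_page(paddr);
    // Now: VERIFY(s_mm_lock.own_lock()) fails and the kernel panics right here,
    // pointing straight at the missing-lock bug.
    // Before: quickmap_page() quietly took s_mm_lock itself, masking the bug,
    // which is exactly how the two page fault handling paths above went unnoticed.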
Author: Andreas Kling