Mirror of https://github.com/RGBCube/serenity, synced 2025-10-31 09:52:44 +00:00
Kernel: Aggregate TLB flush requests for Regions for SMP

Rather than sending one TLB flush request for each page, aggregate them
so that we're not spamming the other processors with FlushTLB IPIs.
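The pattern, visible at every call site in the diff below, is to hoist the flush out of the per-page loop: map or unmap all the pages first, then issue one ranged MM.flush_tlb(vaddr, page_count) covering the whole region. A minimal sketch of why that matters on SMP follows; FlushTlbRequest, send_flush_tlb_ipi() and the printf stub are hypothetical stand-ins for illustration, not the kernel's actual IPI machinery.

// Sketch only: the IPI "transport" here is a printf stub standing in for
// the real interrupt that would hit every other processor.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static constexpr size_t PAGE_SIZE = 4096;

struct FlushTlbRequest {
    uintptr_t base;    // first virtual address to invalidate
    size_t page_count; // number of consecutive pages
};

// Stand-in for the real IPI: in the kernel, each receiving processor would
// run an invlpg loop over the range, like Processor::flush_tlb_local() below.
static void send_flush_tlb_ipi(const FlushTlbRequest& request)
{
    std::printf("FlushTLB IPI: base=%#" PRIxPTR ", pages=%zu\n",
        request.base, request.page_count);
}

// Before: one IPI per page, so touching an N-page region interrupts the
// other processors N times.
static void flush_tlb_per_page(uintptr_t base, size_t page_count)
{
    for (size_t i = 0; i < page_count; ++i)
        send_flush_tlb_ipi({ base + i * PAGE_SIZE, 1 });
}

// After: a single IPI carries the whole range.
static void flush_tlb_aggregated(uintptr_t base, size_t page_count)
{
    send_flush_tlb_ipi({ base, page_count });
}

int main()
{
    std::printf("per-page:\n");
    flush_tlb_per_page(0x10000000, 4); // 4 IPIs
    std::printf("aggregated:\n");
    flush_tlb_aggregated(0x10000000, 4); // 1 IPI
}

The receiving side stays cheap either way: as the flush_tlb_local() hunk below shows, each processor walks the range with one invlpg per page, so aggregating trades page_count interrupts for a single interrupt plus page_count local invalidations.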
parent 56126d7a45
commit 06d50f64b0
Author: Tom

3 changed files with 28 additions and 13 deletions
Kernel/Arch/i386/CPU.cpp
@@ -1405,7 +1405,7 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
     while (page_count > 0) {
         asm volatile("invlpg %0"
              :
-             : "m"(*(char*)vaddr.get())
+             : "m"(*ptr)
              : "memory");
         ptr += PAGE_SIZE;
         page_count--;
Kernel/VM/Region.cpp
@@ -69,6 +69,7 @@ NonnullOwnPtr<Region> Region::clone()
 {
     ASSERT(Process::current());
 
+    ScopedSpinLock lock(s_mm_lock);
     if (m_inherit_mode == InheritMode::ZeroedOnFork) {
         ASSERT(m_mmap);
         ASSERT(!m_shared);
@@ -122,16 +123,21 @@ bool Region::commit()
     dbg() << "MM: Commit " << page_count() << " pages in Region " << this << " (VMO=" << &vmobject() << ") at " << vaddr();
 #endif
     for (size_t i = 0; i < page_count(); ++i) {
-        if (!commit(i))
+        if (!commit(i)) {
+            // Flush what we did commit
+            if (i > 0)
+                MM.flush_tlb(vaddr(), i + 1);
             return false;
+        }
     }
+    MM.flush_tlb(vaddr(), page_count());
     return true;
 }
 
 bool Region::commit(size_t page_index)
 {
     ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     auto& vmobject_physical_page_entry = physical_page_slot(page_index);
     if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
         return true;
@@ -142,7 +148,7 @@ bool Region::commit(size_t page_index)
         return false;
     }
     vmobject_physical_page_entry = move(physical_page);
-    remap_page(page_index);
+    remap_page(page_index, false); // caller is in charge of flushing tlb
     return true;
 }
 
@@ -224,7 +230,7 @@ Bitmap& Region::ensure_cow_map() const
 
 void Region::map_individual_page_impl(size_t page_index)
 {
-    auto page_vaddr = vaddr().offset(page_index * PAGE_SIZE);
+    auto page_vaddr = vaddr_from_page_index(page_index);
     auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     auto* page = physical_page(page_index);
     if (!page || (!is_readable() && !is_writable())) {
@@ -244,15 +250,16 @@ void Region::map_individual_page_impl(size_t page_index)
         dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
 #endif
     }
-    MM.flush_tlb(page_vaddr);
 }
 
-void Region::remap_page(size_t page_index)
+void Region::remap_page(size_t page_index, bool with_flush)
 {
     ASSERT(m_page_directory);
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(physical_page(page_index));
     map_individual_page_impl(page_index);
+    if (with_flush)
+        MM.flush_tlb(vaddr_from_page_index(page_index));
 }
 
 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
@@ -260,15 +267,15 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
     for (size_t i = 0; i < page_count(); ++i) {
-        auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
+        auto vaddr = vaddr_from_page_index(i);
         auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
         pte.clear();
-        MM.flush_tlb(vaddr);
 #ifdef MM_DEBUG
         auto* page = physical_page(i);
         dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
 #endif
     }
+    MM.flush_tlb(vaddr(), page_count());
     if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
         if (m_page_directory->range_allocator().contains(range()))
             m_page_directory->range_allocator().deallocate(range());
@@ -281,18 +288,20 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 void Region::set_page_directory(PageDirectory& page_directory)
 {
     ASSERT(!m_page_directory || m_page_directory == &page_directory);
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     m_page_directory = page_directory;
 }
+
 void Region::map(PageDirectory& page_directory)
 {
-    set_page_directory(page_directory);
     ScopedSpinLock lock(s_mm_lock);
+    set_page_directory(page_directory);
 #ifdef MM_DEBUG
     dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
 #endif
     for (size_t page_index = 0; page_index < page_count(); ++page_index)
         map_individual_page_impl(page_index);
+    MM.flush_tlb(vaddr(), page_count());
 }
 
 void Region::remap()
Kernel/VM/Region.h
@@ -117,6 +117,11 @@ public:
     {
         return (vaddr - m_range.base()).get() / PAGE_SIZE;
     }
+
+    VirtualAddress vaddr_from_page_index(size_t page_index) const
+    {
+        return vaddr().offset(page_index * PAGE_SIZE);
+    }
 
     size_t first_page_index() const
     {
@@ -151,7 +156,6 @@ public:
     }
 
     bool commit();
-    bool commit(size_t page_index);
 
     size_t amount_resident() const;
     size_t amount_shared() const;
@@ -175,7 +179,6 @@ public:
     void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
 
     void remap();
-    void remap_page(size_t index);
 
     // For InlineLinkedListNode
     Region* m_next { nullptr };
@@ -197,6 +200,9 @@ private:
             m_access &= ~access;
     }
 
+    bool commit(size_t page_index);
+    void remap_page(size_t index, bool with_flush = true);
+
     PageFaultResponse handle_cow_fault(size_t page_index);
     PageFaultResponse handle_inode_fault(size_t page_index);
     PageFaultResponse handle_zero_fault(size_t page_index);