
Kernel: Minor MM optimization for SMP

MemoryManager::quickmap_pd and MemoryManager::quickmap_pt can only
be called by one processor at a time anyway, since anything using
these must hold the MM lock. So there is no need to inform the other
CPUs to flush their TLBs; we can just flush our own.
Authored by Tom on 2020-07-06 09:11:52 -06:00; committed by Andreas Kling
parent bc107d0b33
commit 655f4daeb1
2 changed files with 18 additions and 12 deletions
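
The optimization rests on the difference between a local TLB flush and a system-wide one: invalidating a translation on the current CPU is a single invlpg instruction, while flushing everywhere means interrupting every other processor and waiting for acknowledgements. A rough sketch of that distinction, for illustration only (this is not SerenityOS's implementation, and smp_broadcast_flush_tlb() is a hypothetical stand-in for the IPI machinery):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical IPI broadcast: in a real kernel this interrupts every other
    // CPU and waits until each one has invalidated the mapping.
    void smp_broadcast_flush_tlb(std::uintptr_t vaddr, std::size_t page_count);

    // Flush only the executing CPU's TLB: cheap, no cross-CPU traffic.
    inline void flush_tlb_local(std::uintptr_t vaddr, std::size_t page_count = 1)
    {
        for (std::size_t i = 0; i < page_count; ++i)
            asm volatile("invlpg (%0)" ::"r"(vaddr + i * 4096) : "memory");
    }

    // Full flush: invalidate locally, then make every other CPU do the same.
    inline void flush_tlb(std::uintptr_t vaddr, std::size_t page_count = 1)
    {
        flush_tlb_local(vaddr, page_count);
        smp_broadcast_flush_tlb(vaddr, page_count);
    }

Because only the lock holder can be using the quickmap slots, the expensive broadcast half can be skipped entirely for them.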

Kernel/VM/MemoryManager.cpp

@@ -60,6 +60,7 @@ MemoryManager& MM
 MemoryManager::MemoryManager()
 {
+    ScopedSpinLock lock(s_mm_lock);
     m_kernel_page_directory = PageDirectory::create_kernel_page_directory();
     parse_memory_map();
     write_cr3(kernel_page_directory().cr3());
@@ -165,7 +166,7 @@ void MemoryManager::parse_memory_map()
 const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -181,7 +182,7 @@ const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, Vi
 PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
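
Note the contract change in pte() and ensure_pte(): instead of acquiring s_mm_lock themselves, they now assert that the caller already owns it. A minimal self-contained sketch of this caller-holds-the-lock pattern using standard C++ threads follows; OwnedSpinLock and its helpers are illustrative, not the kernel's actual SpinLock/ScopedSpinLock API:

    #include <atomic>
    #include <cassert>
    #include <thread>

    // Toy spinlock that remembers which thread owns it, so helper functions can
    // assert "my caller already holds this" instead of re-acquiring it.
    class OwnedSpinLock {
    public:
        void lock()
        {
            while (m_locked.exchange(true, std::memory_order_acquire)) { }
            m_owner.store(std::this_thread::get_id());
        }
        void unlock()
        {
            m_owner.store(std::thread::id());
            m_locked.store(false, std::memory_order_release);
        }
        bool own_lock() const { return m_owner.load() == std::this_thread::get_id(); }

    private:
        std::atomic<bool> m_locked { false };
        std::atomic<std::thread::id> m_owner { std::thread::id() };
    };

    static OwnedSpinLock g_lock;

    // Callee: asserts the precondition rather than taking the lock again.
    static int read_shared_state(const int& shared)
    {
        assert(g_lock.own_lock());
        return shared;
    }

    // Caller: takes the lock once and may call any number of helpers under it.
    static int locked_read(int& shared)
    {
        g_lock.lock();
        int value = read_shared_state(shared);
        g_lock.unlock();
        return value;
    }

The payoff mirrors the diff: helpers called under the lock stay cheap, and a caller that forgets the lock is caught by the assertion instead of silently racing or double-locking.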
@@ -554,7 +555,7 @@ extern "C" PageTableEntry boot_pd3_pt1023[1024];
 PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     auto& pte = boot_pd3_pt1023[4];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
     if (pte.physical_page_base() != pd_paddr.as_ptr()) {
@@ -565,14 +566,17 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
         pte.set_present(true);
         pte.set_writable(true);
         pte.set_user_allowed(false);
-        flush_tlb(VirtualAddress(0xffe04000));
+        // Because we must continue to hold the MM lock while we use this
+        // mapping, it is sufficient to only flush on the current CPU. Other
+        // CPUs trying to use this API must wait on the MM lock anyway
+        flush_tlb_local(VirtualAddress(0xffe04000));
     }
     return (PageDirectoryEntry*)0xffe04000;
 }

 PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     auto& pte = boot_pd3_pt1023[0];
     if (pte.physical_page_base() != pt_paddr.as_ptr()) {
 #ifdef MM_DEBUG
@@ -582,7 +586,10 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
         pte.set_present(true);
         pte.set_writable(true);
         pte.set_user_allowed(false);
-        flush_tlb(VirtualAddress(0xffe00000));
+        // Because we must continue to hold the MM lock while we use this
+        // mapping, it is sufficient to only flush on the current CPU. Other
+        // CPUs trying to use this API must wait on the MM lock anyway
+        flush_tlb_local(VirtualAddress(0xffe00000));
     }
     return (PageTableEntry*)0xffe00000;
 }
@@ -606,7 +613,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
         pte.set_present(true);
         pte.set_writable(true);
         pte.set_user_allowed(false);
-        flush_tlb_local(vaddr, 1);
+        flush_tlb_local(vaddr);
     }
     return vaddr.as_ptr();
 }
@@ -621,7 +628,7 @@ void MemoryManager::unquickmap_page()
     VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
     auto& pte = boot_pd3_pt1023[pte_idx];
     pte.clear();
-    flush_tlb_local(vaddr, 1);
+    flush_tlb_local(vaddr);
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }

Kernel/VM/PageDirectory.cpp

@@ -48,7 +48,7 @@ static HashMap<u32, PageDirectory*>& cr3_map()
 RefPtr<PageDirectory> PageDirectory::find_by_cr3(u32 cr3)
 {
-    InterruptDisabler disabler;
+    ScopedSpinLock lock(s_mm_lock);
     return cr3_map().get(cr3).value_or({});
 }
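
This hunk is the SMP-correctness core of the second file: disabling interrupts only stops the current CPU from being preempted and does nothing about another core touching cr3_map() at the same moment, while the spinlock serializes all CPUs. An illustrative before/after sketch using the identifiers from the diff (not a compilable standalone excerpt):

    // Uniprocessor-era reasoning: nothing on *this* CPU can interleave with us,
    // but a second CPU is still free to read or write the map concurrently.
    {
        InterruptDisabler disabler;
        cr3_map().set(cr3(), this); // racy on SMP
    }

    // SMP-safe: every CPU that wants to touch cr3_map() must win s_mm_lock first.
    {
        ScopedSpinLock lock(s_mm_lock);
        cr3_map().set(cr3(), this);
    }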
@@ -76,6 +76,7 @@ PageDirectory::PageDirectory()
 PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_range_allocator)
     : m_process(&process)
 {
+    ScopedSpinLock lock(s_mm_lock);
     if (parent_range_allocator) {
         m_range_allocator.initialize_from_parent(*parent_range_allocator);
     } else {
@@ -93,7 +94,6 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
     m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
     {
-        InterruptDisabler disabler;
         auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_directory_table);
         table.raw[0] = (u64)m_directory_pages[0]->paddr().as_ptr() | 1;
         table.raw[1] = (u64)m_directory_pages[1]->paddr().as_ptr() | 1;
@@ -109,7 +109,6 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
     auto* new_pd = MM.quickmap_pd(*this, 0);
     memcpy(new_pd, &buffer, sizeof(PageDirectoryEntry));
-    InterruptDisabler disabler;
     cr3_map().set(cr3(), this);
 }
@@ -118,7 +117,7 @@ PageDirectory::~PageDirectory()
 #ifdef MM_DEBUG
     dbg() << "MM: ~PageDirectory K" << this;
 #endif
-    InterruptDisabler disabler;
+    ScopedSpinLock lock(s_mm_lock);
     cr3_map().remove(cr3());
 }