1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 23:47:45 +00:00

Kernel: Stop taking MM lock while using regular quickmaps

You're still required to disable interrupts though, as the mappings are
per-CPU. This exposed the fact that our CR3 lookup map is insufficiently
protected (but we'll address that in a separate commit.)
This commit is contained in:
Andreas Kling 2022-08-22 15:23:32 +02:00
parent c8375c51ff
commit 6cd3695761
5 changed files with 15 additions and 14 deletions

View file

@@ -7,11 +7,13 @@
 #include <AK/Singleton.h>
+#include <Kernel/Arch/InterruptDisabler.h>
 #include <Kernel/Memory/PageDirectory.h>
 #include <Kernel/Thread.h>
 namespace Kernel::Memory {
+// FIXME: This needs real locking:
 static Singleton<IntrusiveRedBlackTree<&PageDirectory::m_tree_node>> s_cr3_map;
 static IntrusiveRedBlackTree<&PageDirectory::m_tree_node>& cr3_map()
@@ -22,11 +24,13 @@ static IntrusiveRedBlackTree<&PageDirectory::m_tree_node>& cr3_map()
 void PageDirectory::register_page_directory(PageDirectory* directory)
 {
+    InterruptDisabler disabler;
     cr3_map().insert(directory->cr3(), *directory);
 }
 void PageDirectory::deregister_page_directory(PageDirectory* directory)
 {
+    InterruptDisabler disabler;
     cr3_map().remove(directory->cr3());
 }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org> * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
* *
* SPDX-License-Identifier: BSD-2-Clause * SPDX-License-Identifier: BSD-2-Clause
*/ */
@@ -360,7 +360,6 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
     dbgln_if(PAGE_FAULT_DEBUG, "    >> COW {} <- {}", page->paddr(), page_slot->paddr());
     {
-        SpinlockLocker mm_locker(s_mm_lock);
         u8* dest_ptr = MM.quickmap_page(*page);
         SmapDisabler disabler;
         void* fault_at;

View file

@@ -8,6 +8,7 @@
 #include <AK/Memory.h>
 #include <AK/StringView.h>
 #include <Kernel/Arch/CPU.h>
+#include <Kernel/Arch/InterruptDisabler.h>
 #include <Kernel/Arch/PageDirectory.h>
 #include <Kernel/Arch/PageFault.h>
 #include <Kernel/Arch/RegisterState.h>
@@ -880,7 +881,7 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
 LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 {
-    VERIFY(s_mm_lock.is_locked());
+    SpinlockLocker mm_locker(s_mm_lock);
     LockRefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
@@ -905,9 +906,9 @@ LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
-    SpinlockLocker lock(s_mm_lock);
     auto page = find_free_physical_page(true);
     if (should_zero_fill == ShouldZeroFill::Yes) {
+        InterruptDisabler disabler;
         auto* ptr = quickmap_page(*page);
         memset(ptr, 0, PAGE_SIZE);
         unquickmap_page();
@@ -1069,7 +1070,6 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_processor());
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
@@ -1090,7 +1090,6 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 void MemoryManager::unquickmap_page()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(s_mm_lock.is_locked_by_current_processor());
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
@@ -1184,7 +1183,6 @@ void CommittedPhysicalPageSet::uncommit_one()
 void MemoryManager::copy_physical_page(PhysicalPage& physical_page, u8 page_buffer[PAGE_SIZE])
 {
-    SpinlockLocker locker(s_mm_lock);
     auto* quickmapped_page = quickmap_page(physical_page);
     memcpy(page_buffer, quickmapped_page, PAGE_SIZE);
     unquickmap_page();

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
@@ -7,6 +7,7 @@
 #include <AK/Memory.h>
 #include <AK/Singleton.h>
 #include <Kernel/Arch/CPU.h>
+#include <Kernel/Arch/InterruptDisabler.h>
 #include <Kernel/Arch/PageDirectory.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PageDirectory.h>
@@ -29,9 +30,6 @@ ErrorOr<NonnullLockRefPtr<PageDirectory>> PageDirectory::try_create_for_userspac
 {
     auto directory = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) PageDirectory));
-    // NOTE: Take the MM lock since we need it for quickmap.
-    SpinlockLocker lock(s_mm_lock);
 #if ARCH(X86_64)
     directory->m_pml4t = TRY(MM.allocate_physical_page());
 #endif
@@ -47,6 +45,7 @@ ErrorOr<NonnullLockRefPtr<PageDirectory>> PageDirectory::try_create_for_userspac
 #if ARCH(X86_64)
     {
+        InterruptDisabler disabler;
         auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_pml4t);
         table.raw[0] = (FlatPtr)directory->m_directory_table->paddr().as_ptr() | 7;
         MM.unquickmap_page();
@@ -54,6 +53,7 @@ ErrorOr<NonnullLockRefPtr<PageDirectory>> PageDirectory::try_create_for_userspac
 #endif
     {
+        InterruptDisabler disabler;
         auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_directory_table);
         for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
             if (directory->m_directory_pages[i]) {

View file

@@ -1,11 +1,12 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
 #include <AK/Memory.h>
 #include <AK/StringView.h>
+#include <Kernel/Arch/InterruptDisabler.h>
 #include <Kernel/Arch/PageDirectory.h>
 #include <Kernel/Arch/PageFault.h>
 #include <Kernel/Debug.h>
@@ -516,8 +517,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     }
     auto new_physical_page = new_physical_page_or_error.release_value();
     {
-        // NOTE: The MM lock is required for quick-mapping.
-        SpinlockLocker mm_locker(s_mm_lock);
+        InterruptDisabler disabler;
         u8* dest_ptr = MM.quickmap_page(*new_physical_page);
         memcpy(dest_ptr, page_buffer, PAGE_SIZE);
         MM.unquickmap_page();