diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index fca0b8589a..f1c0c32591 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -87,6 +87,7 @@ MemoryManager::~MemoryManager()
 
 void MemoryManager::protect_kernel_image()
 {
+    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
     // Disable writing to the kernel text and rodata segments.
     for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -193,10 +194,11 @@ void MemoryManager::parse_memory_map()
     ASSERT(m_user_physical_pages > 0);
 }
 
-PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -213,6 +215,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -259,6 +262,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 24c6fedf70..3d62a05f43 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -191,7 +191,7 @@ private:
     PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
     PageTableEntry* quickmap_pt(PhysicalAddress);
 
-    PageTableEntry* pte(const PageDirectory&, VirtualAddress);
+    PageTableEntry* pte(PageDirectory&, VirtualAddress);
     PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
     void release_pte(PageDirectory&, VirtualAddress, bool);
diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h
index 98c256b920..880524cc78 100644
--- a/Kernel/VM/PageDirectory.h
+++ b/Kernel/VM/PageDirectory.h
@@ -57,6 +57,8 @@ public:
     Process* process() { return m_process; }
     const Process* process() const { return m_process; }
 
+    RecursiveSpinLock& get_lock() { return m_lock; }
+
 private:
     PageDirectory(Process&, const RangeAllocator* parent_range_allocator);
     PageDirectory();
@@ -67,6 +69,7 @@ private:
     RefPtr<PhysicalPage> m_directory_table;
     RefPtr<PhysicalPage> m_directory_pages[4];
     HashMap<u32, RefPtr<PhysicalPage>> m_page_tables;
+    RecursiveSpinLock m_lock;
 };
 
 }
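The hunks above hinge on a recursive spinlock that knows its owner: pte(), ensure_pte() and release_pte() can then ASSERT that the caller already holds the page directory's lock instead of re-acquiring it. What follows is a minimal, self-contained sketch of that mechanism, not SerenityOS's actual SpinLock implementation; it uses std::atomic and std::thread for portability, and all names in it are illustrative.

#include <atomic>
#include <cassert>
#include <thread>

class RecursiveSpinLock {
public:
    void lock()
    {
        auto me = std::this_thread::get_id();
        if (m_owner.load(std::memory_order_acquire) == me) {
            ++m_depth; // Re-entrant acquisition by the current owner.
            return;
        }
        std::thread::id unowned {};
        // Spin until the lock is free, then claim it.
        // (A real kernel lock would also save and disable interrupts.)
        while (!m_owner.compare_exchange_weak(unowned, me, std::memory_order_acquire))
            unowned = {};
        m_depth = 1;
    }

    void unlock()
    {
        assert(own_lock());
        if (--m_depth == 0)
            m_owner.store({}, std::memory_order_release);
    }

    // This is what the new ASSERT(page_directory.get_lock().own_lock())
    // checks rely on: callees can verify the caller's lock discipline.
    bool own_lock() const
    {
        return m_owner.load(std::memory_order_acquire) == std::this_thread::get_id();
    }

private:
    std::atomic<std::thread::id> m_owner {};
    unsigned m_depth { 0 }; // Only touched by the owner, so no atomics needed.
};

// RAII guard in the spirit of ScopedSpinLock: locks on construction and
// unlocks on destruction, so early returns cannot leak the lock.
class ScopedSpinLock {
public:
    explicit ScopedSpinLock(RecursiveSpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedSpinLock() { m_lock.unlock(); }

private:
    RecursiveSpinLock& m_lock;
};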
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index a1d39daf24..9552264eb1 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -229,6 +229,7 @@ Bitmap& Region::ensure_cow_map() const
 
 bool Region::map_individual_page_impl(size_t page_index)
 {
+    ASSERT(m_page_directory->get_lock().own_lock());
     auto page_vaddr = vaddr_from_page_index(page_index);
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte) {
@@ -260,8 +261,9 @@ bool Region::map_individual_page_impl(size_t page_index)
 
 bool Region::remap_page(size_t page_index, bool with_flush)
 {
-    ASSERT(m_page_directory);
     ScopedSpinLock lock(s_mm_lock);
+    ASSERT(m_page_directory);
+    ScopedSpinLock page_lock(m_page_directory->get_lock());
     ASSERT(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
@@ -273,6 +275,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
+    ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -302,6 +305,7 @@ void Region::set_page_directory(PageDirectory& page_directory)
 bool Region::map(PageDirectory& page_directory)
 {
     ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock page_lock(page_directory.get_lock());
     set_page_directory(page_directory);
 #ifdef MM_DEBUG
     dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
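Taken together, the Region changes establish a consistent two-level lock order: the global s_mm_lock is always acquired before the per-address-space page directory lock, and only then do the helpers that ASSERT both locks walk or modify page tables. Below is a toy model of that ordering, with std::recursive_mutex standing in for the kernel spinlocks; ToyPageDirectory, map_one_page and region_map are invented names for illustration, not SerenityOS APIs.

#include <mutex>

static std::recursive_mutex s_mm_lock; // stand-in for the global MM lock

struct ToyPageDirectory {
    std::recursive_mutex lock; // per address space, like PageDirectory::m_lock
};

// Analogous to Region::map_individual_page_impl(). Because both mutexes are
// recursive, re-acquiring them here is safe even though the caller already
// holds them (the real code ASSERTs ownership instead of re-locking).
static void map_one_page(ToyPageDirectory& pd)
{
    std::lock_guard mm_guard(s_mm_lock);
    std::lock_guard page_guard(pd.lock);
    // ... write the page table entry ...
}

// Analogous to Region::map(): every path takes s_mm_lock first and the
// directory's lock second, so two threads can never hold the two locks in
// opposite orders and deadlock against each other.
static void region_map(ToyPageDirectory& pd, int page_count)
{
    std::lock_guard mm_guard(s_mm_lock);
    std::lock_guard page_guard(pd.lock);
    for (int i = 0; i < page_count; ++i)
        map_one_page(pd);
}

int main()
{
    ToyPageDirectory pd;
    region_map(pd, 4);
}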