diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 9642360e43..d0d427c5fb 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -391,7 +391,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
         return PageFaultResponse::ShouldCrash;
     }
 
-    return region->handle_fault(fault);
+    return region->handle_fault(fault, lock);
 }
 
 OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 3c18b1d65b..54f216dc90 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -281,7 +281,8 @@ bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
 {
     bool success = true;
     ASSERT(s_mm_lock.own_lock());
-    ASSERT(m_page_directory);
+    if (!m_page_directory)
+        return success; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page_range(page_index, page_count))
         return success; // not an error, region doesn't map this page range
     ScopedSpinLock page_lock(m_page_directory->get_lock());
@@ -318,7 +319,8 @@ bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
 bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
     ScopedSpinLock lock(s_mm_lock);
-    ASSERT(m_page_directory);
+    if (!m_page_directory)
+        return true; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
     ScopedSpinLock page_lock(m_page_directory->get_lock());
@@ -404,9 +406,8 @@ void Region::remap()
     map(*m_page_directory);
 }
 
-PageFaultResponse Region::handle_fault(const PageFault& fault)
+PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
-    ScopedSpinLock lock(s_mm_lock);
     auto page_index_in_region = page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
         if (fault.is_read() && !is_readable()) {
@@ -419,7 +420,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
     }
     if (vmobject().is_inode()) {
         dbgln("NP(inode) fault in Region({})[{}]", this, page_index_in_region);
-        return handle_inode_fault(page_index_in_region);
+        return handle_inode_fault(page_index_in_region, mm_lock);
     }
 
     auto& page_slot = physical_page_slot(page_index_in_region);
@@ -514,13 +515,19 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     return response;
 }
 
-PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
+PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(vmobject().is_inode());
+    mm_lock.unlock();
+    ASSERT(!s_mm_lock.own_lock());
+    ASSERT(!g_scheduler_lock.own_lock());
 
     LOCKER(vmobject().m_paging_lock);
 
+    mm_lock.lock();
+
+    ASSERT_INTERRUPTS_DISABLED();
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
 
@@ -541,8 +548,13 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
+
+    // Reading the page may block, so release the MM lock temporarily
+    mm_lock.unlock();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
     auto nread = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
+    mm_lock.lock();
+
     if (nread < 0) {
         klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
         return PageFaultResponse::ShouldCrash;
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index bee8c32e33..87b0f75004 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -93,7 +93,7 @@ public:
     bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
     void set_kernel(bool kernel) { m_kernel = kernel; }
 
-    PageFaultResponse handle_fault(const PageFault&);
+    PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);
 
     OwnPtr<Region> clone(Process&);
 
@@ -254,7 +254,7 @@ private:
     bool remap_vmobject_page(size_t index, bool with_flush = true);
 
     PageFaultResponse handle_cow_fault(size_t page_index);
-    PageFaultResponse handle_inode_fault(size_t page_index);
+    PageFaultResponse handle_inode_fault(size_t page_index, ScopedSpinLock<RecursiveSpinLock>&);
     PageFaultResponse handle_zero_fault(size_t page_index);
 
     bool map_individual_page_impl(size_t page_index);
diff --git a/Kernel/VM/SharedInodeVMObject.cpp b/Kernel/VM/SharedInodeVMObject.cpp
index 4a04d5031f..33b0316f59 100644
--- a/Kernel/VM/SharedInodeVMObject.cpp
+++ b/Kernel/VM/SharedInodeVMObject.cpp
@@ -34,8 +34,8 @@ namespace Kernel {
 NonnullRefPtr<SharedInodeVMObject> SharedInodeVMObject::create_with_inode(Inode& inode)
 {
     size_t size = inode.size();
-    if (inode.shared_vmobject())
-        return *inode.shared_vmobject();
+    if (auto shared_vmobject = inode.shared_vmobject())
+        return shared_vmobject.release_nonnull();
     auto vmobject = adopt(*new SharedInodeVMObject(inode, size));
     vmobject->inode().set_shared_vmobject(*vmobject);
     return vmobject;