Kernel: Release MM lock while yielding from inode page fault handler
We need to make sure other processors can grab the MM lock while we wait, so release it when we might block. Reading the page from disk may also block, so release it during that time as well.
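The core of the change is threading the caller's lock guard through handle_fault() so the fault path can drop and retake s_mm_lock around anything that may yield. A minimal user-space sketch of that pattern, with std::unique_lock standing in for ScopedSpinLock<RecursiveSpinLock> and read_page_from_disk() as a hypothetical placeholder for the blocking work:

    #include <mutex>

    // Toy stand-in for the blocking work (taking the paging lock, or
    // inode.read_bytes() hitting the disk); it may sleep/yield.
    static void read_page_from_disk() { }

    // The caller holds mm_lock on entry; the callee releases it while it
    // might block so other processors can grab the lock in the meantime,
    // then reacquires it before touching shared memory-manager state again.
    static void handle_inode_fault_sketch(std::unique_lock<std::mutex>& mm_lock)
    {
        mm_lock.unlock();       // let other CPUs take the MM lock while we wait
        read_page_from_disk();  // blocking work runs without the lock held
        mm_lock.lock();         // retake it before continuing
    }

    int main()
    {
        std::mutex mm_mutex;
        std::unique_lock<std::mutex> mm_lock(mm_mutex); // like entering handle_page_fault
        handle_inode_fault_sketch(mm_lock);
    }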
parent ac3927086f
commit 250a310454

4 changed files with 23 additions and 11 deletions
Kernel/VM/MemoryManager.cpp

@@ -391,7 +391,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
         return PageFaultResponse::ShouldCrash;
     }
 
-    return region->handle_fault(fault);
+    return region->handle_fault(fault, lock);
 }
 
 OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
Kernel/VM/Region.cpp

@@ -281,7 +281,8 @@ bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
 {
     bool success = true;
     ASSERT(s_mm_lock.own_lock());
-    ASSERT(m_page_directory);
+    if (!m_page_directory)
+        return success; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page_range(page_index, page_count))
         return success; // not an error, region doesn't map this page range
     ScopedSpinLock page_lock(m_page_directory->get_lock());
@@ -318,7 +319,8 @@ bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
 bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
     ScopedSpinLock lock(s_mm_lock);
-    ASSERT(m_page_directory);
+    if (!m_page_directory)
+        return true; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
     ScopedSpinLock page_lock(m_page_directory->get_lock());
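The two hunks above turn ASSERT(m_page_directory) into a tolerated early return: once the MM lock can be dropped mid-fault, another thread may unmap the region (or it may not have been mapped yet) before a remap runs, so a missing page directory becomes an expected outcome rather than a broken invariant, as the diff's own comments note. A reduced, hypothetical sketch of the shape of that change:

    // Before: ASSERT(m_page_directory) treated "not mapped" as a bug.
    // After: it is an expected race outcome, reported as success.
    struct RegionSketch {
        void* m_page_directory { nullptr };

        bool do_remap_sketch()
        {
            if (!m_page_directory)
                return true; // not an error: nothing mapped, nothing to remap
            // ... walk and remap the pages ...
            return true;
        }
    };

    int main()
    {
        RegionSketch region;
        return region.do_remap_sketch() ? 0 : 1;
    }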
@@ -404,9 +406,8 @@ void Region::remap()
     map(*m_page_directory);
 }
 
-PageFaultResponse Region::handle_fault(const PageFault& fault)
+PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
-    ScopedSpinLock lock(s_mm_lock);
     auto page_index_in_region = page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
         if (fault.is_read() && !is_readable()) {
@@ -419,7 +420,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
     }
     if (vmobject().is_inode()) {
         dbgln<PAGE_FAULT_DEBUG>("NP(inode) fault in Region({})[{}]", this, page_index_in_region);
-        return handle_inode_fault(page_index_in_region);
+        return handle_inode_fault(page_index_in_region, mm_lock);
     }
 
     auto& page_slot = physical_page_slot(page_index_in_region);
@@ -514,13 +515,19 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     return response;
 }
 
-PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
+PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(vmobject().is_inode());
 
+    mm_lock.unlock();
+    ASSERT(!s_mm_lock.own_lock());
+    ASSERT(!g_scheduler_lock.own_lock());
+
     LOCKER(vmobject().m_paging_lock);
 
+    mm_lock.lock();
+
     ASSERT_INTERRUPTS_DISABLED();
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
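The new ASSERTs before LOCKER() document the ordering rule this commit relies on: m_paging_lock is a blocking lock, so the thread must not still hold the s_mm_lock spinlock (nor the scheduler lock) when it goes to sleep on it. A toy sketch of that discipline, with a thread-local flag standing in for RecursiveSpinLock::own_lock() and std::mutex for the blocking Lock:

    #include <cassert>
    #include <mutex>

    thread_local bool we_hold_mm_spinlock = false; // stand-in for own_lock()
    static std::mutex paging_lock;                 // blocking, like m_paging_lock

    static void take_paging_lock_sketch()
    {
        // Mirrors ASSERT(!s_mm_lock.own_lock()): never go to sleep on a
        // blocking lock while a spinlock is still held.
        assert(!we_hold_mm_spinlock);
        std::lock_guard<std::mutex> locker(paging_lock); // like LOCKER(...)
        // ... serialized paging work ...
    }

    int main() { take_paging_lock_sketch(); }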
@@ -541,8 +548,13 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
+
+    // Reading the page may block, so release the MM lock temporarily
+    mm_lock.unlock();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
     auto nread = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
+    mm_lock.lock();
+
     if (nread < 0) {
         klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
         return PageFaultResponse::ShouldCrash;
Kernel/VM/Region.h

@@ -93,7 +93,7 @@ public:
     bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
     void set_kernel(bool kernel) { m_kernel = kernel; }
 
-    PageFaultResponse handle_fault(const PageFault&);
+    PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);
 
     OwnPtr<Region> clone(Process&);
 
@@ -254,7 +254,7 @@ private:
     bool remap_vmobject_page(size_t index, bool with_flush = true);
 
     PageFaultResponse handle_cow_fault(size_t page_index);
-    PageFaultResponse handle_inode_fault(size_t page_index);
+    PageFaultResponse handle_inode_fault(size_t page_index, ScopedSpinLock<RecursiveSpinLock>&);
     PageFaultResponse handle_zero_fault(size_t page_index);
 
     bool map_individual_page_impl(size_t page_index);
Kernel/VM/SharedInodeVMObject.cpp

@@ -34,8 +34,8 @@ namespace Kernel {
 NonnullRefPtr<SharedInodeVMObject> SharedInodeVMObject::create_with_inode(Inode& inode)
 {
     size_t size = inode.size();
-    if (inode.shared_vmobject())
-        return *inode.shared_vmobject();
+    if (auto shared_vmobject = inode.shared_vmobject())
+        return shared_vmobject.release_nonnull();
     auto vmobject = adopt(*new SharedInodeVMObject(inode, size));
     vmobject->inode().set_shared_vmobject(*vmobject);
     return vmobject;
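The SharedInodeVMObject hunk also closes a small time-of-check/time-of-use window: the old code fetched inode.shared_vmobject() twice, so the object could in principle go away between the check and the second dereference; capturing the returned RefPtr once holds a strong reference across both uses. A user-space sketch of the capture-once pattern, with std::shared_ptr standing in for RefPtr and g_cached_vmobject as a hypothetical cache slot:

    #include <memory>

    struct VMObjectSketch { };

    // Hypothetical cache slot; in the kernel this is the inode's cached
    // pointer to its shared VM object, which can change concurrently.
    static std::shared_ptr<VMObjectSketch> g_cached_vmobject;

    static std::shared_ptr<VMObjectSketch> create_or_reuse()
    {
        // Fetch once and keep the strong reference: checking the cache and
        // then dereferencing it a second time would race with teardown.
        if (auto cached = g_cached_vmobject)
            return cached;
        auto fresh = std::make_shared<VMObjectSketch>();
        g_cached_vmobject = fresh;
        return fresh;
    }

    int main()
    {
        auto first = create_or_reuse();
        auto second = create_or_reuse(); // reuses the cached object
    }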