
Kernel: If a VMObject is shared, broadcast page remappings

If we remap pages (e.g. lazy allocation) inside a VMObject that is
shared among more than one region, broadcast it to any other region
that may be mapping the same page.
Authored by Tom on 2021-01-02 12:03:14 -07:00, committed by Andreas Kling
parent e3190bd144
commit c630669304
5 changed files with 117 additions and 18 deletions
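To illustrate the broadcast described in the commit message, here is a minimal user-space sketch. It is not kernel code: VMObject, Region, first_page_index, page_count and the remap methods below are simplified stand-ins modeled on the kernel names. It shows the shape of the change: when a VMObject is mapped by more than one region, a remap triggered through one region is replayed in every region that maps the same page.

// Simplified, user-space model of the broadcast pattern this commit introduces.
// VMObject and Region are stand-ins here, not the kernel classes.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Region;

struct VMObject {
    std::vector<Region*> regions; // every region currently mapping this object
    bool is_shared_by_multiple_regions() const { return regions.size() > 1; }
};

struct Region {
    VMObject& vmobject;
    std::size_t first_page_index; // offset into the VMObject, in pages
    std::size_t page_count;       // size of the window this region maps

    Region(VMObject& object, std::size_t first, std::size_t count)
        : vmobject(object)
        , first_page_index(first)
        , page_count(count)
    {
        vmobject.regions.push_back(this);
    }

    // Refresh this region's mapping of one VMObject page, if it maps it at all.
    void do_remap_vmobject_page(std::size_t vmobject_page_index)
    {
        if (vmobject_page_index < first_page_index || vmobject_page_index >= first_page_index + page_count)
            return; // not an error, this region doesn't map that page
        std::printf("region %p remaps vmobject page %zu (local page %zu)\n",
            static_cast<void*>(this), vmobject_page_index, vmobject_page_index - first_page_index);
    }

    // The broadcast: if the VMObject is shared, every region mapping it gets
    // the remap, not only the region that took the page fault.
    void remap_vmobject_page(std::size_t vmobject_page_index)
    {
        if (vmobject.is_shared_by_multiple_regions()) {
            for (auto* region : vmobject.regions)
                region->do_remap_vmobject_page(vmobject_page_index);
        } else {
            do_remap_vmobject_page(vmobject_page_index);
        }
    }
};

int main()
{
    VMObject object;
    Region a(object, 0, 8); // maps vmobject pages [0, 8)
    Region b(object, 4, 8); // maps vmobject pages [4, 12), overlapping a

    // A lazy allocation that faults in region a for vmobject page 5 must also
    // refresh region b's mapping of the same page.
    a.remap_vmobject_page(5);
}

In the actual patch the same shape lives in Region::remap_vmobject_page and Region::remap_vmobject_page_range: take s_mm_lock once in the wrapper, run the do_* helper on every region that VMObject::for_each_region yields, and let each helper bail out early via the new translate helpers when the page falls outside its window.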

Kernel/VM/AnonymousVMObject.cpp

@@ -215,7 +215,7 @@ int AnonymousVMObject::purge_impl()
                 } else {
                     klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " (no ownership) at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
                 }
-                region.remap_page_range(range.base, range.count);
+                region.remap_vmobject_page_range(range.base, range.count);
             }
         });
     }

Kernel/VM/Region.cpp

@@ -51,12 +51,14 @@ Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offs
     , m_cacheable(cacheable)
     , m_kernel(kernel)
 {
+    m_vmobject->ref_region();
     register_purgeable_page_ranges();
     MM.register_region(*this);
 }
 
 Region::~Region()
 {
+    m_vmobject->unref_region();
     unregister_purgeable_page_ranges();
 
     // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
@@ -153,7 +155,9 @@ void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
     if (m_vmobject.ptr() == obj.ptr())
         return;
     unregister_purgeable_page_ranges();
+    m_vmobject->unref_region();
     m_vmobject = move(obj);
+    m_vmobject->ref_region();
     register_purgeable_page_ranges();
 }
@@ -299,11 +303,13 @@ bool Region::map_individual_page_impl(size_t page_index)
     return true;
 }
 
-bool Region::remap_page_range(size_t page_index, size_t page_count)
+bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
 {
     bool success = true;
-    ScopedSpinLock lock(s_mm_lock);
+    ASSERT(s_mm_lock.own_lock());
     ASSERT(m_page_directory);
+    if (!translate_vmobject_page_range(page_index, page_count))
+        return success; // not an error, region doesn't map this page range
     ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t index = page_index;
     while (index < page_index + page_count) {
@@ -318,10 +324,29 @@ bool Region::remap_page_range(size_t page_index, size_t page_count)
     return success;
 }
 
-bool Region::remap_page(size_t page_index, bool with_flush)
+bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
+{
+    bool success = true;
+    ScopedSpinLock lock(s_mm_lock);
+    auto& vmobject = this->vmobject();
+    if (vmobject.is_shared_by_multiple_regions()) {
+        vmobject.for_each_region([&](auto& region) {
+            if (!region.do_remap_vmobject_page_range(page_index, page_count))
+                success = false;
+        });
+    } else {
+        if (!do_remap_vmobject_page_range(page_index, page_count))
+            success = false;
+    }
+    return success;
+}
+
+bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
+    if (!translate_vmobject_page(page_index))
+        return true; // not an error, region doesn't map this page
     ScopedSpinLock page_lock(m_page_directory->get_lock());
     ASSERT(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
@@ -330,6 +355,23 @@ bool Region::remap_page(size_t page_index, bool with_flush)
     return success;
 }
 
+bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
+{
+    bool success = true;
+    ScopedSpinLock lock(s_mm_lock);
+    auto& vmobject = this->vmobject();
+    if (vmobject.is_shared_by_multiple_regions()) {
+        vmobject.for_each_region([&](auto& region) {
+            if (!region.do_remap_vmobject_page(page_index, with_flush))
+                success = false;
+        });
+    } else {
+        if (!do_remap_vmobject_page(page_index, with_flush))
+            success = false;
+    }
+    return success;
+}
+
 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
@@ -411,14 +453,15 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
         auto& page_slot = physical_page_slot(page_index_in_region);
         if (page_slot->is_lazy_committed_page()) {
-            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
-            remap_page(page_index_in_region);
+            auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
+            page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
+            remap_vmobject_page(page_index_in_vmobject);
             return PageFaultResponse::Continue;
         }
 #ifdef MAP_SHARED_ZERO_PAGE_LAZILY
         if (fault.is_read()) {
             page_slot = MM.shared_zero_page();
-            remap_page(page_index_in_region);
+            remap_vmobject_page(translate_to_vmobject_page(page_index_in_region));
             return PageFaultResponse::Continue;
         }
         return handle_zero_fault(page_index_in_region);
@@ -453,12 +496,13 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     LOCKER(vmobject().m_paging_lock);
 
     auto& page_slot = physical_page_slot(page_index_in_region);
+    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
 
     if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "MM: zero_page() but page already present. Fine with me!";
 #endif
-        if (!remap_page(page_index_in_region))
+        if (!remap_vmobject_page(page_index_in_vmobject))
             return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
@@ -468,7 +512,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
         current_thread->did_zero_fault();
 
     if (page_slot->is_lazy_committed_page()) {
-        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
+        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
 #ifdef PAGE_FAULT_DEBUG
         dbg() << " >> ALLOCATED COMMITTED " << page_slot->paddr();
 #endif
@@ -483,7 +527,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 #endif
     }
 
-    if (!remap_page(page_index_in_region)) {
+    if (!remap_vmobject_page(page_index_in_vmobject)) {
         klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
         return PageFaultResponse::OutOfMemory;
     }
@@ -500,8 +544,9 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     if (!vmobject().is_anonymous())
         return PageFaultResponse::ShouldCrash;
 
-    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(first_page_index() + page_index_in_region, vaddr().offset(page_index_in_region * PAGE_SIZE));
-    if (!remap_page(page_index_in_region))
+    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
+    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(page_index_in_vmobject, vaddr().offset(page_index_in_region * PAGE_SIZE));
+    if (!remap_vmobject_page(page_index_in_vmobject))
         return PageFaultResponse::OutOfMemory;
     return response;
 }
@@ -515,7 +560,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     ASSERT_INTERRUPTS_DISABLED();
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
-    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
+    auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
+    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
 
 #ifdef PAGE_FAULT_DEBUG
     dbg() << "Inode fault in " << name() << " page index: " << page_index_in_region;
@@ -525,7 +571,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << ("MM: page_in_from_inode() but page already present. Fine with me!");
 #endif
-        if (!remap_page(page_index_in_region))
+        if (!remap_vmobject_page(page_index_in_vmobject))
             return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
@@ -541,7 +587,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
-    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
+    auto nread = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
     if (nread < 0) {
         klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
         return PageFaultResponse::ShouldCrash;
@@ -569,7 +615,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     }
 
     MM.unquickmap_page();
 
-    remap_page(page_index_in_region);
+    remap_vmobject_page(page_index_in_vmobject);
     return PageFaultResponse::Continue;
 }

Kernel/VM/Region.h

@@ -122,6 +122,49 @@ public:
         return vaddr().offset(page_index * PAGE_SIZE);
     }
 
+    bool translate_vmobject_page(size_t& index) const
+    {
+        auto first_index = first_page_index();
+        if (index < first_index) {
+            index = first_index;
+            return false;
+        }
+        index -= first_index;
+        auto total_page_count = this->page_count();
+        if (index >= total_page_count) {
+            index = first_index + total_page_count - 1;
+            return false;
+        }
+        return true;
+    }
+
+    bool translate_vmobject_page_range(size_t& index, size_t& page_count) const
+    {
+        auto first_index = first_page_index();
+        if (index < first_index) {
+            auto delta = first_index - index;
+            index = first_index;
+            if (delta >= page_count) {
+                page_count = 0;
+                return false;
+            }
+            page_count -= delta;
+        }
+        index -= first_index;
+        auto total_page_count = this->page_count();
+        if (index + page_count > total_page_count) {
+            page_count = total_page_count - index;
+            if (page_count == 0)
+                return false;
+        }
+        return true;
+    }
+
+    ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
+    {
+        return first_page_index() + page_index;
+    }
+
     size_t first_page_index() const
     {
         return m_offset_in_vmobject / PAGE_SIZE;
@@ -186,7 +229,7 @@ public:
     void set_inherit_mode(InheritMode inherit_mode) { m_inherit_mode = inherit_mode; }
 
-    bool remap_page_range(size_t page_index, size_t page_count);
+    bool remap_vmobject_page_range(size_t page_index, size_t page_count);
 
     bool is_volatile(VirtualAddress vaddr, size_t size) const;
 
     enum class SetVolatileError {
@@ -199,6 +242,8 @@ public:
     RefPtr<Process> get_owner();
 
 private:
+    bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);
+
     void set_access_bit(Access access, bool b)
     {
         if (b)
@@ -207,7 +252,8 @@ private:
             m_access &= ~access;
     }
 
-    bool remap_page(size_t index, bool with_flush = true);
+    bool do_remap_vmobject_page(size_t index, bool with_flush = true);
+    bool remap_vmobject_page(size_t index, bool with_flush = true);
 
     PageFaultResponse handle_cow_fault(size_t page_index);
     PageFaultResponse handle_inode_fault(size_t page_index);
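The new translate helpers in this header convert between region-relative and VMObject-relative page indices and clamp a requested range to the window the region actually maps, so a broadcast remap can be skipped by regions that do not map the page. A self-contained sketch of that clamping with hypothetical numbers; translate_range below is an illustrative stand-in, not the kernel helper, and adds an explicit early-out for ranges that lie entirely past the window.

// Standalone illustration of the clamping idea behind translate_vmobject_page_range():
// a range given in VMObject page indices is clipped to the window a region maps
// and converted to region-relative indices.
#include <cstddef>
#include <cstdio>

// Window: the region maps VMObject pages [first_index, first_index + region_pages).
static bool translate_range(std::size_t first_index, std::size_t region_pages, std::size_t& index, std::size_t& page_count)
{
    if (index < first_index) {
        std::size_t delta = first_index - index;
        index = first_index;
        if (delta >= page_count) {
            page_count = 0;
            return false; // range ends before the window starts
        }
        page_count -= delta; // drop the part before the window
    }
    index -= first_index; // now region-relative
    if (index >= region_pages)
        return false; // range starts past the window
    if (index + page_count > region_pages)
        page_count = region_pages - index; // clip the tail
    return page_count > 0;
}

int main()
{
    // Region maps VMObject pages [4, 12); caller asks about VMObject pages [2, 18).
    std::size_t index = 2;
    std::size_t page_count = 16;
    if (translate_range(4, 8, index, page_count))
        std::printf("region-relative range: [%zu, %zu)\n", index, index + page_count); // prints [0, 8)
}

This is why do_remap_vmobject_page_range() in Region.cpp treats a failed translation as "nothing to do" rather than an error: a region that does not map the range has no mapping to refresh.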

Kernel/VM/VMObject.cpp

@@ -46,6 +46,7 @@ VMObject::VMObject(size_t size)
 VMObject::~VMObject()
 {
     MM.unregister_vmobject(*this);
+    ASSERT(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
 }
 
 }

Kernel/VM/VMObject.h

@@ -67,6 +67,10 @@ public:
     VMObject* m_next { nullptr };
     VMObject* m_prev { nullptr };
 
+    ALWAYS_INLINE void ref_region() { m_regions_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed); }
+    ALWAYS_INLINE void unref_region() { m_regions_count.fetch_sub(1, AK::MemoryOrder::memory_order_relaxed); }
+    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) > 1; }
+
 protected:
     explicit VMObject(size_t);
     explicit VMObject(const VMObject&);
@@ -83,6 +87,8 @@ private:
     VMObject& operator=(const VMObject&) = delete;
     VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
+
+    Atomic<u32> m_regions_count { 0 };
 };
 
 }