diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 2e3d6f80cf..c0e2f52d81 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -36,7 +36,8 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_clone()

     dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);

-    if (!MM.commit_user_physical_pages(new_cow_pages_needed))
+    auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
+    if (!committed_pages.has_value())
         return {};

     // Create or replace the committed cow pages. When cloning a previously
@@ -45,29 +46,43 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_clone()
     // one would keep the one it still has. This ensures that the original
     // one and this one, as well as the clone have sufficient resources
     // to cow all pages as needed
-    m_shared_committed_cow_pages = try_create<CommittedCowPages>(new_cow_pages_needed);
+    m_shared_committed_cow_pages = try_create<SharedCommittedCowPages>(committed_pages.release_value());

-    if (!m_shared_committed_cow_pages) {
-        MM.uncommit_user_physical_pages(new_cow_pages_needed);
+    if (!m_shared_committed_cow_pages)
         return {};
-    }

     // Both original and clone become COW. So create a COW map for ourselves
     // or reset all pages to be copied again if we were previously cloned
     ensure_or_reset_cow_map();

     // FIXME: If this allocation fails, we need to rollback all changes.
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this));
+    auto clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this));
+    VERIFY(clone);
+
+    if (m_unused_committed_pages.has_value() && !m_unused_committed_pages->is_empty()) {
+        // The parent vmobject didn't use up all committed pages. When
+        // cloning (fork) we will overcommit. For this purpose we drop all
+        // lazy-commit references and replace them with shared zero pages.
+        for (size_t i = 0; i < page_count(); i++) {
+            auto& page = clone->m_physical_pages[i];
+            if (page && page->is_lazy_committed_page()) {
+                page = MM.shared_zero_page();
+            }
+        }
+    }
+
+    return clone;
 }

-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy commit)
+RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
-    if (commit == AllocationStrategy::Reserve || commit == AllocationStrategy::AllocateNow) {
-        // We need to attempt to commit before actually creating the object
-        if (!MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))))
+    Optional<CommittedPhysicalPageSet> committed_pages;
+    if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
+        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+        if (!committed_pages.has_value())
             return {};
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, commit));
+    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
 }

 RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
@@ -78,14 +93,15 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_wi
     return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
 }

-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy commit)
+RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
-    if (commit == AllocationStrategy::Reserve || commit == AllocationStrategy::AllocateNow) {
-        // We need to attempt to commit before actually creating the object
-        if (!MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))))
+    Optional<CommittedPhysicalPageSet> committed_pages;
+    if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
+        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+        if (!committed_pages.has_value())
             return {};
     }
-    auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, commit));
+
+    auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
     if (!vmobject)
         return {};
     vmobject->m_purgeable = true;
@@ -106,9 +122,9 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(Physi
     return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(paddr, size));
 }

-AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy)
+AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
     : VMObject(size)
-    , m_unused_committed_pages(strategy == AllocationStrategy::Reserve ? page_count() : 0)
+    , m_unused_committed_pages(move(committed_pages))
 {
     if (strategy == AllocationStrategy::AllocateNow) {
         // Allocate all pages right now. We know we can get all because we committed the amount needed
@@ -139,7 +155,6 @@ AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_

 AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other)
     : VMObject(other)
-    , m_unused_committed_pages(other.m_unused_committed_pages)
     , m_cow_map() // do *not* clone this
     , m_shared_committed_cow_pages(other.m_shared_committed_cow_pages) // share the pool
     , m_purgeable(other.m_purgeable)
@@ -150,28 +165,10 @@ AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other)

     // The clone also becomes COW
     ensure_or_reset_cow_map();
-
-    if (m_unused_committed_pages > 0) {
-        // The original vmobject didn't use up all committed pages. When
-        // cloning (fork) we will overcommit. For this purpose we drop all
-        // lazy-commit references and replace them with shared zero pages.
-        for (size_t i = 0; i < page_count(); i++) {
-            auto& phys_page = m_physical_pages[i];
-            if (phys_page && phys_page->is_lazy_committed_page()) {
-                phys_page = MM.shared_zero_page();
-                if (--m_unused_committed_pages == 0)
-                    break;
-            }
-        }
-        VERIFY(m_unused_committed_pages == 0);
-    }
 }

 AnonymousVMObject::~AnonymousVMObject()
 {
-    // Return any unused committed pages
-    if (m_unused_committed_pages > 0)
-        MM.uncommit_user_physical_pages(m_unused_committed_pages);
 }

 size_t AnonymousVMObject::purge()
@@ -218,11 +215,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
                 page = MM.shared_zero_page();
         }

-        if (m_unused_committed_pages) {
-            MM.uncommit_user_physical_pages(m_unused_committed_pages);
-            m_unused_committed_pages = 0;
-        }
-
+        m_unused_committed_pages = {};
         m_shared_committed_cow_pages = nullptr;

         if (!m_cow_map.is_null())
@@ -248,11 +241,10 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
         return KSuccess;
     }

-    if (!MM.commit_user_physical_pages(committed_pages_needed))
+    m_unused_committed_pages = MM.commit_user_physical_pages(committed_pages_needed);
+    if (!m_unused_committed_pages.has_value())
         return ENOMEM;

-    m_unused_committed_pages = committed_pages_needed;
-
     for (auto& page : m_physical_pages) {
         if (page->is_shared_zero_page())
             page = MM.lazy_committed_page();
@@ -266,12 +258,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)

 NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
 {
-    {
-        ScopedSpinLock lock(m_lock);
-        VERIFY(m_unused_committed_pages > 0);
-        --m_unused_committed_pages;
-    }
-    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    return m_unused_committed_pages->take_one();
 }

 Bitmap& AnonymousVMObject::ensure_cow_map()
@@ -339,7 +326,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
     RefPtr<PhysicalPage> page;
     if (m_shared_committed_cow_pages) {
         dbgln_if(PAGE_FAULT_DEBUG, "    >> It's a committed COW page and it's time to COW!");
-        page = m_shared_committed_cow_pages->allocate_one();
+        page = m_shared_committed_cow_pages->take_one();
     } else {
         dbgln_if(PAGE_FAULT_DEBUG, "    >> It's a COW page and it's time to COW!");
         page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
@@ -371,24 +358,19 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
     return PageFaultResponse::Continue;
 }

-CommittedCowPages::CommittedCowPages(size_t committed_pages)
-    : m_committed_pages(committed_pages)
+AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhysicalPageSet&& committed_pages)
+    : m_committed_pages(move(committed_pages))
 {
 }

-CommittedCowPages::~CommittedCowPages()
+AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages()
 {
-    // Return unused committed pages
-    if (m_committed_pages > 0)
-        MM.uncommit_user_physical_pages(m_committed_pages);
 }

-NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
-    VERIFY(m_committed_pages > 0);
-    m_committed_pages--;
-
-    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    ScopedSpinLock locker(m_lock);
+    return m_committed_pages.take_one();
 }

 }
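The pattern this file now follows, commit first and hand the resulting set to whoever will draw pages from it, is worth seeing in isolation. Below is a minimal standalone model of the RAII idea, not kernel code: the names (`PageSetModel`, `commit`) are hypothetical, `std::optional` stands in for `AK::Optional`, and plain counters stand in for MemoryManager's accounting.

```cpp
#include <cassert>
#include <cstdio>
#include <optional>
#include <utility>

// Hypothetical stand-ins for MemoryManager's system-wide page accounting.
static size_t g_uncommitted_pages = 8;
static size_t g_committed_pages = 0;

// Models CommittedPhysicalPageSet: owns a number of committed-but-unallocated
// pages and returns the remainder to the uncommitted pool on destruction.
class PageSetModel {
public:
    explicit PageSetModel(size_t count)
        : m_count(count)
    {
    }
    PageSetModel(PageSetModel&& other)
        : m_count(std::exchange(other.m_count, 0))
    {
    }
    ~PageSetModel()
    {
        // Whatever was never taken goes back to the uncommitted pool.
        g_committed_pages -= m_count;
        g_uncommitted_pages += m_count;
    }
    void take_one()
    {
        assert(m_count > 0);
        --m_count;
        --g_committed_pages; // page is now allocated, no longer merely committed
    }

private:
    size_t m_count { 0 };
};

// Models commit_user_physical_pages(): fail up front, or return a set.
static std::optional<PageSetModel> commit(size_t count)
{
    if (g_uncommitted_pages < count)
        return std::nullopt;
    g_uncommitted_pages -= count;
    g_committed_pages += count;
    return PageSetModel { count };
}

int main()
{
    {
        auto pages = commit(4);
        assert(pages.has_value());
        pages->take_one(); // use one of the four committed pages
        // Scope exit: the remaining 3 pages are uncommitted automatically.
    }
    assert(g_committed_pages == 0 && g_uncommitted_pages == 7);
    std::printf("committed=%zu uncommitted=%zu\n", g_committed_pages, g_uncommitted_pages);
}
```

This is why the error paths above shrink: `try_clone()` no longer needs an explicit `MM.uncommit_user_physical_pages(...)` before each early return, because dropping the set returns the pages by itself.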
diff --git a/Kernel/VM/AnonymousVMObject.h b/Kernel/VM/AnonymousVMObject.h
index 277c7e9385..b37247dbf8 100644
--- a/Kernel/VM/AnonymousVMObject.h
+++ b/Kernel/VM/AnonymousVMObject.h
@@ -14,22 +14,6 @@

 namespace Kernel {

-class CommittedCowPages : public RefCounted<CommittedCowPages> {
-    AK_MAKE_NONCOPYABLE(CommittedCowPages);
-
-public:
-    CommittedCowPages() = delete;
-
-    explicit CommittedCowPages(size_t);
-    ~CommittedCowPages();
-
-    [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_one();
-    [[nodiscard]] size_t is_empty() const { return m_committed_pages == 0; }
-
-public:
-    size_t m_committed_pages { 0 };
-};
-
 class AnonymousVMObject final : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
@@ -55,7 +39,7 @@ public:
     size_t purge();

 private:
-    explicit AnonymousVMObject(size_t, AllocationStrategy);
+    explicit AnonymousVMObject(size_t, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
     explicit AnonymousVMObject(PhysicalAddress, size_t);
     explicit AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>>);
     explicit AnonymousVMObject(AnonymousVMObject const&);
@@ -71,11 +55,28 @@ private:
     Bitmap& ensure_cow_map();
     void ensure_or_reset_cow_map();

-    size_t m_unused_committed_pages { 0 };
+    Optional<CommittedPhysicalPageSet> m_unused_committed_pages;
     Bitmap m_cow_map;

-    // We share a pool of committed cow-pages with clones
-    RefPtr<CommittedCowPages> m_shared_committed_cow_pages;
+    // AnonymousVMObject shares committed COW pages with cloned children (happens on fork)
+    class SharedCommittedCowPages : public RefCounted<SharedCommittedCowPages> {
+        AK_MAKE_NONCOPYABLE(SharedCommittedCowPages);
+
+    public:
+        SharedCommittedCowPages() = delete;
+
+        explicit SharedCommittedCowPages(CommittedPhysicalPageSet&&);
+        ~SharedCommittedCowPages();
+
+        [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
+        [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
+
+    public:
+        SpinLock<u8> m_lock;
+        CommittedPhysicalPageSet m_committed_pages;
+    };
+
+    RefPtr<SharedCommittedCowPages> m_shared_committed_cow_pages;

     bool m_purgeable { false };
     bool m_volatile { false };
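The nested `SharedCommittedCowPages` is a ref-counted pool guarded by its own lock, shared between a parent and its clones. Here is a rough userspace model of that sharing, with `std::shared_ptr` and `std::mutex` standing in for `RefPtr` and `SpinLock`; all names are hypothetical stand-ins, not kernel API.

```cpp
#include <cassert>
#include <memory>
#include <mutex>

// Models SharedCommittedCowPages: a ref-counted pool that several cloned
// VM objects draw COW pages from, serialized by a lock.
struct SharedCowPool {
    std::mutex lock;         // stands in for SpinLock<u8> m_lock
    size_t pages_left { 0 }; // stands in for CommittedPhysicalPageSet

    explicit SharedCowPool(size_t pages)
        : pages_left(pages)
    {
    }

    // Mirrors take_one(): callers must never drain an empty pool.
    void take_one()
    {
        std::scoped_lock locker(lock);
        assert(pages_left > 0);
        --pages_left;
    }
};

struct VMObjectModel {
    std::shared_ptr<SharedCowPool> shared_committed_cow_pages; // RefPtr in the kernel
};

int main()
{
    // After a clone (fork), parent and child point at the same pool, so a
    // COW fault on either side consumes from one shared, pre-committed budget.
    VMObjectModel parent { std::make_shared<SharedCowPool>(2) };
    VMObjectModel child { parent.shared_committed_cow_pages };

    parent.shared_committed_cow_pages->take_one(); // parent COW fault
    child.shared_committed_cow_pages->take_one();  // child COW fault
    assert(parent.shared_committed_cow_pages->pages_left == 0);
}
```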
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 09c88b52c4..6173221678 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -69,10 +69,9 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
     protect_kernel_image();

     // We're temporarily "committing" to two pages that we need to allocate below
-    if (!commit_user_physical_pages(2))
-        VERIFY_NOT_REACHED();
+    auto committed_pages = commit_user_physical_pages(2);

-    m_shared_zero_page = allocate_committed_user_physical_page();
+    m_shared_zero_page = committed_pages->take_one();

     // We're wasting a page here, we just need a special tag (physical
     // address) so that we know when we need to lazily allocate a page
@@ -80,7 +79,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
     // than potentially failing if no pages are available anymore.
     // By using a tag we don't have to query the VMObject for every page
     // whether it was committed or not
-    m_lazy_committed_page = allocate_committed_user_physical_page();
+    m_lazy_committed_page = committed_pages->take_one();
 }

 UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
@@ -766,21 +765,22 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
 }

-bool MemoryManager::commit_user_physical_pages(size_t page_count)
+Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
     if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
-        return false;
+        return {};

     m_system_memory_info.user_physical_pages_uncommitted -= page_count;
     m_system_memory_info.user_physical_pages_committed += page_count;
-    return true;
+    return CommittedPhysicalPageSet { {}, page_count };
 }

-void MemoryManager::uncommit_user_physical_pages(size_t page_count)
+void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
 {
     VERIFY(page_count > 0);
+
     ScopedSpinLock lock(s_mm_lock);
     VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);

@@ -1124,4 +1124,17 @@ void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable
     flush_tlb(&kernel_page_directory(), vaddr);
 }

+CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
+{
+    if (m_page_count)
+        MM.uncommit_user_physical_pages({}, m_page_count);
+}
+
+NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+{
+    VERIFY(m_page_count > 0);
+    --m_page_count;
+    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+}
+
 }
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index a17f759eae..d9e912504f 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -102,6 +102,37 @@ struct MemoryManagerData {

 extern RecursiveSpinLock s_mm_lock;

+// This class represents a set of committed physical pages.
+// When you ask MemoryManager to commit pages for you, you get one of these in return.
+// You can allocate pages from it via `take_one()`
+// It will uncommit any (unallocated) remaining pages when destroyed.
+class CommittedPhysicalPageSet {
+    AK_MAKE_NONCOPYABLE(CommittedPhysicalPageSet);
+
+public:
+    CommittedPhysicalPageSet(Badge<MemoryManager>, size_t page_count)
+        : m_page_count(page_count)
+    {
+    }
+
+    CommittedPhysicalPageSet(CommittedPhysicalPageSet&& other)
+        : m_page_count(exchange(other.m_page_count, 0))
+    {
+    }
+
+    ~CommittedPhysicalPageSet();
+
+    bool is_empty() const { return m_page_count == 0; }
+    size_t page_count() const { return m_page_count; }
+
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
+
+    void operator=(CommittedPhysicalPageSet&&) = delete;
+
+private:
+    size_t m_page_count { 0 };
+};
+
 class MemoryManager {
     AK_MAKE_ETERNAL
     friend class PageDirectory;
@@ -139,8 +170,9 @@ public:
         Yes
     };

-    bool commit_user_physical_pages(size_t);
-    void uncommit_user_physical_pages(size_t);
+    Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
+    void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
+
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
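Note that `uncommit_user_physical_pages()` now takes a `Badge<CommittedPhysicalPageSet>`, so only the page set itself can return pages to the uncommitted pool. A compressed sketch of how that badge idiom enforces the restriction, simplified from AK's `Badge`; `Manager` and `PageSet` are hypothetical stand-ins:

```cpp
#include <cstdio>

// Minimal version of the AK::Badge idiom: only code that can construct a
// Badge<T> (i.e. T itself, since the constructor is private) can call an
// interface that demands one.
template<typename T>
class Badge {
    friend T; // only T may mint a Badge<T>
    Badge() = default;
};

class PageSet;

class Manager {
public:
    // Callable only with a Badge<PageSet>, mirroring
    // uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t).
    void uncommit(Badge<PageSet>, size_t count) { m_committed -= count; }
    size_t committed() const { return m_committed; }

private:
    size_t m_committed { 4 };
};

class PageSet {
public:
    // Inside PageSet, Badge<PageSet>'s private constructor is accessible,
    // so `{}` mints the badge that authorizes the call.
    void release_all(Manager& mm, size_t count) { mm.uncommit({}, count); }
};

int main()
{
    Manager mm;
    PageSet set;
    set.release_all(mm, 4); // OK: PageSet can mint its own badge
    // mm.uncommit({}, 1);  // would not compile: Badge<PageSet> ctor is private
    std::printf("committed=%zu\n", mm.committed());
}
```

The same device keeps the `CommittedPhysicalPageSet` constructor out of reach: `Badge<MemoryManager>` means only `commit_user_physical_pages()` can create a set, so the commit counters can never be adjusted behind MemoryManager's back.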