From 2f790cf78ff781543f27f4136138e3ced7f133ce Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Sun, 5 Sep 2021 21:54:12 +0200
Subject: [PATCH] Kernel: Make MM.commit_user_physical_pages() return
 KResultOr

..and use TRY() at call sites. :^)
---
 Kernel/Memory/AnonymousVMObject.cpp | 19 +++++--------------
 Kernel/Memory/MemoryManager.cpp     | 10 +++++-----
 Kernel/Memory/MemoryManager.h       |  2 +-
 3 files changed, 11 insertions(+), 20 deletions(-)

diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp
index 8789eaae55..f332823b89 100644
--- a/Kernel/Memory/AnonymousVMObject.cpp
+++ b/Kernel/Memory/AnonymousVMObject.cpp
@@ -34,9 +34,7 @@ KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 
     dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);
 
-    auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
-    if (!committed_pages.has_value())
-        return ENOMEM;
+    auto committed_pages = TRY(MM.commit_user_physical_pages(new_cow_pages_needed));
 
     // Create or replace the committed cow pages. When cloning a previously
     // cloned vmobject, we want to essentially "fork", leaving us and the
@@ -44,8 +42,7 @@ KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
     // one would keep the one it still has. This ensures that the original
     // one and this one, as well as the clone have sufficient resources
     // to cow all pages as needed
-    auto new_shared_committed_cow_pages = try_make_ref_counted<SharedCommittedCowPages>(committed_pages.release_value());
-
+    auto new_shared_committed_cow_pages = try_make_ref_counted<SharedCommittedCowPages>(move(committed_pages));
     if (!new_shared_committed_cow_pages)
         return ENOMEM;
 
@@ -79,9 +76,7 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_s
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
-        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
-        if (!committed_pages.has_value())
-            return ENOMEM;
+        committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
     }
 
     return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
@@ -100,9 +95,7 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgea
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
-        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
-        if (!committed_pages.has_value())
-            return ENOMEM;
+        committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
     }
 
     auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
@@ -242,9 +235,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
         return KSuccess;
     }
 
-    m_unused_committed_pages = MM.commit_user_physical_pages(committed_pages_needed);
-    if (!m_unused_committed_pages.has_value())
-        return ENOMEM;
+    m_unused_committed_pages = TRY(MM.commit_user_physical_pages(committed_pages_needed));
 
     for (auto& page : m_physical_pages) {
         if (page->is_shared_zero_page())
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 3f65dd290c..a3f921a50c 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -69,9 +69,9 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
     protect_kernel_image();
 
     // We're temporarily "committing" to two pages that we need to allocate below
-    auto committed_pages = commit_user_physical_pages(2);
+    auto committed_pages = commit_user_physical_pages(2).release_value();
 
-    m_shared_zero_page = committed_pages->take_one();
+    m_shared_zero_page = committed_pages.take_one();
 
     // We're wasting a page here, we just need a special tag (physical
     // address) so that we know when we need to lazily allocate a page
@@ -79,7 +79,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
     // than potentially failing if no pages are available anymore.
     // By using a tag we don't have to query the VMObject for every page
     // whether it was committed or not
-    m_lazy_committed_page = committed_pages->take_one();
+    m_lazy_committed_page = committed_pages.take_one();
 }
 
 UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
@@ -763,12 +763,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
 }
 
-Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
+KResultOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     SpinlockLocker lock(s_mm_lock);
     if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
-        return {};
+        return ENOMEM;
 
     m_system_memory_info.user_physical_pages_uncommitted -= page_count;
     m_system_memory_info.user_physical_pages_committed += page_count;
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 39f73443b0..2c0fac7375 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -171,7 +171,7 @@ public:
         Yes
     };
 
-    Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
+    KResultOr<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
     void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
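
A note on the pattern for readers outside the Serenity kernel: commit_user_physical_pages() previously returned Optional<CommittedPhysicalPageSet>, so every caller had to check has_value() and manufacture an ENOMEM itself. Returning KResultOr<CommittedPhysicalPageSet> lets the TRY() macro do that propagation, collapsing each call site to a single expression. The sketch below is a minimal standalone approximation of the mechanism, not the real AK/Kernel types: Error, ErrorOr, commit_pages() and use_pages() are invented stand-ins, and this TRY() only mirrors the shape of the kernel's macro (a GNU statement expression that early-returns the error or yields the value).

    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <variant>

    struct Error {
        int code; // errno-style code; 12 is ENOMEM on Linux (illustrative only)
    };

    // Simplified stand-in for the kernel's KResultOr<T>.
    template<typename T>
    class ErrorOr {
    public:
        ErrorOr(T value)
            : m_value_or_error(std::move(value))
        {
        }
        ErrorOr(Error error)
            : m_value_or_error(error)
        {
        }
        bool is_error() const { return std::holds_alternative<Error>(m_value_or_error); }
        Error error() const { return std::get<Error>(m_value_or_error); }
        T release_value() { return std::move(std::get<T>(m_value_or_error)); }

    private:
        std::variant<T, Error> m_value_or_error;
    };

    // Mirrors the shape of the kernel's TRY(): evaluate the expression, return
    // the error from the enclosing function on failure, otherwise yield the
    // value. (GNU statement expression; GCC/Clang only, as in Serenity itself.)
    #define TRY(expression)                  \
        ({                                   \
            auto _result = (expression);     \
            if (_result.is_error())          \
                return _result.error();      \
            _result.release_value();         \
        })

    // Hypothetical fallible allocation, standing in for commit_user_physical_pages().
    ErrorOr<size_t> commit_pages(size_t page_count)
    {
        if (page_count > 1024)
            return Error { 12 }; // pretend we ran out of memory
        return page_count;
    }

    // Before this patch, each call site spelled out the has_value()/return
    // ENOMEM dance by hand; with TRY(), propagation is one expression.
    ErrorOr<size_t> use_pages()
    {
        auto committed = TRY(commit_pages(4));
        return committed * 2;
    }

    int main()
    {
        auto result = use_pages();
        if (!result.is_error())
            std::printf("committed and doubled: %zu\n", result.release_value());
        return 0;
    }

Note that one call site in the patch deliberately keeps release_value() rather than TRY(): the MemoryManager constructor runs during early boot, where there is no caller to propagate ENOMEM to, so failing to commit those two pages is treated as unrecoverable.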