
Kernel: Use RAII to manage committed physical pages

We had issues with committed physical pages getting miscounted in some
situations, and instead of figuring out what was going wrong and making
sure all the commits had matching uncommits, this patch makes the
problem go away by adding an RAII class to manage this instead. :^)

MemoryManager::commit_user_physical_pages() now returns an (optional)
CommittedPhysicalPageSet. You can then allocate pages from the page set
by calling take_one() on it. Any unallocated pages are uncommitted upon
destruction of the page set.
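
A minimal sketch of how the new API is meant to be used, from a hypothetical caller's point of view (page_count and the ENOMEM return are illustrative; MM, commit_user_physical_pages() and take_one() are the names this patch introduces or uses):

    // Commit physical pages up front; failure is handled once, right here.
    auto committed_pages = MM.commit_user_physical_pages(page_count);
    if (!committed_pages.has_value())
        return ENOMEM;

    // Draw down pages one at a time as they are actually needed...
    auto page = committed_pages->take_one();

    // ...and when committed_pages goes out of scope, its destructor
    // uncommits any pages that were never taken. No manual bookkeeping.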
Andreas Kling 2021-08-04 22:49:13 +02:00
parent ec49213f7b
commit fa627c1eb2
4 changed files with 121 additions and 93 deletions

Kernel/VM/AnonymousVMObject.cpp

@@ -36,7 +36,8 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);

-    if (!MM.commit_user_physical_pages(new_cow_pages_needed))
+    auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
+    if (!committed_pages.has_value())
         return {};

     // Create or replace the committed cow pages. When cloning a previously
@@ -45,29 +46,43 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     // one would keep the one it still has. This ensures that the original
     // one and this one, as well as the clone have sufficient resources
     // to cow all pages as needed
-    m_shared_committed_cow_pages = try_create<CommittedCowPages>(new_cow_pages_needed);
+    m_shared_committed_cow_pages = try_create<SharedCommittedCowPages>(committed_pages.release_value());
-    if (!m_shared_committed_cow_pages) {
-        MM.uncommit_user_physical_pages(new_cow_pages_needed);
+    if (!m_shared_committed_cow_pages)
         return {};
-    }

     // Both original and clone become COW. So create a COW map for ourselves
     // or reset all pages to be copied again if we were previously cloned
     ensure_or_reset_cow_map();

     // FIXME: If this allocation fails, we need to rollback all changes.
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this));
+    auto clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this));
+    VERIFY(clone);
+
+    if (m_unused_committed_pages.has_value() && !m_unused_committed_pages->is_empty()) {
+        // The parent vmobject didn't use up all committed pages. When
+        // cloning (fork) we will overcommit. For this purpose we drop all
+        // lazy-commit references and replace them with shared zero pages.
+        for (size_t i = 0; i < page_count(); i++) {
+            auto& page = clone->m_physical_pages[i];
+            if (page && page->is_lazy_committed_page()) {
+                page = MM.shared_zero_page();
+            }
+        }
+    }
+
+    return clone;
 }

-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy commit)
+RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
-    if (commit == AllocationStrategy::Reserve || commit == AllocationStrategy::AllocateNow) {
-        // We need to attempt to commit before actually creating the object
-        if (!MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))))
+    Optional<CommittedPhysicalPageSet> committed_pages;
+    if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
+        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+        if (!committed_pages.has_value())
             return {};
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, commit));
+    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
 }

 RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
@@ -78,14 +93,15 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
     return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
 }

-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy commit)
+RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
-    if (commit == AllocationStrategy::Reserve || commit == AllocationStrategy::AllocateNow) {
-        // We need to attempt to commit before actually creating the object
-        if (!MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))))
+    Optional<CommittedPhysicalPageSet> committed_pages;
+    if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
+        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+        if (!committed_pages.has_value())
             return {};
     }
-    auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, commit));
+    auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
     if (!vmobject)
         return {};
     vmobject->m_purgeable = true;
@@ -106,9 +122,9 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
     return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(paddr, size));
 }

-AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy)
+AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
     : VMObject(size)
-    , m_unused_committed_pages(strategy == AllocationStrategy::Reserve ? page_count() : 0)
+    , m_unused_committed_pages(move(committed_pages))
 {
     if (strategy == AllocationStrategy::AllocateNow) {
         // Allocate all pages right now. We know we can get all because we committed the amount needed
@@ -139,7 +155,6 @@ AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other)
     : VMObject(other)
-    , m_unused_committed_pages(other.m_unused_committed_pages)
     , m_cow_map() // do *not* clone this
     , m_shared_committed_cow_pages(other.m_shared_committed_cow_pages) // share the pool
     , m_purgeable(other.m_purgeable)
@@ -150,28 +165,10 @@ AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other)
     // The clone also becomes COW
     ensure_or_reset_cow_map();

-    if (m_unused_committed_pages > 0) {
-        // The original vmobject didn't use up all committed pages. When
-        // cloning (fork) we will overcommit. For this purpose we drop all
-        // lazy-commit references and replace them with shared zero pages.
-        for (size_t i = 0; i < page_count(); i++) {
-            auto& phys_page = m_physical_pages[i];
-            if (phys_page && phys_page->is_lazy_committed_page()) {
-                phys_page = MM.shared_zero_page();
-                if (--m_unused_committed_pages == 0)
-                    break;
-            }
-        }
-        VERIFY(m_unused_committed_pages == 0);
-    }
 }

 AnonymousVMObject::~AnonymousVMObject()
 {
-    // Return any unused committed pages
-    if (m_unused_committed_pages > 0)
-        MM.uncommit_user_physical_pages(m_unused_committed_pages);
 }

 size_t AnonymousVMObject::purge()
@@ -218,11 +215,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
             page = MM.shared_zero_page();
         }

-        if (m_unused_committed_pages) {
-            MM.uncommit_user_physical_pages(m_unused_committed_pages);
-            m_unused_committed_pages = 0;
-        }
+        m_unused_committed_pages = {};
         m_shared_committed_cow_pages = nullptr;

         if (!m_cow_map.is_null())
@@ -248,11 +241,10 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
         return KSuccess;
     }

-    if (!MM.commit_user_physical_pages(committed_pages_needed))
+    m_unused_committed_pages = MM.commit_user_physical_pages(committed_pages_needed);
+    if (!m_unused_committed_pages.has_value())
         return ENOMEM;
-    m_unused_committed_pages = committed_pages_needed;

     for (auto& page : m_physical_pages) {
         if (page->is_shared_zero_page())
             page = MM.lazy_committed_page();
@@ -266,12 +258,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)

 NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
 {
-    {
-        ScopedSpinLock lock(m_lock);
-        VERIFY(m_unused_committed_pages > 0);
-        --m_unused_committed_pages;
-    }
-    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    return m_unused_committed_pages->take_one();
 }

 Bitmap& AnonymousVMObject::ensure_cow_map()
@@ -339,7 +326,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
     RefPtr<PhysicalPage> page;
     if (m_shared_committed_cow_pages) {
         dbgln_if(PAGE_FAULT_DEBUG, " >> It's a committed COW page and it's time to COW!");
-        page = m_shared_committed_cow_pages->allocate_one();
+        page = m_shared_committed_cow_pages->take_one();
     } else {
         dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page and it's time to COW!");
         page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
@@ -371,24 +358,19 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
     return PageFaultResponse::Continue;
 }

-CommittedCowPages::CommittedCowPages(size_t committed_pages)
-    : m_committed_pages(committed_pages)
+AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhysicalPageSet&& committed_pages)
+    : m_committed_pages(move(committed_pages))
 {
 }

-CommittedCowPages::~CommittedCowPages()
+AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages()
 {
-    // Return unused committed pages
-    if (m_committed_pages > 0)
-        MM.uncommit_user_physical_pages(m_committed_pages);
 }

-NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
-    VERIFY(m_committed_pages > 0);
-    m_committed_pages--;
-    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    ScopedSpinLock locker(m_lock);
+    return m_committed_pages.take_one();
 }

 }

Kernel/VM/AnonymousVMObject.h

@@ -14,22 +14,6 @@

 namespace Kernel {

-class CommittedCowPages : public RefCounted<CommittedCowPages> {
-    AK_MAKE_NONCOPYABLE(CommittedCowPages);
-
-public:
-    CommittedCowPages() = delete;
-    explicit CommittedCowPages(size_t);
-    ~CommittedCowPages();
-
-    [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_one();
-    [[nodiscard]] size_t is_empty() const { return m_committed_pages == 0; }
-
-public:
-    size_t m_committed_pages { 0 };
-};
-
 class AnonymousVMObject final : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
@@ -55,7 +39,7 @@ public:
     size_t purge();

 private:
-    explicit AnonymousVMObject(size_t, AllocationStrategy);
+    explicit AnonymousVMObject(size_t, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
     explicit AnonymousVMObject(PhysicalAddress, size_t);
     explicit AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>>);
     explicit AnonymousVMObject(AnonymousVMObject const&);
@@ -71,11 +55,28 @@ private:
     Bitmap& ensure_cow_map();
     void ensure_or_reset_cow_map();

-    size_t m_unused_committed_pages { 0 };
+    Optional<CommittedPhysicalPageSet> m_unused_committed_pages;
     Bitmap m_cow_map;

-    // We share a pool of committed cow-pages with clones
-    RefPtr<CommittedCowPages> m_shared_committed_cow_pages;
+    // AnonymousVMObject shares committed COW pages with cloned children (happens on fork)
+    class SharedCommittedCowPages : public RefCounted<SharedCommittedCowPages> {
+        AK_MAKE_NONCOPYABLE(SharedCommittedCowPages);
+
+    public:
+        SharedCommittedCowPages() = delete;
+        explicit SharedCommittedCowPages(CommittedPhysicalPageSet&&);
+        ~SharedCommittedCowPages();
+
+        [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
+        [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
+
+    public:
+        SpinLock<u8> m_lock;
+        CommittedPhysicalPageSet m_committed_pages;
+    };
+
+    RefPtr<SharedCommittedCowPages> m_shared_committed_cow_pages;

     bool m_purgeable { false };
     bool m_volatile { false };

Kernel/VM/MemoryManager.cpp

@@ -69,10 +69,9 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
     protect_kernel_image();

     // We're temporarily "committing" to two pages that we need to allocate below
-    if (!commit_user_physical_pages(2))
-        VERIFY_NOT_REACHED();
+    auto committed_pages = commit_user_physical_pages(2);

-    m_shared_zero_page = allocate_committed_user_physical_page();
+    m_shared_zero_page = committed_pages->take_one();

     // We're wasting a page here, we just need a special tag (physical
     // address) so that we know when we need to lazily allocate a page
@@ -80,7 +79,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
     // than potentially failing if no pages are available anymore.
     // By using a tag we don't have to query the VMObject for every page
     // whether it was committed or not
-    m_lazy_committed_page = allocate_committed_user_physical_page();
+    m_lazy_committed_page = committed_pages->take_one();
 }

 UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
@@ -766,21 +765,22 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
 }

-bool MemoryManager::commit_user_physical_pages(size_t page_count)
+Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);

     ScopedSpinLock lock(s_mm_lock);
     if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
-        return false;
+        return {};

     m_system_memory_info.user_physical_pages_uncommitted -= page_count;
     m_system_memory_info.user_physical_pages_committed += page_count;
-    return true;
+    return CommittedPhysicalPageSet { {}, page_count };
 }

-void MemoryManager::uncommit_user_physical_pages(size_t page_count)
+void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
 {
     VERIFY(page_count > 0);

     ScopedSpinLock lock(s_mm_lock);
     VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
@@ -1124,4 +1124,17 @@ void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
     flush_tlb(&kernel_page_directory(), vaddr);
 }

+CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
+{
+    if (m_page_count)
+        MM.uncommit_user_physical_pages({}, m_page_count);
+}
+
+NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+{
+    VERIFY(m_page_count > 0);
+    --m_page_count;
+    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+}
+
 }

Kernel/VM/MemoryManager.h

@@ -102,6 +102,37 @@ struct MemoryManagerData {

 extern RecursiveSpinLock s_mm_lock;

+// This class represents a set of committed physical pages.
+// When you ask MemoryManager to commit pages for you, you get one of these in return.
+// You can allocate pages from it via `take_one()`
+// It will uncommit any (unallocated) remaining pages when destroyed.
+class CommittedPhysicalPageSet {
+    AK_MAKE_NONCOPYABLE(CommittedPhysicalPageSet);
+
+public:
+    CommittedPhysicalPageSet(Badge<MemoryManager>, size_t page_count)
+        : m_page_count(page_count)
+    {
+    }
+
+    CommittedPhysicalPageSet(CommittedPhysicalPageSet&& other)
+        : m_page_count(exchange(other.m_page_count, 0))
+    {
+    }
+
+    ~CommittedPhysicalPageSet();
+
+    bool is_empty() const { return m_page_count == 0; }
+    size_t page_count() const { return m_page_count; }
+
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
+
+    void operator=(CommittedPhysicalPageSet&&) = delete;
+
+private:
+    size_t m_page_count { 0 };
+};
+
 class MemoryManager {
     AK_MAKE_ETERNAL
     friend class PageDirectory;
@@ -139,8 +170,9 @@ public:
         Yes
     };

-    bool commit_user_physical_pages(size_t);
-    void uncommit_user_physical_pages(size_t);
+    Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
+    void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
+
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();