1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 20:57:35 +00:00

Kernel: Store VMObject physical pages in a FixedArray

Let's enforce the invariant that VMObjects don't shrink or grow by
storing the pages in a FixedArray.
This commit is contained in:
Andreas Kling 2021-07-11 17:38:28 +02:00
parent 373b8d7cfa
commit 59049ae4b7
3 changed files with 6 additions and 11 deletions

View file

@@ -117,11 +117,11 @@ AnonymousVMObject::AnonymousVMObject(PhysicalPage& page)
}
AnonymousVMObject::AnonymousVMObject(NonnullRefPtrVector<PhysicalPage> physical_pages)
: VMObject()
: VMObject(physical_pages.size())
, m_volatile_ranges_cache({ 0, page_count() })
{
for (auto& page : physical_pages) {
m_physical_pages.append(page);
for (size_t i = 0; i < physical_pages.size(); ++i) {
m_physical_pages[i] = physical_pages[i];
}
}

View file

@@ -15,14 +15,9 @@ VMObject::VMObject(const VMObject& other)
MM.register_vmobject(*this);
}
VMObject::VMObject()
{
MM.register_vmobject(*this);
}
VMObject::VMObject(size_t size)
: m_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)))
{
m_physical_pages.resize(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
MM.register_vmobject(*this);
}

View file

@@ -6,6 +6,7 @@
#pragma once
#include <AK/FixedArray.h>
#include <AK/HashTable.h>
#include <AK/IntrusiveList.h>
#include <AK/RefCounted.h>
@@ -61,7 +62,6 @@ public:
}
protected:
VMObject();
explicit VMObject(size_t);
explicit VMObject(const VMObject&);
@@ -69,7 +69,7 @@ protected:
void for_each_region(Callback);
IntrusiveListNode<VMObject> m_list_node;
Vector<RefPtr<PhysicalPage>, 16> m_physical_pages;
FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
Lock m_paging_lock { "VMObject" };
mutable SpinLock<u8> m_lock;