Kernel: Use a FixedArray for VMObject::m_physical_pages
This makes VMObject 8 bytes smaller since we can use the array size as the page count. The size() is now also computed from the page count instead of being a separate value. This makes sizes always be a multiple of PAGE_SIZE, which is sane.
parent 5096eaa845
commit b67200dfea

5 changed files with 19 additions and 39 deletions
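The whole change hinges on the fact that AK::FixedArray knows its own element count, so VMObject no longer has to carry a byte size alongside its page array. The sketch below is not AK's implementation; it is a minimal stand-in (standard-library only, so it compiles on its own) showing the shape of such a container and why dropping the separate m_size member, plus some of Vector's bookkeeping, plausibly accounts for the 8 bytes mentioned above.

    // Minimal stand-in for the idea behind AK::FixedArray (not the real AK code):
    // a heap array whose element count is fixed at construction time. Because the
    // container stores its own size, VMObject can drop its separate m_size member.
    #include <cstddef>
    #include <memory>

    template<typename T>
    class FixedArraySketch {
    public:
        explicit FixedArraySketch(size_t size)
            : m_size(size)
            , m_elements(std::make_unique<T[]>(size)) // value-initialized, i.e. null RefPtrs
        {
        }

        size_t size() const { return m_size; }
        T& operator[](size_t index) { return m_elements[index]; }
        const T& operator[](size_t index) const { return m_elements[index]; }

    private:
        size_t m_size { 0 };
        std::unique_ptr<T[]> m_elements;
    };

With the element count stored in the array itself, page_count() becomes m_physical_pages.size() and size() becomes page_count() * PAGE_SIZE, exactly as the VMObject.h hunk at the bottom of the diff shows.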
ProcFS.cpp
@@ -315,7 +315,7 @@ Optional<KBuffer> procfs$pid_vmo(InodeIdentifier identifier)
             region.vmo().is_anonymous() ? "anonymous" : "file-backed",
             &region.vmo(),
             region.vmo().ref_count());
-        for (int i = 0; i < region.vmo().page_count(); ++i) {
+        for (size_t i = 0; i < region.vmo().page_count(); ++i) {
             auto& physical_page = region.vmo().physical_pages()[i];
             builder.appendf("P%x%s(%u) ",
                 physical_page ? physical_page->paddr().get() : 0,
AnonymousVMObject.cpp
@@ -3,27 +3,25 @@
 
 NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
 {
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
     return adopt(*new AnonymousVMObject(size));
 }
 
 NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
     return adopt(*new AnonymousVMObject(paddr, size));
 }
 
 AnonymousVMObject::AnonymousVMObject(size_t size)
-    : VMObject(size, ShouldFillPhysicalPages::Yes)
+    : VMObject(size)
 {
 }
 
 AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
-    : VMObject(size, ShouldFillPhysicalPages::No)
+    : VMObject(size)
 {
-    for (size_t i = 0; i < size; i += PAGE_SIZE)
-        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
-    ASSERT(m_physical_pages.size() == page_count());
+    ASSERT(paddr.page_base() == paddr.get());
+    for (size_t i = 0; i < page_count(); ++i)
+        physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false);
 }
 
 AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
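The two deleted size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE; lines are no longer needed because the base VMObject constructor (see the VMObject.cpp hunk below) now sizes the array with ceil_div(size, PAGE_SIZE) and size() is recomputed from the page count, so the rounding happens implicitly. A small self-contained illustration of that arithmetic, assuming the usual PAGE_SIZE of 4096:

    #include <cstddef>
    #include <cstdio>

    static constexpr size_t PAGE_SIZE = 4096;

    // Ceiling division, in the spirit of AK's ceil_div: round the quotient up.
    static constexpr size_t ceil_div(size_t a, size_t b) { return (a + b - 1) / b; }

    int main()
    {
        // A 5000-byte anonymous VMObject ends up backed by 2 pages, so its
        // size() reports 8192 -- always a multiple of PAGE_SIZE.
        size_t requested = 5000;
        size_t page_count = ceil_div(requested, PAGE_SIZE); // 2
        size_t byte_size = page_count * PAGE_SIZE;           // 8192
        printf("pages=%zu size=%zu\n", page_count, byte_size);
        return 0;
    }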
InodeVMObject.cpp
@@ -19,7 +19,7 @@ NonnullRefPtr<VMObject> InodeVMObject::clone()
 }
 
 InodeVMObject::InodeVMObject(Inode& inode)
-    : VMObject(ceil_div(inode.size(), PAGE_SIZE) * PAGE_SIZE, ShouldFillPhysicalPages::Yes)
+    : VMObject(inode.size())
     , m_inode(inode)
 {
 }
@@ -43,18 +43,8 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
 
     InterruptDisabler disabler;
 
-    auto old_page_count = page_count();
-    m_size = new_size;
+    auto new_page_count = PAGE_ROUND_UP(new_size);
+    m_physical_pages.resize(new_page_count);
 
-    if (page_count() > old_page_count) {
-        // Add null pages and let the fault handler page these in when that day comes.
-        for (auto i = old_page_count; i < page_count(); ++i)
-            m_physical_pages.append(nullptr);
-    } else {
-        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
-        for (auto i = page_count(); i < old_page_count; ++i)
-            m_physical_pages.take_last();
-    }
-
     // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
     for_each_region([](Region& region) {
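The grow/shrink bookkeeping the old Vector-based code did by hand collapses into a single m_physical_pages.resize(new_page_count) call. FixedArray::resize itself is not part of this diff; the sketch below only illustrates the behavior the call site appears to rely on (surviving entries are kept, new slots start out null, the dropped tail is released), using standard-library types rather than AK ones.

    #include <algorithm>
    #include <cstddef>
    #include <memory>
    #include <utility>

    // Resize a fixed-size heap array in place: move over the entries that
    // survive, leave newly added slots default-constructed (null RefPtrs in
    // VMObject's case), and let the dropped tail be destroyed.
    template<typename T>
    void resize_fixed_array(std::unique_ptr<T[]>& elements, size_t& size, size_t new_size)
    {
        std::unique_ptr<T[]> new_elements = std::make_unique<T[]>(new_size);
        for (size_t i = 0; i < std::min(size, new_size); ++i)
            new_elements[i] = std::move(elements[i]);
        elements = std::move(new_elements);
        size = new_size;
    }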
VMObject.cpp
@@ -4,18 +4,15 @@
 #include <Kernel/VM/VMObject.h>
 
 VMObject::VMObject(const VMObject& other)
-    : m_size(other.m_size)
-    , m_physical_pages(other.m_physical_pages)
+    : m_physical_pages(other.m_physical_pages)
 {
     MM.register_vmo(*this);
 }
 
-VMObject::VMObject(size_t size, ShouldFillPhysicalPages should_fill_physical_pages)
-    : m_size(size)
+VMObject::VMObject(size_t size)
+    : m_physical_pages(ceil_div(size, PAGE_SIZE))
 {
     MM.register_vmo(*this);
-    if (should_fill_physical_pages == ShouldFillPhysicalPages::Yes)
-        m_physical_pages.resize(page_count());
 }
 
 VMObject::~VMObject()
VMObject.h
@@ -2,7 +2,7 @@
 
 #include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
-#include <AK/Vector.h>
+#include <AK/FixedArray.h>
 #include <AK/Weakable.h>
 #include <Kernel/Lock.h>
 
@@ -21,25 +21,20 @@ public:
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }
 
-    int page_count() const { return m_size / PAGE_SIZE; }
-    const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
-    Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
+    size_t page_count() const { return m_physical_pages.size(); }
+    const FixedArray<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
+    FixedArray<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
 
-    size_t size() const { return m_size; }
+    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
 
 protected:
-    enum ShouldFillPhysicalPages {
-        No = 0,
-        Yes
-    };
-    VMObject(size_t, ShouldFillPhysicalPages);
+    explicit VMObject(size_t);
     explicit VMObject(const VMObject&);
 
     template<typename Callback>
     void for_each_region(Callback);
 
-    size_t m_size { 0 };
-    Vector<RefPtr<PhysicalPage>> m_physical_pages;
+    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
 
 private:
     VMObject& operator=(const VMObject&) = delete;