diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp index c451f8f530..b6f0058b6d 100644 --- a/Kernel/FileSystem/ProcFS.cpp +++ b/Kernel/FileSystem/ProcFS.cpp @@ -315,7 +315,7 @@ Optional<KBuffer> procfs$pid_vmo(InodeIdentifier identifier) region.vmo().is_anonymous() ? "anonymous" : "file-backed", &region.vmo(), region.vmo().ref_count()); - for (int i = 0; i < region.vmo().page_count(); ++i) { + for (size_t i = 0; i < region.vmo().page_count(); ++i) { auto& physical_page = region.vmo().physical_pages()[i]; builder.appendf("P%x%s(%u) ", physical_page ? physical_page->paddr().get() : 0, diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp index ed0be7bf71..a7c3276227 100644 --- a/Kernel/VM/AnonymousVMObject.cpp +++ b/Kernel/VM/AnonymousVMObject.cpp @@ -3,27 +3,25 @@ NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size) { - size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE; return adopt(*new AnonymousVMObject(size)); } NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalAddress paddr, size_t size) { - size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE; return adopt(*new AnonymousVMObject(paddr, size)); } AnonymousVMObject::AnonymousVMObject(size_t size) - : VMObject(size, ShouldFillPhysicalPages::Yes) + : VMObject(size) { } AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size) - : VMObject(size, ShouldFillPhysicalPages::No) + : VMObject(size) { - for (size_t i = 0; i < size; i += PAGE_SIZE) - m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false)); - ASSERT(m_physical_pages.size() == page_count()); + ASSERT(paddr.page_base() == paddr.get()); + for (size_t i = 0; i < page_count(); ++i) + physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false); } AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other) diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp index 3033795f82..29ef9c957a 100644 --- a/Kernel/VM/InodeVMObject.cpp
+++ b/Kernel/VM/InodeVMObject.cpp @@ -19,7 +19,7 @@ NonnullRefPtr<VMObject> InodeVMObject::clone() } InodeVMObject::InodeVMObject(Inode& inode) - : VMObject(ceil_div(inode.size(), PAGE_SIZE) * PAGE_SIZE, ShouldFillPhysicalPages::Yes) + : VMObject(inode.size()) , m_inode(inode) { } @@ -43,18 +43,8 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new InterruptDisabler disabler; - auto old_page_count = page_count(); - m_size = new_size; - - if (page_count() > old_page_count) { - // Add null pages and let the fault handler page these in when that day comes. - for (auto i = old_page_count; i < page_count(); ++i) - m_physical_pages.append(nullptr); - } else { - // Prune the no-longer valid pages. I'm not sure this is actually correct behavior. - for (auto i = page_count(); i < old_page_count; ++i) - m_physical_pages.take_last(); - } + auto new_page_count = ceil_div(new_size, PAGE_SIZE); + m_physical_pages.resize(new_page_count); // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
for_each_region([](Region& region) { diff --git a/Kernel/VM/VMObject.cpp b/Kernel/VM/VMObject.cpp index e7885ffc4d..24c3b71352 100644 --- a/Kernel/VM/VMObject.cpp +++ b/Kernel/VM/VMObject.cpp @@ -4,18 +4,15 @@ #include VMObject::VMObject(const VMObject& other) - : m_size(other.m_size) - , m_physical_pages(other.m_physical_pages) + : m_physical_pages(other.m_physical_pages) { MM.register_vmo(*this); } -VMObject::VMObject(size_t size, ShouldFillPhysicalPages should_fill_physical_pages) - : m_size(size) +VMObject::VMObject(size_t size) + : m_physical_pages(ceil_div(size, PAGE_SIZE)) { MM.register_vmo(*this); - if (should_fill_physical_pages == ShouldFillPhysicalPages::Yes) - m_physical_pages.resize(page_count()); } VMObject::~VMObject() diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h index bd8bab1a43..0013453ab3 100644 --- a/Kernel/VM/VMObject.h +++ b/Kernel/VM/VMObject.h @@ -2,7 +2,7 @@ #include #include -#include <AK/Vector.h> +#include <AK/FixedArray.h> #include #include @@ -21,25 +21,20 @@ public: virtual bool is_anonymous() const { return false; } virtual bool is_inode() const { return false; } - int page_count() const { return m_size / PAGE_SIZE; } - const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; } - Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; } + size_t page_count() const { return m_physical_pages.size(); } + const FixedArray<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; } + FixedArray<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; } - size_t size() const { return m_size; } + size_t size() const { return m_physical_pages.size() * PAGE_SIZE; } protected: - enum ShouldFillPhysicalPages { - No = 0, - Yes - }; - VMObject(size_t, ShouldFillPhysicalPages); + explicit VMObject(size_t); explicit VMObject(const VMObject&); template<typename Callback> void for_each_region(Callback); - size_t m_size { 0 }; - Vector<RefPtr<PhysicalPage>> m_physical_pages; + FixedArray<RefPtr<PhysicalPage>> m_physical_pages; private: VMObject& operator=(const VMObject&) = delete;