diff --git a/Kernel/Devices/BXVGADevice.cpp b/Kernel/Devices/BXVGADevice.cpp
index 9e689258b7..174ff4c5f9 100644
--- a/Kernel/Devices/BXVGADevice.cpp
+++ b/Kernel/Devices/BXVGADevice.cpp
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include <Kernel/VM/AnonymousVMObject.h>
 #include
 #include
 
@@ -89,7 +90,7 @@ KResultOr<Region*> BXVGADevice::mmap(Process& process, FileDescription&, Virtual
 {
     ASSERT(offset == 0);
     ASSERT(size == framebuffer_size_in_bytes());
-    auto vmo = VMObject::create_for_physical_range(framebuffer_address(), framebuffer_size_in_bytes());
+    auto vmo = AnonymousVMObject::create_for_physical_range(framebuffer_address(), framebuffer_size_in_bytes());
     auto* region = process.allocate_region_with_vmo(
         preferred_vaddr,
         framebuffer_size_in_bytes(),
diff --git a/Kernel/FileSystem/Inode.cpp b/Kernel/FileSystem/Inode.cpp
index ae9a77a7fd..a028161de4 100644
--- a/Kernel/FileSystem/Inode.cpp
+++ b/Kernel/FileSystem/Inode.cpp
@@ -3,7 +3,7 @@
 #include
 #include
 #include
-#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 
 HashTable<Inode*>& all_inodes()
 {
diff --git a/Kernel/FileSystem/Inode.h b/Kernel/FileSystem/Inode.h
index 797ec80c1f..c17357d454 100644
--- a/Kernel/FileSystem/Inode.h
+++ b/Kernel/FileSystem/Inode.h
@@ -11,9 +11,9 @@
 #include
 
 class FileDescription;
+class InodeVMObject;
 class InodeWatcher;
 class LocalSocket;
-class VMObject;
 
 class Inode : public RefCounted<Inode>, public Weakable<Inode> {
     friend class VFS;
@@ -69,8 +69,8 @@ public:
     void will_be_destroyed();
 
     void set_vmo(VMObject&);
-    VMObject* vmo() { return m_vmo.ptr(); }
-    const VMObject* vmo() const { return m_vmo.ptr(); }
+    InodeVMObject* vmo() { return m_vmo.ptr(); }
+    const InodeVMObject* vmo() const { return m_vmo.ptr(); }
 
     static void sync();
 
@@ -88,7 +88,7 @@ protected:
 private:
     FS& m_fs;
     unsigned m_index { 0 };
-    WeakPtr<VMObject> m_vmo;
+    WeakPtr<InodeVMObject> m_vmo;
     RefPtr<LocalSocket> m_socket;
     HashTable<InodeWatcher*> m_watchers;
     bool m_metadata_dirty { false };
diff --git a/Kernel/FileSystem/SharedMemory.cpp b/Kernel/FileSystem/SharedMemory.cpp
index 9edffedd14..431af2d951 100644
--- a/Kernel/FileSystem/SharedMemory.cpp
+++ b/Kernel/FileSystem/SharedMemory.cpp
@@ -2,7 +2,7 @@
 #include
 #include
 #include
-#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/AnonymousVMObject.h>
 
 Lockable<HashMap<String, RefPtr<SharedMemory>>>& shared_memories()
 {
@@ -59,7 +59,7 @@ KResult SharedMemory::truncate(int length)
     }
 
     if (!m_vmo) {
-        m_vmo = VMObject::create_anonymous(length);
+        m_vmo = AnonymousVMObject::create_with_size(length);
         return KSuccess;
     }
 
diff --git a/Kernel/FileSystem/SharedMemory.h b/Kernel/FileSystem/SharedMemory.h
index 81a5860748..7cadc38825 100644
--- a/Kernel/FileSystem/SharedMemory.h
+++ b/Kernel/FileSystem/SharedMemory.h
@@ -7,7 +7,7 @@
 #include
 #include
 
-class VMObject;
+class AnonymousVMObject;
 
 class SharedMemory : public File {
 public:
@@ -17,8 +17,8 @@ public:
     const String& name() const { return m_name; }
 
     virtual KResult truncate(off_t) override;
-    VMObject* vmo() { return m_vmo.ptr(); }
-    const VMObject* vmo() const { return m_vmo.ptr(); }
+    AnonymousVMObject* vmo() { return m_vmo.ptr(); }
+    const AnonymousVMObject* vmo() const { return m_vmo.ptr(); }
 
     uid_t uid() const { return m_uid; }
     gid_t gid() const { return m_gid; }
@@ -39,5 +39,5 @@ private:
     uid_t m_uid { 0 };
     gid_t m_gid { 0 };
     mode_t m_mode { 0 };
-    RefPtr<VMObject> m_vmo;
+    RefPtr<AnonymousVMObject> m_vmo;
 };
diff --git a/Kernel/Makefile b/Kernel/Makefile
index 22b8d466f1..77a48d38df 100644
--- a/Kernel/Makefile
+++ b/Kernel/Makefile
@@ -20,6 +20,8 @@ KERNEL_OBJS = \
     VM/MemoryManager.o \
     VM/Region.o \
     VM/VMObject.o \
+    VM/AnonymousVMObject.o \
+    VM/InodeVMObject.o \
     VM/PageDirectory.o \
     VM/PhysicalPage.o \
    VM/PhysicalRegion.o \
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 11cd0977d1..c12ee32541 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <Kernel/VM/InodeVMObject.h>
 #include
 #include
 #include
@@ -339,7 +340,8 @@ int Process::do_exec(String path, Vector<String> arguments, Vector<String> envir
 #endif
 
     ProcessPagingScope paging_scope(*this);
-    auto vmo = VMObject::create_file_backed(description->inode());
+    ASSERT(description->inode());
+    auto vmo = InodeVMObject::create_with_inode(*description->inode());
     RefPtr<Region> region = allocate_region_with_vmo(VirtualAddress(), metadata.size, vmo, 0, description->absolute_path(), PROT_READ);
     ASSERT(region);
 
diff --git a/Kernel/SharedBuffer.h b/Kernel/SharedBuffer.h
index bd2751471d..d5f58d5c48 100644
--- a/Kernel/SharedBuffer.h
+++ b/Kernel/SharedBuffer.h
@@ -1,7 +1,8 @@
 #pragma once
 
-#include
 #include
+#include
+#include <Kernel/VM/AnonymousVMObject.h>
 
 struct SharedBuffer {
 private:
@@ -15,10 +16,11 @@ private:
         unsigned count { 0 };
         Region* region { nullptr };
     };
+
 public:
     SharedBuffer(int id, int size)
         : m_shared_buffer_id(id)
-        , m_vmo(VMObject::create_anonymous(size))
+        , m_vmo(AnonymousVMObject::create_with_size(size))
     {
 #ifdef SHARED_BUFFER_DEBUG
         dbgprintf("Created shared buffer %d of size %d\n", m_shared_buffer_id, size);
@@ -47,7 +49,7 @@ public:
     int m_shared_buffer_id { -1 };
     bool m_writable { true };
     bool m_global { false };
-    NonnullRefPtr<VMObject> m_vmo;
+    NonnullRefPtr<AnonymousVMObject> m_vmo;
     Vector<Reference, 2> m_refs;
     unsigned m_total_refs { 0 };
 };
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
new file mode 100644
index 0000000000..ed0be7bf71
--- /dev/null
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -0,0 +1,41 @@
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/PhysicalPage.h>
+
+NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
+{
+    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
+    return adopt(*new AnonymousVMObject(size));
+}
+
+NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
+{
+    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
+    return adopt(*new AnonymousVMObject(paddr, size));
+}
+
+AnonymousVMObject::AnonymousVMObject(size_t size)
+    : VMObject(size, ShouldFillPhysicalPages::Yes)
+{
+}
+
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
+    : VMObject(size, ShouldFillPhysicalPages::No)
+{
+    for (size_t i = 0; i < size; i += PAGE_SIZE)
+        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
+    ASSERT(m_physical_pages.size() == page_count());
+}
+
+AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
+    : VMObject(other)
+{
+}
+
+AnonymousVMObject::~AnonymousVMObject()
+{
+}
+
+NonnullRefPtr<VMObject> AnonymousVMObject::clone()
+{
+    return adopt(*new AnonymousVMObject(*this));
+}
diff --git a/Kernel/VM/AnonymousVMObject.h b/Kernel/VM/AnonymousVMObject.h
new file mode 100644
index 0000000000..ceec9de8d5
--- /dev/null
+++ b/Kernel/VM/AnonymousVMObject.h
@@ -0,0 +1,24 @@
+#pragma once
+
+#include
+#include <Kernel/VM/VMObject.h>
+
+class AnonymousVMObject final : public VMObject {
+public:
+    virtual ~AnonymousVMObject() override;
+
+    static NonnullRefPtr<AnonymousVMObject> create_with_size(size_t);
+    static NonnullRefPtr<AnonymousVMObject> create_for_physical_range(PhysicalAddress, size_t);
+    virtual NonnullRefPtr<VMObject> clone() override;
+
+private:
+    explicit AnonymousVMObject(size_t);
+    explicit AnonymousVMObject(const AnonymousVMObject&);
+    AnonymousVMObject(PhysicalAddress, size_t);
+
+    AnonymousVMObject& operator=(const AnonymousVMObject&) = delete;
+    AnonymousVMObject& operator=(AnonymousVMObject&&) = delete;
+    AnonymousVMObject(AnonymousVMObject&&) = delete;
+
+    virtual bool is_anonymous() const override { return true; }
+};
diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp
new file mode 100644
index 0000000000..3033795f82
--- /dev/null
+++ b/Kernel/VM/InodeVMObject.cpp
@@ -0,0 +1,131 @@
+#include <Kernel/FileSystem/Inode.h>
+#include <Kernel/VM/InodeVMObject.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/Region.h>
+
+NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
+{
+    InterruptDisabler disabler;
+    if (inode.vmo())
+        return *inode.vmo();
+    auto vmo = adopt(*new InodeVMObject(inode));
+    vmo->inode().set_vmo(*vmo);
+    return vmo;
+}
+
+NonnullRefPtr<VMObject> InodeVMObject::clone()
+{
+    return adopt(*new InodeVMObject(*this));
+}
+
+InodeVMObject::InodeVMObject(Inode& inode)
+    : VMObject(ceil_div(inode.size(), PAGE_SIZE) * PAGE_SIZE, ShouldFillPhysicalPages::Yes)
+    , m_inode(inode)
+{
+}
+
+InodeVMObject::InodeVMObject(const InodeVMObject& other)
+    : VMObject(other)
+    , m_inode(other.m_inode)
+{
+}
+
+InodeVMObject::~InodeVMObject()
+{
+    ASSERT(inode().vmo() == this);
+}
+
+void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
+{
+    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
+        m_inode->fsid(), m_inode->index(),
+        old_size, new_size);
+
+    InterruptDisabler disabler;
+
+    auto old_page_count = page_count();
+    m_size = new_size;
+
+    if (page_count() > old_page_count) {
+        // Add null pages and let the fault handler page these in when that day comes.
+        for (auto i = old_page_count; i < page_count(); ++i)
+            m_physical_pages.append(nullptr);
+    } else {
+        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
+        for (auto i = page_count(); i < old_page_count; ++i)
+            m_physical_pages.take_last();
+    }
+
+    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
+    for_each_region([](Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
+{
+    (void)size;
+    (void)data;
+    InterruptDisabler disabler;
+    ASSERT(offset >= 0);
+
+    // FIXME: Only invalidate the parts that actually changed.
+    for (auto& physical_page : m_physical_pages)
+        physical_page = nullptr;
+
+#if 0
+    size_t current_offset = offset;
+    size_t remaining_bytes = size;
+    const u8* data_ptr = data;
+
+    auto to_page_index = [] (size_t offset) -> size_t {
+        return offset / PAGE_SIZE;
+    };
+
+    if (current_offset & PAGE_MASK) {
+        size_t page_index = to_page_index(current_offset);
+        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+        remaining_bytes -= bytes_to_copy;
+    }
+
+    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
+        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+    }
+#endif
+
+    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
+    for_each_region([](Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+template<typename Callback>
+void VMObject::for_each_region(Callback callback)
+{
+    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
+    // Perhaps VMObject could have a Vector with all of his mappers?
+    for (auto* region : MM.m_user_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+    for (auto* region : MM.m_kernel_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+}
diff --git a/Kernel/VM/InodeVMObject.h b/Kernel/VM/InodeVMObject.h
new file mode 100644
index 0000000000..f8c797a602
--- /dev/null
+++ b/Kernel/VM/InodeVMObject.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include
+#include <Kernel/VM/VMObject.h>
+
+class InodeVMObject final : public VMObject {
+public:
+    virtual ~InodeVMObject() override;
+
+    static NonnullRefPtr<InodeVMObject> create_with_inode(Inode&);
+    virtual NonnullRefPtr<VMObject> clone() override;
+
+    Inode& inode() { return *m_inode; }
+    const Inode& inode() const { return *m_inode; }
+
+    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
+    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
+
+private:
+    explicit InodeVMObject(Inode&);
+    explicit InodeVMObject(const InodeVMObject&);
+
+    InodeVMObject& operator=(const InodeVMObject&) = delete;
+    InodeVMObject& operator=(InodeVMObject&&) = delete;
+    InodeVMObject(InodeVMObject&&) = delete;
+
+    virtual bool is_inode() const override { return true; }
+
+    NonnullRefPtr<Inode> m_inode;
+};
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 319baf71b1..6a73d037d7 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -6,6 +6,8 @@
 #include
 #include
 #include
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 #include
 
 //#define MM_DEBUG
@@ -352,10 +354,11 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
 {
     ASSERT(region.page_directory());
     auto& vmo = region.vmo();
-    ASSERT(!vmo.is_anonymous());
-    ASSERT(vmo.inode());
+    ASSERT(vmo.is_inode());
 
-    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
+    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
+
+    auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
 
     InterruptFlagSaver saver;
 
@@ -374,8 +377,8 @@
 #endif
     sti();
     u8 page_buffer[PAGE_SIZE];
-    auto& inode = *vmo.inode();
-    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
+    auto& inode = inode_vmobject.inode();
+    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
     if (nread < 0) {
         kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
         return false;
@@ -435,7 +438,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     }
     auto page_index_in_region = region->page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
-        if (region->vmo().inode()) {
+        if (region->vmo().is_inode()) {
 #ifdef PAGE_FAULT_DEBUG
             dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
 #endif
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 24bd907342..180a293a22 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -1,13 +1,14 @@
 #include
 #include
 #include
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 #include
 #include
-#include
 
 Region::Region(const Range& range, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(VMObject::create_anonymous(size()))
+    , m_vmo(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
     , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
@@ -17,7 +18,7 @@
 
 Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(VMObject::create_file_backed(move(inode)))
+    , m_vmo(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
     , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
@@ -48,8 +49,7 @@ Region::~Region()
 
 bool Region::page_in()
 {
     ASSERT(m_page_directory);
-    ASSERT(!vmo().is_anonymous());
-    ASSERT(vmo().inode());
+    ASSERT(vmo().is_inode());
 #ifdef MM_DEBUG
     dbgprintf("MM: page_in %u pages\n", page_count());
 #endif
diff --git a/Kernel/VM/VMObject.cpp b/Kernel/VM/VMObject.cpp
index a695f18e6c..e7885ffc4d 100644
--- a/Kernel/VM/VMObject.cpp
+++ b/Kernel/VM/VMObject.cpp
@@ -3,166 +3,22 @@
 #include
 #include
 
-NonnullRefPtr<VMObject> VMObject::create_file_backed(RefPtr<Inode>&& inode)
-{
-    InterruptDisabler disabler;
-    if (inode->vmo())
-        return *inode->vmo();
-    auto vmo = adopt(*new VMObject(move(inode)));
-    vmo->inode()->set_vmo(*vmo);
-    return vmo;
-}
-
-NonnullRefPtr<VMObject> VMObject::create_anonymous(size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    return adopt(*new VMObject(size));
-}
-
-NonnullRefPtr<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    return adopt(*new VMObject(paddr, size));
-}
-
-NonnullRefPtr<VMObject> VMObject::clone()
-{
-    return adopt(*new VMObject(*this));
-}
-
-VMObject::VMObject(VMObject& other)
-    : m_inode_offset(other.m_inode_offset)
-    , m_size(other.m_size)
-    , m_inode(other.m_inode)
+VMObject::VMObject(const VMObject& other)
+    : m_size(other.m_size)
     , m_physical_pages(other.m_physical_pages)
 {
     MM.register_vmo(*this);
 }
 
-VMObject::VMObject(size_t size)
+VMObject::VMObject(size_t size, ShouldFillPhysicalPages should_fill_physical_pages)
     : m_size(size)
 {
     MM.register_vmo(*this);
-    m_physical_pages.resize(page_count());
-}
-
-VMObject::VMObject(PhysicalAddress paddr, size_t size)
-    : m_size(size)
-{
-    MM.register_vmo(*this);
-    for (size_t i = 0; i < size; i += PAGE_SIZE) {
-        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
-    }
-    ASSERT(m_physical_pages.size() == page_count());
-}
-
-VMObject::VMObject(RefPtr<Inode>&& inode)
-    : m_inode(move(inode))
-{
-    ASSERT(m_inode);
-    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
-    m_physical_pages.resize(page_count());
-    MM.register_vmo(*this);
+    if (should_fill_physical_pages == ShouldFillPhysicalPages::Yes)
+        m_physical_pages.resize(page_count());
 }
 
 VMObject::~VMObject()
 {
-    if (m_inode)
-        ASSERT(m_inode->vmo() == this);
     MM.unregister_vmo(*this);
 }
-
-template<typename Callback>
-void VMObject::for_each_region(Callback callback)
-{
-    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
-    // Perhaps VMObject could have a Vector with all of his mappers?
-    for (auto* region : MM.m_user_regions) {
-        if (&region->vmo() == this)
-            callback(*region);
-    }
-    for (auto* region : MM.m_kernel_regions) {
-        if (&region->vmo() == this)
-            callback(*region);
-    }
-}
-
-void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
-        m_inode->fsid(), m_inode->index(),
-        old_size, new_size);
-
-    InterruptDisabler disabler;
-
-    auto old_page_count = page_count();
-    m_size = new_size;
-
-    if (page_count() > old_page_count) {
-        // Add null pages and let the fault handler page these in when that day comes.
-        for (auto i = old_page_count; i < page_count(); ++i)
-            m_physical_pages.append(nullptr);
-    } else {
-        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
-        for (auto i = page_count(); i < old_page_count; ++i)
-            m_physical_pages.take_last();
-    }
-
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([](Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
-
-void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const u8* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([](Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
index 3bc549fd83..bd8bab1a43 100644
--- a/Kernel/VM/VMObject.h
+++ b/Kernel/VM/VMObject.h
@@ -1,14 +1,10 @@
 #pragma once
 
-#include
-#include
-#include
 #include
+#include
 #include
 #include
 #include
-#include
-#include
 
 class Inode;
 class PhysicalPage;
@@ -18,39 +14,37 @@ class VMObject : public RefCounted<VMObject>
     friend class MemoryManager;
 
 public:
-    static NonnullRefPtr<VMObject> create_file_backed(RefPtr<Inode>&&);
-    static NonnullRefPtr<VMObject> create_anonymous(size_t);
-    static NonnullRefPtr<VMObject> create_for_physical_range(PhysicalAddress, size_t);
-    NonnullRefPtr<VMObject> clone();
+    virtual ~VMObject();
 
-    ~VMObject();
-    bool is_anonymous() const { return !m_inode; }
+    virtual NonnullRefPtr<VMObject> clone() = 0;
 
-    Inode* inode() { return m_inode.ptr(); }
-    const Inode* inode() const { return m_inode.ptr(); }
-    size_t inode_offset() const { return m_inode_offset; }
+    virtual bool is_anonymous() const { return false; }
+    virtual bool is_inode() const { return false; }
 
     int page_count() const { return m_size / PAGE_SIZE; }
     const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
     Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
 
-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
-    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
-
     size_t size() const { return m_size; }
 
-private:
-    VMObject(RefPtr<Inode>&&);
-    explicit VMObject(VMObject&);
-    explicit VMObject(size_t);
-    VMObject(PhysicalAddress, size_t);
+protected:
+    enum ShouldFillPhysicalPages {
+        No = 0,
+        Yes
+    };
+    VMObject(size_t, ShouldFillPhysicalPages);
+    explicit VMObject(const VMObject&);
 
     template<typename Callback>
     void for_each_region(Callback);
 
-    off_t m_inode_offset { 0 };
     size_t m_size { 0 };
-    RefPtr<Inode> m_inode;
     Vector<RefPtr<PhysicalPage>> m_physical_pages;
+
+private:
+    VMObject& operator=(const VMObject&) = delete;
+    VMObject& operator=(VMObject&&) = delete;
+    VMObject(VMObject&&) = delete;
+
     Lock m_paging_lock { "VMObject" };
 };