diff --git a/Kernel/FileSystem/Inode.cpp b/Kernel/FileSystem/Inode.cpp
index bbc09d0921..e9b2217b05 100644
--- a/Kernel/FileSystem/Inode.cpp
+++ b/Kernel/FileSystem/Inode.cpp
@@ -79,14 +79,14 @@ void Inode::will_be_destroyed()
 void Inode::inode_contents_changed(off_t offset, ssize_t size, const u8* data)
 {
-    if (m_vmo)
-        m_vmo->inode_contents_changed({}, offset, size, data);
+    if (m_vmobject)
+        m_vmobject->inode_contents_changed({}, offset, size, data);
 }
 
 void Inode::inode_size_changed(size_t old_size, size_t new_size)
 {
-    if (m_vmo)
-        m_vmo->inode_size_changed({}, old_size, new_size);
+    if (m_vmobject)
+        m_vmobject->inode_size_changed({}, old_size, new_size);
 }
 
 int Inode::set_atime(time_t)
@@ -116,7 +116,7 @@ int Inode::decrement_link_count()
 void Inode::set_vmo(VMObject& vmo)
 {
-    m_vmo = vmo.make_weak_ptr();
+    m_vmobject = vmo.make_weak_ptr();
 }
 
 bool Inode::bind_socket(LocalSocket& socket)
diff --git a/Kernel/FileSystem/Inode.h b/Kernel/FileSystem/Inode.h
index 7153fdd6f7..ff61f91f8c 100644
--- a/Kernel/FileSystem/Inode.h
+++ b/Kernel/FileSystem/Inode.h
@@ -72,8 +72,8 @@ public:
     void will_be_destroyed();
 
     void set_vmo(VMObject&);
-    InodeVMObject* vmo() { return m_vmo.ptr(); }
-    const InodeVMObject* vmo() const { return m_vmo.ptr(); }
+    InodeVMObject* vmobject() { return m_vmobject.ptr(); }
+    const InodeVMObject* vmobject() const { return m_vmobject.ptr(); }
 
     static void sync();
 
@@ -95,7 +95,7 @@ protected:
 private:
     FS& m_fs;
     unsigned m_index { 0 };
-    WeakPtr<InodeVMObject> m_vmo;
+    WeakPtr<InodeVMObject> m_vmobject;
     RefPtr<LocalSocket> m_socket;
     HashTable<InodeWatcher*> m_watchers;
     bool m_metadata_dirty { false };
diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index 34debc2868..3be5c96cb1 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -417,11 +417,11 @@ Optional<KBuffer> procfs$pid_vmo(InodeIdentifier identifier)
             region.size(),
             region.name().characters());
         builder.appendf("VMO: %s @ %x(%u)\n",
-            region.vmo().is_anonymous() ? "anonymous" : "file-backed",
-            &region.vmo(),
-            region.vmo().ref_count());
-        for (size_t i = 0; i < region.vmo().page_count(); ++i) {
-            auto& physical_page = region.vmo().physical_pages()[i];
+            region.vmobject().is_anonymous() ? "anonymous" : "file-backed",
+            &region.vmobject(),
+            region.vmobject().ref_count());
+        for (size_t i = 0; i < region.vmobject().page_count(); ++i) {
+            auto& physical_page = region.vmobject().physical_pages()[i];
             builder.appendf("P%x%s(%u) ",
                 physical_page ? physical_page->paddr().get() : 0,
                 region.should_cow(i) ? "!" : "",
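The pattern worth noting in the Inode changes above is the weak back-pointer: an Inode only weakly references its InodeVMObject, so vmobject() can legitimately return null once the last Region holding the object goes away, and both notification hooks null-check before forwarding. A minimal standalone sketch of that ownership shape, using std::shared_ptr/std::weak_ptr as stand-ins for the kernel's RefPtr/WeakPtr (all types here are simplified models, not kernel API):

```cpp
#include <cassert>
#include <cstdio>
#include <memory>

// Stand-in for the kernel type, just to model ownership.
struct VMObject {
    int page_count { 4 };
};

struct Inode {
    // Weak back-pointer: the Inode never keeps the VMObject alive.
    std::weak_ptr<VMObject> m_vmobject;

    void set_vmobject(const std::shared_ptr<VMObject>& vmobject) { m_vmobject = vmobject; }

    // Like Inode::vmobject(): may return null after the object dies.
    std::shared_ptr<VMObject> vmobject() const { return m_vmobject.lock(); }

    void inode_contents_changed()
    {
        // Mirrors the kernel's null check before notifying the cache.
        if (auto vmobject = m_vmobject.lock())
            printf("notify VMObject with %d pages\n", vmobject->page_count);
    }
};

int main()
{
    Inode inode;
    {
        auto vmobject = std::make_shared<VMObject>();
        inode.set_vmobject(vmobject);
        assert(inode.vmobject());       // alive while a strong ref exists
        inode.inode_contents_changed(); // notifies
    }
    assert(!inode.vmobject());      // strong refs gone; weak pointer is now null
    inode.inode_contents_changed(); // silently does nothing
}
```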
: "", diff --git a/Kernel/FileSystem/SharedMemory.cpp b/Kernel/FileSystem/SharedMemory.cpp index 431af2d951..fe9d27a3b8 100644 --- a/Kernel/FileSystem/SharedMemory.cpp +++ b/Kernel/FileSystem/SharedMemory.cpp @@ -54,12 +54,12 @@ SharedMemory::~SharedMemory() KResult SharedMemory::truncate(int length) { if (!length) { - m_vmo = nullptr; + m_vmobject = nullptr; return KSuccess; } - if (!m_vmo) { - m_vmo = AnonymousVMObject::create_with_size(length); + if (!m_vmobject) { + m_vmobject = AnonymousVMObject::create_with_size(length); return KSuccess; } @@ -91,7 +91,7 @@ int SharedMemory::write(FileDescription&, const u8* data, int data_size) KResultOr SharedMemory::mmap(Process& process, FileDescription&, VirtualAddress vaddr, size_t offset, size_t size, int prot) { - if (!vmo()) + if (!vmobject()) return KResult(-ENODEV); - return process.allocate_region_with_vmo(vaddr, size, *vmo(), offset, name(), prot); + return process.allocate_region_with_vmo(vaddr, size, *vmobject(), offset, name(), prot); } diff --git a/Kernel/FileSystem/SharedMemory.h b/Kernel/FileSystem/SharedMemory.h index 7cadc38825..e778d97d7b 100644 --- a/Kernel/FileSystem/SharedMemory.h +++ b/Kernel/FileSystem/SharedMemory.h @@ -17,8 +17,8 @@ public: const String& name() const { return m_name; } virtual KResult truncate(off_t) override; - AnonymousVMObject* vmo() { return m_vmo.ptr(); } - const AnonymousVMObject* vmo() const { return m_vmo.ptr(); } + AnonymousVMObject* vmobject() { return m_vmobject.ptr(); } + const AnonymousVMObject* vmobject() const { return m_vmobject.ptr(); } uid_t uid() const { return m_uid; } gid_t gid() const { return m_gid; } @@ -39,5 +39,5 @@ private: uid_t m_uid { 0 }; gid_t m_gid { 0 }; mode_t m_mode { 0 }; - RefPtr m_vmo; + RefPtr m_vmobject; }; diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp index 7374735376..be18d487b8 100644 --- a/Kernel/Process.cpp +++ b/Kernel/Process.cpp @@ -252,7 +252,7 @@ int Process::sys$munmap(void* addr, size_t size) size_t new_range_offset_in_old_region = new_range.base().get() - old_region_range.base().get(); size_t first_physical_page_of_new_region_in_old_region = new_range_offset_in_old_region / PAGE_SIZE; for (size_t i = 0; i < new_region.page_count(); ++i) { - new_region.vmo().physical_pages()[i] = old_region->vmo().physical_pages()[first_physical_page_of_new_region_in_old_region + i]; + new_region.vmobject().physical_pages()[i] = old_region->vmobject().physical_pages()[first_physical_page_of_new_region_in_old_region + i]; } return new_region; }; diff --git a/Kernel/SharedBuffer.cpp b/Kernel/SharedBuffer.cpp index 5753a2a6b3..d95e9ba224 100644 --- a/Kernel/SharedBuffer.cpp +++ b/Kernel/SharedBuffer.cpp @@ -61,7 +61,7 @@ void* SharedBuffer::ref_for_process_and_get_address(Process& process) ref.count++; m_total_refs++; if (ref.region == nullptr) { - ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmo, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0)); + ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmobject, 0, "SharedBuffer", PROT_READ | (m_writable ? 
diff --git a/Kernel/SharedBuffer.h b/Kernel/SharedBuffer.h
index d5f58d5c48..b690c34be1 100644
--- a/Kernel/SharedBuffer.h
+++ b/Kernel/SharedBuffer.h
@@ -20,7 +20,7 @@ private:
 public:
     SharedBuffer(int id, int size)
         : m_shared_buffer_id(id)
-        , m_vmo(AnonymousVMObject::create_with_size(size))
+        , m_vmobject(AnonymousVMObject::create_with_size(size))
     {
 #ifdef SHARED_BUFFER_DEBUG
         dbgprintf("Created shared buffer %d of size %d\n", m_shared_buffer_id, size);
@@ -41,7 +41,7 @@ public:
     void share_globally() { m_global = true; }
     void deref_for_process(Process& process);
     void disown(pid_t pid);
-    size_t size() const { return m_vmo->size(); }
+    size_t size() const { return m_vmobject->size(); }
     void destroy_if_unused();
     void seal();
     int id() const { return m_shared_buffer_id; }
@@ -49,7 +49,7 @@ public:
     int m_shared_buffer_id { -1 };
     bool m_writable { true };
    bool m_global { false };
-    NonnullRefPtr<AnonymousVMObject> m_vmo;
+    NonnullRefPtr<AnonymousVMObject> m_vmobject;
     Vector<Reference> m_refs;
     unsigned m_total_refs { 0 };
 };
diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp
index b3c5579401..e96e44b473 100644
--- a/Kernel/VM/InodeVMObject.cpp
+++ b/Kernel/VM/InodeVMObject.cpp
@@ -6,8 +6,8 @@ NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
 {
     InterruptDisabler disabler;
-    if (inode.vmo())
-        return *inode.vmo();
+    if (inode.vmobject())
+        return *inode.vmobject();
     auto vmo = adopt(*new InodeVMObject(inode));
     vmo->inode().set_vmo(*vmo);
     return vmo;
@@ -32,7 +32,7 @@ InodeVMObject::InodeVMObject(const InodeVMObject& other)
 InodeVMObject::~InodeVMObject()
 {
-    ASSERT(inode().vmo() == this);
+    ASSERT(inode().vmobject() == this);
 }
 
 void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
@@ -111,11 +111,11 @@ void VMObject::for_each_region(Callback callback)
     // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
     // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
     for (auto& region : MM.m_user_regions) {
-        if (&region.vmo() == this)
+        if (&region.vmobject() == this)
             callback(region);
     }
     for (auto& region : MM.m_kernel_regions) {
-        if (&region.vmo() == this)
+        if (&region.vmobject() == this)
             callback(region);
     }
 }
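InodeVMObject::create_with_inode above is effectively a memoized constructor: each Inode gets at most one InodeVMObject, and later callers get the cached instance back through the inode's weak pointer. A simplified single-threaded sketch of that pattern (the real code relies on an InterruptDisabler rather than a lock; types are stand-ins, not kernel API):

```cpp
#include <cassert>
#include <memory>

struct Inode;

struct InodeVMObject {
    Inode& m_inode;
    explicit InodeVMObject(Inode& inode) : m_inode(inode) {}
};

struct Inode {
    std::weak_ptr<InodeVMObject> m_vmobject;
    std::shared_ptr<InodeVMObject> vmobject() const { return m_vmobject.lock(); }
};

// One VMObject per inode: reuse the cached instance if it is still alive.
std::shared_ptr<InodeVMObject> create_with_inode(Inode& inode)
{
    if (auto existing = inode.vmobject())
        return existing;
    auto vmobject = std::make_shared<InodeVMObject>(inode);
    inode.m_vmobject = vmobject; // register the weak back-pointer
    return vmobject;
}

int main()
{
    Inode inode;
    auto a = create_with_inode(inode);
    auto b = create_with_inode(inode);
    assert(a == b); // same cached object as long as a strong ref is live
}
```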
Remap r/w\n"); @@ -338,7 +338,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region) bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region) { ASSERT(region.page_directory()); - auto& vmo = region.vmo(); + auto& vmo = region.vmobject(); ASSERT(vmo.is_inode()); auto& inode_vmobject = static_cast(vmo); @@ -425,7 +425,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault) } auto page_index_in_region = region->page_index_from_address(fault.vaddr()); if (fault.type() == PageFault::Type::PageNotPresent) { - if (region->vmo().is_inode()) { + if (region->vmobject().is_inode()) { #ifdef PAGE_FAULT_DEBUG dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region); #endif @@ -657,7 +657,7 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg InterruptDisabler disabler; auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE); auto& pte = ensure_pte(*region.page_directory(), page_vaddr); - auto& physical_page = region.vmo().physical_pages()[page_index_in_region]; + auto& physical_page = region.vmobject().physical_pages()[page_index_in_region]; ASSERT(physical_page); pte.set_physical_page_base(physical_page->paddr().get()); pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here? @@ -683,7 +683,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& { InterruptDisabler disabler; region.set_page_directory(page_directory); - auto& vmo = region.vmo(); + auto& vmo = region.vmobject(); #ifdef MM_DEBUG dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count()); #endif @@ -725,7 +725,7 @@ bool MemoryManager::unmap_region(Region& region) pte.set_user_allowed(false); region.page_directory()->flush(vaddr); #ifdef MM_DEBUG - auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i]; + auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i]; dbgprintf("MM: >> Unmapped V%p => P%x <<\n", vaddr, physical_page ? 
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index ebe9a576fb..31cbda862c 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -8,20 +8,20 @@
 Region::Region(const Range& range, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(AnonymousVMObject::create_with_size(size()))
+    , m_vmobject(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
 
 Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(InodeVMObject::create_with_inode(*inode))
+    , m_vmobject(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
 
@@ -29,10 +29,10 @@ Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
 Region::Region(const Range& range, NonnullRefPtr<VMObject> vmo, size_t offset_in_vmo, const String& name, u8 access, bool cow)
     : m_range(range)
     , m_offset_in_vmo(offset_in_vmo)
-    , m_vmo(move(vmo))
+    , m_vmobject(move(vmo))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
@@ -62,7 +62,7 @@ NonnullRefPtr<Region> Region::clone()
             vaddr().get());
 #endif
         // Create a new region backed by the same VMObject.
-        return Region::create_user_accessible(m_range, m_vmo, m_offset_in_vmo, m_name, m_access);
+        return Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmo, m_name, m_access);
     }
 
 #ifdef MM_DEBUG
@@ -75,24 +75,24 @@ NonnullRefPtr<Region> Region::clone()
     // Set up a COW region. The parent (this) region becomes COW as well!
     m_cow_map.fill(true);
     MM.remap_region(current->process().page_directory(), *this);
-    return Region::create_user_accessible(m_range, m_vmo->clone(), m_offset_in_vmo, m_name, m_access, true);
+    return Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmo, m_name, m_access, true);
 }
 
 int Region::commit()
 {
     InterruptDisabler disabler;
 #ifdef MM_DEBUG
-    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmo().page_count(), this, &vmo(), vaddr().get());
+    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
 #endif
     for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmo().physical_pages()[i].is_null())
+        if (!vmobject().physical_pages()[i].is_null())
             continue;
         auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (!physical_page) {
             kprintf("MM: commit was unable to allocate a physical page\n");
             return -ENOMEM;
         }
-        vmo().physical_pages()[i] = move(physical_page);
+        vmobject().physical_pages()[i] = move(physical_page);
         MM.remap_region_page(*this, i);
     }
     return 0;
@@ -102,7 +102,7 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        if (m_vmo->physical_pages()[first_page_index() + i])
+        if (m_vmobject->physical_pages()[first_page_index() + i])
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -112,7 +112,7 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmo->physical_pages()[first_page_index() + i];
+        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
         if (physical_page && physical_page->ref_count() > 1)
             bytes += PAGE_SIZE;
     }
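Region::clone above has two outcomes: a shared region hands the new region the very same VMObject, while a private region clones the VMObject and marks both parent and child copy-on-write. A compressed model of that branch (simplified stand-in types; the COW bitmap collapses to a single bool here):

```cpp
#include <cassert>
#include <memory>

struct VMObject {
    std::shared_ptr<VMObject> clone() const { return std::make_shared<VMObject>(*this); }
};

struct Region {
    std::shared_ptr<VMObject> m_vmobject;
    bool m_shared { false };
    bool m_cow { false };

    Region clone()
    {
        if (m_shared) {
            // Shared mapping: the new region maps the same VMObject.
            return Region { m_vmobject, true, false };
        }
        // Private mapping: parent and child both become COW over a cloned
        // VMObject, so neither observes the other's future writes.
        m_cow = true;
        return Region { m_vmobject->clone(), false, true };
    }
};

int main()
{
    Region shared { std::make_shared<VMObject>(), true };
    assert(shared.clone().m_vmobject == shared.m_vmobject); // same object

    Region priv { std::make_shared<VMObject>(), false };
    auto child = priv.clone();
    assert(child.m_vmobject != priv.m_vmobject); // cloned object
    assert(priv.m_cow && child.m_cow);           // both sides now COW
}
```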
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 4f07ce9620..308166b892 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -38,8 +38,8 @@ public:
     void set_name(const String& name) { m_name = name; }
 
-    const VMObject& vmo() const { return *m_vmo; }
-    VMObject& vmo() { return *m_vmo; }
+    const VMObject& vmobject() const { return *m_vmobject; }
+    VMObject& vmobject() { return *m_vmobject; }
 
     bool is_shared() const { return m_shared; }
     void set_shared(bool shared) { m_shared = shared; }
@@ -120,7 +120,7 @@ private:
     RefPtr<PageDirectory> m_page_directory;
     Range m_range;
     size_t m_offset_in_vmo { 0 };
-    NonnullRefPtr<VMObject> m_vmo;
+    NonnullRefPtr<VMObject> m_vmobject;
     String m_name;
     u8 m_access { 0 };
     bool m_shared { false };
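With Region::vmobject() in place, call sites keep the same shape as before the rename, e.g. the amount_resident() walk over the physical page slots. A standalone version of that accounting loop (stand-in types as in the sketches above; PAGE_SIZE assumed to be 4096):

```cpp
#include <cstdio>
#include <memory>
#include <vector>

constexpr size_t PAGE_SIZE = 4096;

struct VMObject {
    // Null slots model pages that are not yet paged in.
    std::vector<std::shared_ptr<std::vector<unsigned char>>> physical_pages;
};

struct Region {
    std::shared_ptr<VMObject> m_vmobject;
    VMObject& vmobject() { return *m_vmobject; }
    const VMObject& vmobject() const { return *m_vmobject; }

    // Same shape as Region::amount_resident(): a page contributes only
    // if its slot actually holds a physical page.
    size_t amount_resident() const
    {
        size_t bytes = 0;
        for (auto& physical_page : vmobject().physical_pages) {
            if (physical_page)
                bytes += PAGE_SIZE;
        }
        return bytes;
    }
};

int main()
{
    Region region { std::make_shared<VMObject>() };
    region.vmobject().physical_pages.resize(4); // 4 lazy (null) slots
    region.vmobject().physical_pages[1] = std::make_shared<std::vector<unsigned char>>(PAGE_SIZE);
    printf("%zu bytes resident\n", region.amount_resident()); // prints 4096
}
```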