
Kernel: Rename "vmo" to "vmobject" everywhere

Andreas Kling 2019-09-04 11:27:14 +02:00
parent e5500e2a22
commit e25ade7579
12 changed files with 54 additions and 54 deletions
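
The rename is mechanical: the abbreviated accessor and member names are spelled out, while locals that merely hold a reference (auto& vmo = ...) and the unrelated m_offset_in_vmo member keep their short names. The pattern, excerpted from the class-header hunk at the end of this diff:

    // Before:
    const VMObject& vmo() const { return *m_vmo; }
    VMObject& vmo() { return *m_vmo; }

    // After:
    const VMObject& vmobject() const { return *m_vmobject; }
    VMObject& vmobject() { return *m_vmobject; }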


@@ -6,8 +6,8 @@
 NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
 {
     InterruptDisabler disabler;
-    if (inode.vmo())
-        return *inode.vmo();
+    if (inode.vmobject())
+        return *inode.vmobject();
     auto vmo = adopt(*new InodeVMObject(inode));
     vmo->inode().set_vmo(*vmo);
     return vmo;
@@ -32,7 +32,7 @@ InodeVMObject::InodeVMObject(const InodeVMObject& other)
 
 InodeVMObject::~InodeVMObject()
 {
-    ASSERT(inode().vmo() == this);
+    ASSERT(inode().vmobject() == this);
 }
 
 void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
@@ -111,11 +111,11 @@ void VMObject::for_each_region(Callback callback)
     // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
     // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
     for (auto& region : MM.m_user_regions) {
-        if (&region.vmo() == this)
+        if (&region.vmobject() == this)
             callback(region);
     }
     for (auto& region : MM.m_kernel_regions) {
-        if (&region.vmo() == this)
+        if (&region.vmobject() == this)
             callback(region);
     }
 }
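
The FIXME above suggests inverting the lookup: instead of for_each_region() scanning every region in the system, each VMObject could keep a list of the regions that map it. A minimal sketch of that idea, with hypothetical add_mapper()/remove_mapper() hooks that Region would call when it starts and stops mapping a VMObject (the names are invented here, and std::vector stands in for AK's Vector):

    #include <algorithm>
    #include <vector>

    struct Region; // stand-in declaration for the kernel class

    class VMObject {
    public:
        // Hypothetical hooks, called from Region's constructor/destructor.
        void add_mapper(Region& region) { m_mappers.push_back(&region); }
        void remove_mapper(Region& region)
        {
            m_mappers.erase(std::remove(m_mappers.begin(), m_mappers.end(), &region), m_mappers.end());
        }

        template<typename Callback>
        void for_each_region(Callback callback)
        {
            // O(number of mappers) instead of walking MM.m_user_regions
            // and MM.m_kernel_regions on every inode change.
            for (auto* region : m_mappers)
                callback(*region);
        }

    private:
        std::vector<Region*> m_mappers; // the "Vector<Region*> with all of his mappers"
    };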


@@ -282,7 +282,7 @@ const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress
 bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
     auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
     sti();
     LOCKER(vmo.m_paging_lock);
@@ -307,7 +307,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
     if (vmo.physical_pages()[page_index_in_region]->ref_count() == 1) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
@@ -338,7 +338,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
     ASSERT(region.page_directory());
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
     ASSERT(vmo.is_inode());
 
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
@@ -425,7 +425,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     }
     auto page_index_in_region = region->page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
-        if (region->vmo().is_inode()) {
+        if (region->vmobject().is_inode()) {
 #ifdef PAGE_FAULT_DEBUG
             dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
 #endif
@@ -657,7 +657,7 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region)
     InterruptDisabler disabler;
     auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
     auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
-    auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
+    auto& physical_page = region.vmobject().physical_pages()[page_index_in_region];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
     pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
@@ -683,7 +683,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region)
 {
     InterruptDisabler disabler;
     region.set_page_directory(page_directory);
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
 #ifdef MM_DEBUG
     dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
 #endif
@@ -725,7 +725,7 @@ bool MemoryManager::unmap_region(Region& region)
         pte.set_user_allowed(false);
         region.page_directory()->flush(vaddr);
 #ifdef MM_DEBUG
-        auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i];
+        auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
         dbgprintf("MM: >> Unmapped V%p => P%x <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
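
Taken together, these hunks touch the three ways a fault gets satisfied: demand-zero for anonymous pages, page-in from the backing inode, and copy-on-write. An annotated skeleton of the dispatch, with the control flow inferred from handle_page_fault() and the helpers above (not the verbatim kernel code):

    enum class FaultType { PageNotPresent, ProtectionViolation };

    // Returns true if the fault was resolved and the faulting
    // instruction can be retried.
    bool handle_fault_sketch(FaultType type, bool backed_by_inode, bool page_is_cow)
    {
        if (type == FaultType::PageNotPresent) {
            if (backed_by_inode)
                return true; // page_in_from_inode(): read the page from the inode
            return true;     // zero_page(): hand out a zero-filled physical page
        }
        if (page_is_cow)
            return true;     // copy_on_write(): copy, or remap r/w if ref_count() == 1
        return false;        // genuine protection violation
    }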


@@ -8,20 +8,20 @@
 Region::Region(const Range& range, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(AnonymousVMObject::create_with_size(size()))
+    , m_vmobject(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
 
 Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(InodeVMObject::create_with_inode(*inode))
+    , m_vmobject(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
@@ -29,10 +29,10 @@ Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
 Region::Region(const Range& range, NonnullRefPtr<VMObject> vmo, size_t offset_in_vmo, const String& name, u8 access, bool cow)
     : m_range(range)
     , m_offset_in_vmo(offset_in_vmo)
-    , m_vmo(move(vmo))
+    , m_vmobject(move(vmo))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
@@ -62,7 +62,7 @@ NonnullRefPtr<Region> Region::clone()
         vaddr().get());
 #endif
     // Create a new region backed by the same VMObject.
-    return Region::create_user_accessible(m_range, m_vmo, m_offset_in_vmo, m_name, m_access);
+    return Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmo, m_name, m_access);
 }
 
 #ifdef MM_DEBUG
@@ -75,24 +75,24 @@ NonnullRefPtr<Region> Region::clone()
     // Set up a COW region. The parent (this) region becomes COW as well!
     m_cow_map.fill(true);
     MM.remap_region(current->process().page_directory(), *this);
-    return Region::create_user_accessible(m_range, m_vmo->clone(), m_offset_in_vmo, m_name, m_access, true);
+    return Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmo, m_name, m_access, true);
 }
 
 int Region::commit()
 {
     InterruptDisabler disabler;
 #ifdef MM_DEBUG
-    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmo().page_count(), this, &vmo(), vaddr().get());
+    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
 #endif
     for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmo().physical_pages()[i].is_null())
+        if (!vmobject().physical_pages()[i].is_null())
             continue;
         auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (!physical_page) {
             kprintf("MM: commit was unable to allocate a physical page\n");
             return -ENOMEM;
         }
-        vmo().physical_pages()[i] = move(physical_page);
+        vmobject().physical_pages()[i] = move(physical_page);
         MM.remap_region_page(*this, i);
     }
     return 0;
@@ -102,7 +102,7 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        if (m_vmo->physical_pages()[first_page_index() + i])
+        if (m_vmobject->physical_pages()[first_page_index() + i])
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -112,7 +112,7 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmo->physical_pages()[first_page_index() + i];
+        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
         if (physical_page && physical_page->ref_count() > 1)
             bytes += PAGE_SIZE;
     }
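
The clone() hunks above show the copy-on-write handshake: the parent region marks all of its pages COW, remaps itself accordingly, and hands the child a clone of the vmobject; copy_on_write() (earlier in this diff) then copies a page only while it is still shared. A self-contained toy model of that refcount rule, using std::shared_ptr in place of the kernel's PhysicalPage refcounting:

    #include <cstdio>
    #include <memory>

    struct Page { int data = 0; };

    // Write through a possibly-shared page, copying first if anyone else
    // still holds it -- the same test copy_on_write() does with ref_count().
    void cow_write(std::shared_ptr<Page>& page, int value)
    {
        if (page.use_count() > 1)
            page = std::make_shared<Page>(*page); // shared: copy before writing
        page->data = value;                       // sole owner: write in place
    }

    int main()
    {
        auto parent = std::make_shared<Page>();
        auto child = parent;  // "clone": both point at the same page
        cow_write(child, 42); // first write triggers the copy
        std::printf("parent=%d child=%d\n", parent->data, child->data);
        // prints: parent=0 child=42
    }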


@@ -38,8 +38,8 @@ public:
     void set_name(const String& name) { m_name = name; }
 
-    const VMObject& vmo() const { return *m_vmo; }
-    VMObject& vmo() { return *m_vmo; }
+    const VMObject& vmobject() const { return *m_vmobject; }
+    VMObject& vmobject() { return *m_vmobject; }
 
     bool is_shared() const { return m_shared; }
     void set_shared(bool shared) { m_shared = shared; }
@@ -120,7 +120,7 @@ private:
     RefPtr<PageDirectory> m_page_directory;
     Range m_range;
     size_t m_offset_in_vmo { 0 };
-    NonnullRefPtr<VMObject> m_vmo;
+    NonnullRefPtr<VMObject> m_vmobject;
     String m_name;
     u8 m_access { 0 };
     bool m_shared { false };