Kernel: Rename "vmo" to "vmobject" everywhere
commit e25ade7579 (parent e5500e2a22)
12 changed files with 54 additions and 54 deletions
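The rename is purely mechanical: the accessor vmo() becomes vmobject() and the member m_vmo becomes m_vmobject across all twelve files, while short-lived locals and parameters keep the terse name. The pattern, distilled from the hunks below:

    // Before: abbreviated accessor and member.
    VMObject& vmo() { return *m_vmo; }

    // After: spelled-out accessor and member; call sites may still bind a
    // short local, as the MemoryManager.cpp hunks below do.
    VMObject& vmobject() { return *m_vmobject; }
    auto& vmo = region.vmobject();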
@@ -79,14 +79,14 @@ void Inode::will_be_destroyed()
 
 void Inode::inode_contents_changed(off_t offset, ssize_t size, const u8* data)
 {
-    if (m_vmo)
-        m_vmo->inode_contents_changed({}, offset, size, data);
+    if (m_vmobject)
+        m_vmobject->inode_contents_changed({}, offset, size, data);
 }
 
 void Inode::inode_size_changed(size_t old_size, size_t new_size)
 {
-    if (m_vmo)
-        m_vmo->inode_size_changed({}, old_size, new_size);
+    if (m_vmobject)
+        m_vmobject->inode_size_changed({}, old_size, new_size);
 }
 
 int Inode::set_atime(time_t)
@@ -116,7 +116,7 @@ int Inode::decrement_link_count()
 
 void Inode::set_vmo(VMObject& vmo)
 {
-    m_vmo = vmo.make_weak_ptr();
+    m_vmobject = vmo.make_weak_ptr();
 }
 
 bool Inode::bind_socket(LocalSocket& socket)
@@ -72,8 +72,8 @@ public:
     void will_be_destroyed();
 
     void set_vmo(VMObject&);
-    InodeVMObject* vmo() { return m_vmo.ptr(); }
-    const InodeVMObject* vmo() const { return m_vmo.ptr(); }
+    InodeVMObject* vmobject() { return m_vmobject.ptr(); }
+    const InodeVMObject* vmobject() const { return m_vmobject.ptr(); }
 
     static void sync();
 
@@ -95,7 +95,7 @@ protected:
 private:
     FS& m_fs;
     unsigned m_index { 0 };
-    WeakPtr<InodeVMObject> m_vmo;
+    WeakPtr<InodeVMObject> m_vmobject;
     RefPtr<LocalSocket> m_socket;
     HashTable<InodeWatcher*> m_watchers;
     bool m_metadata_dirty { false };
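Worth noting while reading the ownership-related hunks: Inode keeps only a weak reference to its VMObject, while Region (see the Region.h hunk at the end of this diff) holds a strong one, so an inode's page cache lives exactly as long as some mapping does. The two declarations side by side, both taken verbatim from this diff:

    WeakPtr<InodeVMObject> m_vmobject;   // Inode.h: non-owning back-pointer
    NonnullRefPtr<VMObject> m_vmobject;  // Region.h: owning reference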
@@ -417,11 +417,11 @@ Optional<KBuffer> procfs$pid_vmo(InodeIdentifier identifier)
         region.size(),
         region.name().characters());
     builder.appendf("VMO: %s @ %x(%u)\n",
-        region.vmo().is_anonymous() ? "anonymous" : "file-backed",
-        &region.vmo(),
-        region.vmo().ref_count());
-    for (size_t i = 0; i < region.vmo().page_count(); ++i) {
-        auto& physical_page = region.vmo().physical_pages()[i];
+        region.vmobject().is_anonymous() ? "anonymous" : "file-backed",
+        &region.vmobject(),
+        region.vmobject().ref_count());
+    for (size_t i = 0; i < region.vmobject().page_count(); ++i) {
+        auto& physical_page = region.vmobject().physical_pages()[i];
         builder.appendf("P%x%s(%u) ",
             physical_page ? physical_page->paddr().get() : 0,
             region.should_cow(i) ? "!" : "",
@@ -54,12 +54,12 @@ SharedMemory::~SharedMemory()
 KResult SharedMemory::truncate(int length)
 {
     if (!length) {
-        m_vmo = nullptr;
+        m_vmobject = nullptr;
         return KSuccess;
     }
 
-    if (!m_vmo) {
-        m_vmo = AnonymousVMObject::create_with_size(length);
+    if (!m_vmobject) {
+        m_vmobject = AnonymousVMObject::create_with_size(length);
         return KSuccess;
     }
 
@@ -91,7 +91,7 @@ int SharedMemory::write(FileDescription&, const u8* data, int data_size)
 
 KResultOr<Region*> SharedMemory::mmap(Process& process, FileDescription&, VirtualAddress vaddr, size_t offset, size_t size, int prot)
 {
-    if (!vmo())
+    if (!vmobject())
         return KResult(-ENODEV);
-    return process.allocate_region_with_vmo(vaddr, size, *vmo(), offset, name(), prot);
+    return process.allocate_region_with_vmo(vaddr, size, *vmobject(), offset, name(), prot);
 }
@@ -17,8 +17,8 @@ public:
 
     const String& name() const { return m_name; }
     virtual KResult truncate(off_t) override;
-    AnonymousVMObject* vmo() { return m_vmo.ptr(); }
-    const AnonymousVMObject* vmo() const { return m_vmo.ptr(); }
+    AnonymousVMObject* vmobject() { return m_vmobject.ptr(); }
+    const AnonymousVMObject* vmobject() const { return m_vmobject.ptr(); }
     uid_t uid() const { return m_uid; }
     gid_t gid() const { return m_gid; }
 
@@ -39,5 +39,5 @@ private:
     uid_t m_uid { 0 };
     gid_t m_gid { 0 };
     mode_t m_mode { 0 };
-    RefPtr<AnonymousVMObject> m_vmo;
+    RefPtr<AnonymousVMObject> m_vmobject;
 };
@@ -252,7 +252,7 @@ int Process::sys$munmap(void* addr, size_t size)
         size_t new_range_offset_in_old_region = new_range.base().get() - old_region_range.base().get();
         size_t first_physical_page_of_new_region_in_old_region = new_range_offset_in_old_region / PAGE_SIZE;
         for (size_t i = 0; i < new_region.page_count(); ++i) {
-            new_region.vmo().physical_pages()[i] = old_region->vmo().physical_pages()[first_physical_page_of_new_region_in_old_region + i];
+            new_region.vmobject().physical_pages()[i] = old_region->vmobject().physical_pages()[first_physical_page_of_new_region_in_old_region + i];
         }
         return new_region;
     };
@@ -61,7 +61,7 @@ void* SharedBuffer::ref_for_process_and_get_address(Process& process)
             ref.count++;
             m_total_refs++;
             if (ref.region == nullptr) {
-                ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmo, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
+                ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmobject, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
                 ref.region->set_shared(true);
             }
             sanity_check("ref_for_process_and_get_address");
@@ -20,7 +20,7 @@ private:
 public:
     SharedBuffer(int id, int size)
         : m_shared_buffer_id(id)
-        , m_vmo(AnonymousVMObject::create_with_size(size))
+        , m_vmobject(AnonymousVMObject::create_with_size(size))
     {
 #ifdef SHARED_BUFFER_DEBUG
         dbgprintf("Created shared buffer %d of size %d\n", m_shared_buffer_id, size);
@@ -41,7 +41,7 @@ public:
     void share_globally() { m_global = true; }
     void deref_for_process(Process& process);
     void disown(pid_t pid);
-    size_t size() const { return m_vmo->size(); }
+    size_t size() const { return m_vmobject->size(); }
     void destroy_if_unused();
     void seal();
     int id() const { return m_shared_buffer_id; }
@@ -49,7 +49,7 @@ public:
     int m_shared_buffer_id { -1 };
     bool m_writable { true };
     bool m_global { false };
-    NonnullRefPtr<AnonymousVMObject> m_vmo;
+    NonnullRefPtr<AnonymousVMObject> m_vmobject;
     Vector<Reference, 2> m_refs;
     unsigned m_total_refs { 0 };
 };
@@ -6,8 +6,8 @@
 NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
 {
     InterruptDisabler disabler;
-    if (inode.vmo())
-        return *inode.vmo();
+    if (inode.vmobject())
+        return *inode.vmobject();
     auto vmo = adopt(*new InodeVMObject(inode));
     vmo->inode().set_vmo(*vmo);
     return vmo;
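create_with_inode() above memoizes a single InodeVMObject per inode through the weak pointer installed by set_vmo(): while any strong reference (typically a Region) keeps the object alive, repeated calls return the same instance. A minimal usage sketch under that assumption:

    // Sketch: assumes some RefPtr holder (e.g. a Region) keeps the first
    // object alive between the two calls, so the weak pointer is still set.
    auto first = InodeVMObject::create_with_inode(inode);
    auto second = InodeVMObject::create_with_inode(inode);
    ASSERT(first.ptr() == second.ptr()); // one shared page cache per inode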
@@ -32,7 +32,7 @@ InodeVMObject::InodeVMObject(const InodeVMObject& other)
 
 InodeVMObject::~InodeVMObject()
 {
-    ASSERT(inode().vmo() == this);
+    ASSERT(inode().vmobject() == this);
 }
 
 void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
@@ -111,11 +111,11 @@ void VMObject::for_each_region(Callback callback)
     // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
     // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
     for (auto& region : MM.m_user_regions) {
-        if (&region.vmo() == this)
+        if (&region.vmobject() == this)
             callback(region);
     }
     for (auto& region : MM.m_kernel_regions) {
-        if (&region.vmo() == this)
+        if (&region.vmobject() == this)
             callback(region);
     }
 }
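The FIXME above sketches the eventual fix: have each VMObject track the Regions mapped onto it, so notification costs O(number of mappers) instead of a scan of every region in the system. A hedged sketch of that idea (the m_mapping_regions member is hypothetical and not part of this commit):

    // Hypothetical: Region's constructor/destructor would register and
    // unregister itself with its VMObject.
    Vector<Region*> m_mapping_regions;

    template<typename Callback>
    void VMObject::for_each_region(Callback callback)
    {
        for (auto* region : m_mapping_regions)
            callback(*region);
    }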
@@ -282,7 +282,7 @@ const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress
 bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
     auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
     sti();
     LOCKER(vmo.m_paging_lock);
@@ -307,7 +307,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
     if (vmo.physical_pages()[page_index_in_region]->ref_count() == 1) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
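The ref_count() == 1 branch above is the fast path of the COW handler: a page nobody else shares anymore can simply be remapped read/write, and only genuinely shared pages pay for a copy. A sketch of the visible control flow (the copy half is elided in this hunk, and clear_should_cow() stands in for whatever the real bookkeeping is):

    auto& page_slot = vmo.physical_pages()[page_index_in_region];
    if (page_slot->ref_count() == 1) {
        region.clear_should_cow(page_index_in_region); // hypothetical helper
        remap_region_page(region, page_index_in_region); // back to r/w
        return true;
    }
    // Shared page: allocate a fresh physical page, copy the old contents,
    // store it in page_slot, then remap (not shown in this hunk).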
@@ -338,7 +338,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
     ASSERT(region.page_directory());
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
     ASSERT(vmo.is_inode());
 
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
@@ -425,7 +425,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     }
     auto page_index_in_region = region->page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
-        if (region->vmo().is_inode()) {
+        if (region->vmobject().is_inode()) {
 #ifdef PAGE_FAULT_DEBUG
             dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
 #endif
@@ -657,7 +657,7 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region)
     InterruptDisabler disabler;
     auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE);
     auto& pte = ensure_pte(*region.page_directory(), page_vaddr);
-    auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
+    auto& physical_page = region.vmobject().physical_pages()[page_index_in_region];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
     pte.set_present(true); // FIXME: Maybe we should use the is_readable flag here?
@@ -683,7 +683,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
 {
     InterruptDisabler disabler;
     region.set_page_directory(page_directory);
-    auto& vmo = region.vmo();
+    auto& vmo = region.vmobject();
 #ifdef MM_DEBUG
     dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
 #endif
@@ -725,7 +725,7 @@ bool MemoryManager::unmap_region(Region& region)
         pte.set_user_allowed(false);
         region.page_directory()->flush(vaddr);
 #ifdef MM_DEBUG
-        auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i];
+        auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
         dbgprintf("MM: >> Unmapped V%p => P%x <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
@@ -8,20 +8,20 @@
 
 Region::Region(const Range& range, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(AnonymousVMObject::create_with_size(size()))
+    , m_vmobject(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
 
 Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(InodeVMObject::create_with_inode(*inode))
+    , m_vmobject(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
@@ -29,10 +29,10 @@ Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8
 Region::Region(const Range& range, NonnullRefPtr<VMObject> vmo, size_t offset_in_vmo, const String& name, u8 access, bool cow)
     : m_range(range)
     , m_offset_in_vmo(offset_in_vmo)
-    , m_vmo(move(vmo))
+    , m_vmobject(move(vmo))
     , m_name(name)
     , m_access(access)
-    , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
+    , m_cow_map(Bitmap::create(m_vmobject->page_count(), cow))
 {
     MM.register_region(*this);
 }
@@ -62,7 +62,7 @@ NonnullRefPtr<Region> Region::clone()
         vaddr().get());
 #endif
     // Create a new region backed by the same VMObject.
-    return Region::create_user_accessible(m_range, m_vmo, m_offset_in_vmo, m_name, m_access);
+    return Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmo, m_name, m_access);
 }
 
 #ifdef MM_DEBUG
@@ -75,24 +75,24 @@ NonnullRefPtr<Region> Region::clone()
     // Set up a COW region. The parent (this) region becomes COW as well!
     m_cow_map.fill(true);
     MM.remap_region(current->process().page_directory(), *this);
-    return Region::create_user_accessible(m_range, m_vmo->clone(), m_offset_in_vmo, m_name, m_access, true);
+    return Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmo, m_name, m_access, true);
 }
 
 int Region::commit()
 {
     InterruptDisabler disabler;
 #ifdef MM_DEBUG
-    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmo().page_count(), this, &vmo(), vaddr().get());
+    dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
 #endif
     for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmo().physical_pages()[i].is_null())
+        if (!vmobject().physical_pages()[i].is_null())
             continue;
         auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (!physical_page) {
            kprintf("MM: commit was unable to allocate a physical page\n");
            return -ENOMEM;
         }
-        vmo().physical_pages()[i] = move(physical_page);
+        vmobject().physical_pages()[i] = move(physical_page);
         MM.remap_region_page(*this, i);
     }
     return 0;
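Region::commit() above eagerly backs the whole region with zero-filled physical pages (regions are otherwise populated lazily at page-fault time), failing with -ENOMEM when the allocator runs dry. A usage sketch:

    // Sketch: pre-fault a freshly allocated region so later accesses
    // cannot fail on allocation.
    int rc = region->commit();
    if (rc < 0)
        return rc; // -ENOMEM per the kprintf path above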
@@ -102,7 +102,7 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        if (m_vmo->physical_pages()[first_page_index() + i])
+        if (m_vmobject->physical_pages()[first_page_index() + i])
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -112,7 +112,7 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmo->physical_pages()[first_page_index() + i];
+        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
         if (physical_page && physical_page->ref_count() > 1)
             bytes += PAGE_SIZE;
     }
@@ -38,8 +38,8 @@ public:
 
     void set_name(const String& name) { m_name = name; }
 
-    const VMObject& vmo() const { return *m_vmo; }
-    VMObject& vmo() { return *m_vmo; }
+    const VMObject& vmobject() const { return *m_vmobject; }
+    VMObject& vmobject() { return *m_vmobject; }
 
     bool is_shared() const { return m_shared; }
     void set_shared(bool shared) { m_shared = shared; }
@@ -120,7 +120,7 @@ private:
     RefPtr<PageDirectory> m_page_directory;
     Range m_range;
     size_t m_offset_in_vmo { 0 };
-    NonnullRefPtr<VMObject> m_vmo;
+    NonnullRefPtr<VMObject> m_vmobject;
     String m_name;
     u8 m_access { 0 };
     bool m_shared { false };