Kernel: Split VMObject into two classes: Anonymous- and InodeVMObject
InodeVMObject is a VMObject with an underlying Inode in the filesystem. AnonymousVMObject has no Inode. I'm happy that InodeVMObject::inode() can now return Inode& where VMObject::inode() used to return Inode*. :^)
parent cb2d572a14
commit 6bdb81ad87

16 changed files with 286 additions and 200 deletions
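
For orientation, here is a minimal sketch (not itself part of the diff) of the hierarchy this commit ends up with, and how the old VMObject factory calls map onto the new subclasses:

    // VMObject (abstract; clone() becomes pure virtual)
    // ├── AnonymousVMObject — no backing Inode
    // └── InodeVMObject     — backed by an Inode in the filesystem
    //
    // Call-site migration seen throughout the diff below:
    //   VMObject::create_anonymous(size)               -> AnonymousVMObject::create_with_size(size)
    //   VMObject::create_for_physical_range(paddr, sz) -> AnonymousVMObject::create_for_physical_range(paddr, sz)
    //   VMObject::create_file_backed(inode)            -> InodeVMObject::create_with_inode(*inode)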

Kernel/Devices/BXVGADevice.cpp

@@ -2,6 +2,7 @@
 #include <Kernel/IO.h>
 #include <Kernel/PCI.h>
 #include <Kernel/Process.h>
+#include <Kernel/VM/AnonymousVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <LibC/errno_numbers.h>
@@ -89,7 +90,7 @@ KResultOr<Region*> BXVGADevice::mmap(Process& process, FileDescription&, Virtual
 {
     ASSERT(offset == 0);
     ASSERT(size == framebuffer_size_in_bytes());
-    auto vmo = VMObject::create_for_physical_range(framebuffer_address(), framebuffer_size_in_bytes());
+    auto vmo = AnonymousVMObject::create_for_physical_range(framebuffer_address(), framebuffer_size_in_bytes());
     auto* region = process.allocate_region_with_vmo(
         preferred_vaddr,
         framebuffer_size_in_bytes(),

Kernel/FileSystem/Inode.cpp

@@ -3,7 +3,7 @@
 #include <Kernel/FileSystem/Inode.h>
 #include <Kernel/FileSystem/InodeWatcher.h>
 #include <Kernel/Net/LocalSocket.h>
-#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/InodeVMObject.h>

 HashTable<Inode*>& all_inodes()
 {

Kernel/FileSystem/Inode.h

@@ -11,9 +11,9 @@
 #include <Kernel/Lock.h>

 class FileDescription;
+class InodeVMObject;
 class InodeWatcher;
 class LocalSocket;
-class VMObject;

 class Inode : public RefCounted<Inode>, public Weakable<Inode> {
     friend class VFS;
@@ -69,8 +69,8 @@ public:
     void will_be_destroyed();

     void set_vmo(VMObject&);
-    VMObject* vmo() { return m_vmo.ptr(); }
-    const VMObject* vmo() const { return m_vmo.ptr(); }
+    InodeVMObject* vmo() { return m_vmo.ptr(); }
+    const InodeVMObject* vmo() const { return m_vmo.ptr(); }

     static void sync();

@@ -88,7 +88,7 @@ protected:
 private:
     FS& m_fs;
     unsigned m_index { 0 };
-    WeakPtr<VMObject> m_vmo;
+    WeakPtr<InodeVMObject> m_vmo;
     RefPtr<LocalSocket> m_socket;
     HashTable<InodeWatcher*> m_watchers;
     bool m_metadata_dirty { false };

Kernel/FileSystem/SharedMemory.cpp

@@ -2,7 +2,7 @@
 #include <Kernel/FileSystem/SharedMemory.h>
 #include <Kernel/Lock.h>
 #include <Kernel/Process.h>
-#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/AnonymousVMObject.h>

 Lockable<HashMap<String, RefPtr<SharedMemory>>>& shared_memories()
 {
@@ -59,7 +59,7 @@ KResult SharedMemory::truncate(int length)
     }

     if (!m_vmo) {
-        m_vmo = VMObject::create_anonymous(length);
+        m_vmo = AnonymousVMObject::create_with_size(length);
         return KSuccess;
     }

Kernel/FileSystem/SharedMemory.h

@@ -7,7 +7,7 @@
 #include <Kernel/KResult.h>
 #include <Kernel/UnixTypes.h>

-class VMObject;
+class AnonymousVMObject;

 class SharedMemory : public File {
 public:
@@ -17,8 +17,8 @@ public:

     const String& name() const { return m_name; }
     virtual KResult truncate(off_t) override;
-    VMObject* vmo() { return m_vmo.ptr(); }
-    const VMObject* vmo() const { return m_vmo.ptr(); }
+    AnonymousVMObject* vmo() { return m_vmo.ptr(); }
+    const AnonymousVMObject* vmo() const { return m_vmo.ptr(); }
     uid_t uid() const { return m_uid; }
     gid_t gid() const { return m_gid; }

@@ -39,5 +39,5 @@ private:
     uid_t m_uid { 0 };
     gid_t m_gid { 0 };
     mode_t m_mode { 0 };
-    RefPtr<VMObject> m_vmo;
+    RefPtr<AnonymousVMObject> m_vmo;
 };

Kernel/Makefile

@@ -20,6 +20,8 @@ KERNEL_OBJS = \
     VM/MemoryManager.o \
     VM/Region.o \
     VM/VMObject.o \
+    VM/AnonymousVMObject.o \
+    VM/InodeVMObject.o \
     VM/PageDirectory.o \
     VM/PhysicalPage.o \
     VM/PhysicalRegion.o \

Kernel/Process.cpp

@@ -26,6 +26,7 @@
 #include <Kernel/StdLib.h>
 #include <Kernel/Syscall.h>
 #include <Kernel/TTY/MasterPTY.h>
+#include <Kernel/VM/InodeVMObject.h>
 #include <Kernel/kmalloc.h>
 #include <LibC/errno_numbers.h>
 #include <LibC/signal_numbers.h>
@@ -339,7 +340,8 @@ int Process::do_exec(String path, Vector<String> arguments, Vector<String> envir
 #endif
     ProcessPagingScope paging_scope(*this);

-    auto vmo = VMObject::create_file_backed(description->inode());
+    ASSERT(description->inode());
+    auto vmo = InodeVMObject::create_with_inode(*description->inode());
     RefPtr<Region> region = allocate_region_with_vmo(VirtualAddress(), metadata.size, vmo, 0, description->absolute_path(), PROT_READ);
     ASSERT(region);

Kernel/SharedBuffer.h

@@ -1,7 +1,8 @@
 #pragma once

-#include <Kernel/VM/MemoryManager.h>
 #include <AK/OwnPtr.h>
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/MemoryManager.h>

 struct SharedBuffer {
 private:
@@ -15,10 +16,11 @@ private:
     unsigned count { 0 };
     Region* region { nullptr };
 };
+
 public:
     SharedBuffer(int id, int size)
         : m_shared_buffer_id(id)
-        , m_vmo(VMObject::create_anonymous(size))
+        , m_vmo(AnonymousVMObject::create_with_size(size))
     {
 #ifdef SHARED_BUFFER_DEBUG
         dbgprintf("Created shared buffer %d of size %d\n", m_shared_buffer_id, size);
@@ -47,7 +49,7 @@ public:
     int m_shared_buffer_id { -1 };
     bool m_writable { true };
     bool m_global { false };
-    NonnullRefPtr<VMObject> m_vmo;
+    NonnullRefPtr<AnonymousVMObject> m_vmo;
     Vector<Reference, 2> m_refs;
     unsigned m_total_refs { 0 };
 };

Kernel/VM/AnonymousVMObject.cpp (new file, 41 lines)

@@ -0,0 +1,41 @@
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/PhysicalPage.h>
+
+NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
+{
+    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
+    return adopt(*new AnonymousVMObject(size));
+}
+
+NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
+{
+    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
+    return adopt(*new AnonymousVMObject(paddr, size));
+}
+
+AnonymousVMObject::AnonymousVMObject(size_t size)
+    : VMObject(size, ShouldFillPhysicalPages::Yes)
+{
+}
+
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
+    : VMObject(size, ShouldFillPhysicalPages::No)
+{
+    for (size_t i = 0; i < size; i += PAGE_SIZE)
+        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
+    ASSERT(m_physical_pages.size() == page_count());
+}
+
+AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
+    : VMObject(other)
+{
+}
+
+AnonymousVMObject::~AnonymousVMObject()
+{
+}
+
+NonnullRefPtr<VMObject> AnonymousVMObject::clone()
+{
+    return adopt(*new AnonymousVMObject(*this));
+}

Kernel/VM/AnonymousVMObject.h (new file, 24 lines)

@@ -0,0 +1,24 @@
+#pragma once
+
+#include <Kernel/VM/PhysicalAddress.h>
+#include <Kernel/VM/VMObject.h>
+
+class AnonymousVMObject final : public VMObject {
+public:
+    virtual ~AnonymousVMObject() override;
+
+    static NonnullRefPtr<AnonymousVMObject> create_with_size(size_t);
+    static NonnullRefPtr<AnonymousVMObject> create_for_physical_range(PhysicalAddress, size_t);
+    virtual NonnullRefPtr<VMObject> clone() override;
+
+private:
+    explicit AnonymousVMObject(size_t);
+    explicit AnonymousVMObject(const AnonymousVMObject&);
+    AnonymousVMObject(PhysicalAddress, size_t);
+
+    AnonymousVMObject& operator=(const AnonymousVMObject&) = delete;
+    AnonymousVMObject& operator=(AnonymousVMObject&&) = delete;
+    AnonymousVMObject(AnonymousVMObject&&) = delete;
+
+    virtual bool is_anonymous() const override { return true; }
+};
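
Both factories round the requested size up to whole pages. A short usage sketch, mirroring the call sites touched elsewhere in this commit (SharedMemory::truncate() and BXVGADevice::mmap()):

    // Plain anonymous memory: page slots start out null and are filled in
    // on demand by the page fault handler.
    auto vmo = AnonymousVMObject::create_with_size(length);

    // Memory-mapped hardware such as a framebuffer: PhysicalPages for the
    // given physical range are created eagerly, so nothing is ever paged in.
    auto framebuffer_vmo = AnonymousVMObject::create_for_physical_range(framebuffer_address(), framebuffer_size_in_bytes());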

Kernel/VM/InodeVMObject.cpp (new file, 131 lines)

@@ -0,0 +1,131 @@
+#include <Kernel/FileSystem/Inode.h>
+#include <Kernel/VM/InodeVMObject.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/Region.h>
+
+NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
+{
+    InterruptDisabler disabler;
+    if (inode.vmo())
+        return *inode.vmo();
+    auto vmo = adopt(*new InodeVMObject(inode));
+    vmo->inode().set_vmo(*vmo);
+    return vmo;
+}
+
+NonnullRefPtr<VMObject> InodeVMObject::clone()
+{
+    return adopt(*new InodeVMObject(*this));
+}
+
+InodeVMObject::InodeVMObject(Inode& inode)
+    : VMObject(ceil_div(inode.size(), PAGE_SIZE) * PAGE_SIZE, ShouldFillPhysicalPages::Yes)
+    , m_inode(inode)
+{
+}
+
+InodeVMObject::InodeVMObject(const InodeVMObject& other)
+    : VMObject(other)
+    , m_inode(other.m_inode)
+{
+}
+
+InodeVMObject::~InodeVMObject()
+{
+    ASSERT(inode().vmo() == this);
+}
+
+void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
+{
+    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
+        m_inode->fsid(), m_inode->index(),
+        old_size, new_size);
+
+    InterruptDisabler disabler;
+
+    auto old_page_count = page_count();
+    m_size = new_size;
+
+    if (page_count() > old_page_count) {
+        // Add null pages and let the fault handler page these in when that day comes.
+        for (auto i = old_page_count; i < page_count(); ++i)
+            m_physical_pages.append(nullptr);
+    } else {
+        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
+        for (auto i = page_count(); i < old_page_count; ++i)
+            m_physical_pages.take_last();
+    }
+
+    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
+    for_each_region([](Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
+{
+    (void)size;
+    (void)data;
+    InterruptDisabler disabler;
+    ASSERT(offset >= 0);
+
+    // FIXME: Only invalidate the parts that actually changed.
+    for (auto& physical_page : m_physical_pages)
+        physical_page = nullptr;
+
+#if 0
+    size_t current_offset = offset;
+    size_t remaining_bytes = size;
+    const u8* data_ptr = data;
+
+    auto to_page_index = [] (size_t offset) -> size_t {
+        return offset / PAGE_SIZE;
+    };
+
+    if (current_offset & PAGE_MASK) {
+        size_t page_index = to_page_index(current_offset);
+        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+        remaining_bytes -= bytes_to_copy;
+    }
+
+    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
+        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+    }
+#endif
+
+    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
+    for_each_region([](Region& region) {
+        ASSERT(region.page_directory());
+        MM.remap_region(*region.page_directory(), region);
+    });
+}
+
+template<typename Callback>
+void VMObject::for_each_region(Callback callback)
+{
+    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
+    // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
+    for (auto* region : MM.m_user_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+    for (auto* region : MM.m_kernel_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }
+}
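
Note how create_with_inode() keeps the mapping one-to-one: the Inode holds a WeakPtr back to its InodeVMObject (see the Inode.h hunk above), so a second caller gets the already-registered object instead of a parallel set of physical pages. A minimal sketch of the resulting invariant:

    // Two mappings of the same inode share a single InodeVMObject.
    auto vmo1 = InodeVMObject::create_with_inode(inode);
    auto vmo2 = InodeVMObject::create_with_inode(inode);
    ASSERT(vmo1.ptr() == vmo2.ptr());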

Kernel/VM/InodeVMObject.h (new file, 30 lines)

@@ -0,0 +1,30 @@
+#pragma once
+
+#include <Kernel/UnixTypes.h>
+#include <Kernel/VM/VMObject.h>
+
+class InodeVMObject final : public VMObject {
+public:
+    virtual ~InodeVMObject() override;
+
+    static NonnullRefPtr<InodeVMObject> create_with_inode(Inode&);
+    virtual NonnullRefPtr<VMObject> clone() override;
+
+    Inode& inode() { return *m_inode; }
+    const Inode& inode() const { return *m_inode; }
+
+    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
+    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
+
+private:
+    explicit InodeVMObject(Inode&);
+    explicit InodeVMObject(const InodeVMObject&);
+
+    InodeVMObject& operator=(const InodeVMObject&) = delete;
+    InodeVMObject& operator=(InodeVMObject&&) = delete;
+    InodeVMObject(InodeVMObject&&) = delete;
+
+    virtual bool is_inode() const override { return true; }
+
+    NonnullRefPtr<Inode> m_inode;
+};

Kernel/VM/MemoryManager.cpp

@@ -6,6 +6,8 @@
 #include <Kernel/Arch/i386/CPU.h>
 #include <Kernel/FileSystem/Inode.h>
 #include <Kernel/Multiboot.h>
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 #include <Kernel/VM/MemoryManager.h>

 //#define MM_DEBUG
@@ -352,10 +354,11 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
 {
     ASSERT(region.page_directory());
     auto& vmo = region.vmo();
-    ASSERT(!vmo.is_anonymous());
-    ASSERT(vmo.inode());
+    ASSERT(vmo.is_inode());

-    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
+    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
+
+    auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];

     InterruptFlagSaver saver;

@@ -374,8 +377,8 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
 #endif
     sti();
     u8 page_buffer[PAGE_SIZE];
-    auto& inode = *vmo.inode();
-    auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
+    auto& inode = inode_vmobject.inode();
+    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
     if (nread < 0) {
         kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
         return false;
@@ -435,7 +438,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     }
     auto page_index_in_region = region->page_index_from_address(fault.vaddr());
     if (fault.type() == PageFault::Type::PageNotPresent) {
-        if (region->vmo().inode()) {
+        if (region->vmo().is_inode()) {
 #ifdef PAGE_FAULT_DEBUG
             dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
 #endif
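
With the inode pointer gone from the base class, page_in_from_inode() now asserts vmo.is_inode() and then downcasts. A minimal sketch of the idiom; the kernel presumably avoids dynamic_cast here, so the is_inode() check must always precede the static_cast:

    auto& vmo = region.vmo();
    ASSERT(vmo.is_inode());                                   // type check first...
    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);  // ...then the unchecked downcast is safe
    auto& inode = inode_vmobject.inode();                     // returns Inode&, never a null pointer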

Kernel/VM/Region.cpp

@@ -1,13 +1,14 @@
 #include <Kernel/FileSystem/Inode.h>
 #include <Kernel/Process.h>
 #include <Kernel/Thread.h>
+#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/Region.h>
-#include <Kernel/VM/VMObject.h>

 Region::Region(const Range& range, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(VMObject::create_anonymous(size()))
+    , m_vmo(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
     , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
@@ -17,7 +18,7 @@ Region::Region(const Range& range, const String& name, u8 access, bool cow)

 Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
     : m_range(range)
-    , m_vmo(VMObject::create_file_backed(move(inode)))
+    , m_vmo(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
     , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
@@ -48,8 +49,7 @@ Region::~Region()
 bool Region::page_in()
 {
     ASSERT(m_page_directory);
-    ASSERT(!vmo().is_anonymous());
-    ASSERT(vmo().inode());
+    ASSERT(vmo().is_inode());
 #ifdef MM_DEBUG
     dbgprintf("MM: page_in %u pages\n", page_count());
 #endif

Kernel/VM/VMObject.cpp

@@ -3,166 +3,22 @@
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/VMObject.h>

-NonnullRefPtr<VMObject> VMObject::create_file_backed(RefPtr<Inode>&& inode)
-{
-    InterruptDisabler disabler;
-    if (inode->vmo())
-        return *inode->vmo();
-    auto vmo = adopt(*new VMObject(move(inode)));
-    vmo->inode()->set_vmo(*vmo);
-    return vmo;
-}
-
-NonnullRefPtr<VMObject> VMObject::create_anonymous(size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    return adopt(*new VMObject(size));
-}
-
-NonnullRefPtr<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
-{
-    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    return adopt(*new VMObject(paddr, size));
-}
-
-NonnullRefPtr<VMObject> VMObject::clone()
-{
-    return adopt(*new VMObject(*this));
-}
-
-VMObject::VMObject(VMObject& other)
-    : m_inode_offset(other.m_inode_offset)
-    , m_size(other.m_size)
-    , m_inode(other.m_inode)
+VMObject::VMObject(const VMObject& other)
+    : m_size(other.m_size)
     , m_physical_pages(other.m_physical_pages)
 {
     MM.register_vmo(*this);
 }

-VMObject::VMObject(size_t size)
+VMObject::VMObject(size_t size, ShouldFillPhysicalPages should_fill_physical_pages)
     : m_size(size)
 {
     MM.register_vmo(*this);
-    m_physical_pages.resize(page_count());
-}
-
-VMObject::VMObject(PhysicalAddress paddr, size_t size)
-    : m_size(size)
-{
-    MM.register_vmo(*this);
-    for (size_t i = 0; i < size; i += PAGE_SIZE) {
-        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
-    }
-    ASSERT(m_physical_pages.size() == page_count());
-}
-
-VMObject::VMObject(RefPtr<Inode>&& inode)
-    : m_inode(move(inode))
-{
-    ASSERT(m_inode);
-    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
-    m_physical_pages.resize(page_count());
-    MM.register_vmo(*this);
+    if (should_fill_physical_pages == ShouldFillPhysicalPages::Yes)
+        m_physical_pages.resize(page_count());
 }

 VMObject::~VMObject()
 {
-    if (m_inode)
-        ASSERT(m_inode->vmo() == this);
     MM.unregister_vmo(*this);
 }
-
-template<typename Callback>
-void VMObject::for_each_region(Callback callback)
-{
-    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
-    // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
-    for (auto* region : MM.m_user_regions) {
-        if (&region->vmo() == this)
-            callback(*region);
-    }
-    for (auto* region : MM.m_kernel_regions) {
-        if (&region->vmo() == this)
-            callback(*region);
-    }
-}
-
-void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
-        m_inode->fsid(), m_inode->index(),
-        old_size, new_size);
-
-    InterruptDisabler disabler;
-
-    auto old_page_count = page_count();
-    m_size = new_size;
-
-    if (page_count() > old_page_count) {
-        // Add null pages and let the fault handler page these in when that day comes.
-        for (auto i = old_page_count; i < page_count(); ++i)
-            m_physical_pages.append(nullptr);
-    } else {
-        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
-        for (auto i = page_count(); i < old_page_count; ++i)
-            m_physical_pages.take_last();
-    }
-
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([](Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}
-
-void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const u8* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([](Region& region) {
-        ASSERT(region.page_directory());
-        MM.remap_region(*region.page_directory(), region);
-    });
-}

Kernel/VM/VMObject.h

@@ -1,14 +1,10 @@
 #pragma once

-#include <AK/AKString.h>
-#include <AK/Badge.h>
-#include <AK/RefPtr.h>
 #include <AK/RefCounted.h>
+#include <AK/RefPtr.h>
 #include <AK/Vector.h>
 #include <AK/Weakable.h>
 #include <Kernel/Lock.h>
-#include <Kernel/UnixTypes.h>
-#include <Kernel/VM/PhysicalAddress.h>

 class Inode;
 class PhysicalPage;
@@ -18,39 +14,37 @@ class VMObject : public RefCounted<VMObject>
     friend class MemoryManager;

 public:
-    static NonnullRefPtr<VMObject> create_file_backed(RefPtr<Inode>&&);
-    static NonnullRefPtr<VMObject> create_anonymous(size_t);
-    static NonnullRefPtr<VMObject> create_for_physical_range(PhysicalAddress, size_t);
-    NonnullRefPtr<VMObject> clone();
-
-    ~VMObject();
-    bool is_anonymous() const { return !m_inode; }
-
-    Inode* inode() { return m_inode.ptr(); }
-    const Inode* inode() const { return m_inode.ptr(); }
-    size_t inode_offset() const { return m_inode_offset; }
+    virtual ~VMObject();
+
+    virtual NonnullRefPtr<VMObject> clone() = 0;
+
+    virtual bool is_anonymous() const { return false; }
+    virtual bool is_inode() const { return false; }

     int page_count() const { return m_size / PAGE_SIZE; }
     const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
     Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }

-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
-    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
-
     size_t size() const { return m_size; }

-private:
-    VMObject(RefPtr<Inode>&&);
-    explicit VMObject(VMObject&);
-    explicit VMObject(size_t);
-    VMObject(PhysicalAddress, size_t);
+protected:
+    enum ShouldFillPhysicalPages {
+        No = 0,
+        Yes
+    };
+    VMObject(size_t, ShouldFillPhysicalPages);
+    explicit VMObject(const VMObject&);

     template<typename Callback>
     void for_each_region(Callback);

-    off_t m_inode_offset { 0 };
     size_t m_size { 0 };
-    RefPtr<Inode> m_inode;
     Vector<RefPtr<PhysicalPage>> m_physical_pages;

+private:
+    VMObject& operator=(const VMObject&) = delete;
+    VMObject& operator=(VMObject&&) = delete;
+    VMObject(VMObject&&) = delete;
+
     Lock m_paging_lock { "VMObject" };
 };
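
The protected ShouldFillPhysicalPages flag replaces the old tangle of constructors: Yes pre-sizes m_physical_pages with null slots to be demand-paged later, while No leaves the vector empty so a subclass can append eagerly created pages itself, as the two AnonymousVMObject constructors from this commit illustrate:

    // Demand-paged: slots exist but stay null until a page fault fills them.
    AnonymousVMObject::AnonymousVMObject(size_t size)
        : VMObject(size, ShouldFillPhysicalPages::Yes)
    {
    }

    // Physical range: opts out of pre-filling and appends its own pages.
    AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
        : VMObject(size, ShouldFillPhysicalPages::No)
    {
        for (size_t i = 0; i < size; i += PAGE_SIZE)
            m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
    }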