
Kernel: Split VMObject into two classes: Anonymous- and InodeVMObject

InodeVMObject is a VMObject with an underlying Inode in the filesystem.
AnonymousVMObject has no Inode.

I'm happy that InodeVMObject::inode() can now return Inode& instead of
VMObject::inode() returning Inode*. :^)
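
For orientation, a minimal sketch of the resulting hierarchy follows. The VMObject interface mirrors the diff below; the two subclasses are outlined from the commit message alone, so their members are illustrative (constructors, clone() bodies, and AK/Kernel includes are elided, not taken from the commit):

// Sketch only: base-class API as in the diff below; subclass bodies assumed
// from the commit message. Other bases and members are elided.
class VMObject : public RefCounted<VMObject> {
public:
    virtual ~VMObject();
    virtual NonnullRefPtr<VMObject> clone() = 0;
    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
};

class AnonymousVMObject final : public VMObject {
public:
    // Plain anonymous memory: no backing Inode at all.
    virtual NonnullRefPtr<VMObject> clone() override;
    virtual bool is_anonymous() const override { return true; }
};

class InodeVMObject final : public VMObject {
public:
    virtual NonnullRefPtr<VMObject> clone() override;
    virtual bool is_inode() const override { return true; }

    // The inode is always present, so the accessor can return Inode&
    // instead of the old nullable Inode* on VMObject.
    Inode& inode() { return *m_inode; }
    const Inode& inode() const { return *m_inode; }

private:
    NonnullRefPtr<Inode> m_inode; // never null by construction
};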
Andreas Kling 2019-08-07 18:06:17 +02:00
parent cb2d572a14
commit 6bdb81ad87
16 changed files with 286 additions and 200 deletions

Kernel/VM/VMObject.h

@@ -1,14 +1,10 @@
#pragma once
#include <AK/AKString.h>
#include <AK/Badge.h>
#include <AK/RefPtr.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Lock.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/PhysicalAddress.h>
class Inode;
class PhysicalPage;
@@ -18,39 +14,37 @@ class VMObject : public RefCounted<VMObject>
    friend class MemoryManager;
public:
    static NonnullRefPtr<VMObject> create_file_backed(RefPtr<Inode>&&);
    static NonnullRefPtr<VMObject> create_anonymous(size_t);
    static NonnullRefPtr<VMObject> create_for_physical_range(PhysicalAddress, size_t);
    NonnullRefPtr<VMObject> clone();
    virtual ~VMObject();
    ~VMObject();
    bool is_anonymous() const { return !m_inode; }
    virtual NonnullRefPtr<VMObject> clone() = 0;
    Inode* inode() { return m_inode.ptr(); }
    const Inode* inode() const { return m_inode.ptr(); }
    size_t inode_offset() const { return m_inode_offset; }
    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
    int page_count() const { return m_size / PAGE_SIZE; }
    const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
    Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
    size_t size() const { return m_size; }
private:
    VMObject(RefPtr<Inode>&&);
    explicit VMObject(VMObject&);
    explicit VMObject(size_t);
    VMObject(PhysicalAddress, size_t);
protected:
    enum ShouldFillPhysicalPages {
        No = 0,
        Yes
    };
    VMObject(size_t, ShouldFillPhysicalPages);
    explicit VMObject(const VMObject&);
    template<typename Callback>
    void for_each_region(Callback);
    off_t m_inode_offset { 0 };
    size_t m_size { 0 };
    RefPtr<Inode> m_inode;
    Vector<RefPtr<PhysicalPage>> m_physical_pages;
private:
    VMObject& operator=(const VMObject&) = delete;
    VMObject& operator=(VMObject&&) = delete;
    VMObject(VMObject&&) = delete;
    Lock m_paging_lock { "VMObject" };
};
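
One practical consequence for callers: the old pattern of null-checking VMObject::inode() becomes a type check on is_inode() followed by a downcast. The example() function below is a hypothetical illustration of that pattern, not code from this commit; the call sites the commit actually touches (MemoryManager, Region, and friends) may differ in detail.

// Hypothetical call site, for illustration only.
void example(VMObject& vmobject)
{
    // Before this commit: if (Inode* inode = vmobject.inode()) { ... }
    if (vmobject.is_inode()) {
        auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject);
        Inode& inode = inode_vmobject.inode(); // non-null by type, hence Inode&
        (void)inode; // ... page in from the inode, propagate size changes, etc.
    } else {
        // Anonymous memory: nothing inode-related to do.
    }
}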