Mirror of https://github.com/RGBCube/serenity, synced 2025-05-30 21:58:10 +00:00
Kernel: Split InodeVMObject into two subclasses
We now have PrivateInodeVMObject and SharedInodeVMObject, corresponding to MAP_PRIVATE and MAP_SHARED respectively. Note that PrivateInodeVMObject is not used yet.
This commit is contained in:
parent 07a26aece3
commit 651417a085
9 changed files with 371 additions and 166 deletions
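
To make the intent of the split concrete, here is a minimal, standalone C++ sketch of how such a hierarchy could fit together. It is not the kernel code: the simplified Inode and VMObject stand-ins, the create_with_inode helpers, and the vmobject_for_mmap dispatch are illustrative assumptions; only the class names and the MAP_PRIVATE/MAP_SHARED correspondence come from the commit message.

// Standalone sketch (not the kernel sources): simplified stand-ins illustrate
// the intended hierarchy. Only the class names follow the commit; members,
// helpers, and the mmap-flag dispatch below are assumptions for illustration.
#include <cstddef>
#include <cstdio>
#include <memory>

struct Inode {
    size_t size_in_bytes { 0 };
};

class VMObject {
public:
    explicit VMObject(size_t size)
        : m_size(size)
    {
    }
    virtual ~VMObject() = default;
    size_t size() const { return m_size; }

private:
    size_t m_size { 0 };
};

// Shared base: everything common to inode-backed memory objects lives here.
class InodeVMObject : public VMObject {
public:
    Inode& inode() { return m_inode; }

protected:
    InodeVMObject(Inode& inode, size_t size)
        : VMObject(size)
        , m_inode(inode)
    {
    }
    Inode& m_inode;
};

// Backs MAP_SHARED mappings: writes are meant to reach the underlying file.
class SharedInodeVMObject final : public InodeVMObject {
public:
    static std::unique_ptr<SharedInodeVMObject> create_with_inode(Inode& inode)
    {
        return std::unique_ptr<SharedInodeVMObject>(new SharedInodeVMObject(inode, inode.size_in_bytes));
    }

private:
    SharedInodeVMObject(Inode& inode, size_t size)
        : InodeVMObject(inode, size)
    {
    }
};

// Backs MAP_PRIVATE mappings: writes are meant to stay private (copy-on-write).
class PrivateInodeVMObject final : public InodeVMObject {
public:
    static std::unique_ptr<PrivateInodeVMObject> create_with_inode(Inode& inode)
    {
        return std::unique_ptr<PrivateInodeVMObject>(new PrivateInodeVMObject(inode, inode.size_in_bytes));
    }

private:
    PrivateInodeVMObject(Inode& inode, size_t size)
        : InodeVMObject(inode, size)
    {
    }
};

// Hypothetical dispatch an mmap() path could perform once both subclasses exist.
std::unique_ptr<InodeVMObject> vmobject_for_mmap(Inode& inode, bool map_shared)
{
    if (map_shared)
        return SharedInodeVMObject::create_with_inode(inode);
    return PrivateInodeVMObject::create_with_inode(inode);
}

int main()
{
    Inode inode { 4096 };
    auto vmobject = vmobject_for_mmap(inode, /* map_shared */ true);
    std::printf("vmobject covers %zu bytes\n", vmobject->size());
}

The shape to notice is that everything common to inode-backed memory lives in the InodeVMObject base, while the two leaves only differ in how writes are expected to behave; the diff below shows SharedInodeVMObject being rebased onto that shared parent.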
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,9 +25,9 @@
  */

 #include <Kernel/FileSystem/Inode.h>
-#include <Kernel/VM/SharedInodeVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/Region.h>
+#include <Kernel/VM/SharedInodeVMObject.h>

 namespace Kernel {

@@ -47,15 +47,12 @@ NonnullRefPtr<VMObject> SharedInodeVMObject::clone()
 }

 SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)
-    : VMObject(size)
-    , m_inode(inode)
-    , m_dirty_pages(page_count(), false)
+    : InodeVMObject(inode, size)
 {
 }

 SharedInodeVMObject::SharedInodeVMObject(const SharedInodeVMObject& other)
-    : VMObject(other)
-    , m_inode(other.m_inode)
+    : InodeVMObject(other)
 {
 }

@@ -64,135 +61,4 @@ SharedInodeVMObject::~SharedInodeVMObject()
     ASSERT(inode().shared_vmobject() == this);
 }

-size_t SharedInodeVMObject::amount_clean() const
-{
-    size_t count = 0;
-    ASSERT(page_count() == (size_t)m_dirty_pages.size());
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!m_dirty_pages.get(i) && m_physical_pages[i])
-            ++count;
-    }
-    return count * PAGE_SIZE;
-}
-
-size_t SharedInodeVMObject::amount_dirty() const
-{
-    size_t count = 0;
-    for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
-        if (m_dirty_pages.get(i))
-            ++count;
-    }
-    return count * PAGE_SIZE;
-}
-
-void SharedInodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    dbg() << "VMObject::inode_size_changed: {" << m_inode->fsid() << ":" << m_inode->index() << "} " << old_size << " -> " << new_size;
-
-    InterruptDisabler disabler;
-
-    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
-    m_physical_pages.resize(new_page_count);
-
-    m_dirty_pages.grow(new_page_count, false);
-
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-}
-
-void SharedInodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const u8* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-}
-
-int SharedInodeVMObject::release_all_clean_pages()
-{
-    LOCKER(m_paging_lock);
-    return release_all_clean_pages_impl();
-}
-
-int SharedInodeVMObject::release_all_clean_pages_impl()
-{
-    int count = 0;
-    InterruptDisabler disabler;
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
-            m_physical_pages[i] = nullptr;
-            ++count;
-        }
-    }
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-    return count;
-}
-
-u32 SharedInodeVMObject::writable_mappings() const
-{
-    u32 count = 0;
-    const_cast<SharedInodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_writable())
-            ++count;
-    });
-    return count;
-}
-
-u32 SharedInodeVMObject::executable_mappings() const
-{
-    u32 count = 0;
-    const_cast<SharedInodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_executable())
-            ++count;
-    });
-    return count;
-}
-
 }
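
The long block removed above (presumably hoisted into the InodeVMObject base, in line with the commit's stated split) is mostly bookkeeping over a dirty-page bitmap and the resident-page table; amount_clean() and amount_dirty() are plain walks over them. Below is a standalone sketch of that accounting, using std::vector in place of the kernel's Bitmap and physical-page vector; the struct, the stand-in types, and the sample values are assumptions for illustration only.

// Standalone model of the clean/dirty accounting seen in the removed functions:
// a page is "dirty" when its bit is set, "clean" when it is resident but not dirty.
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t PAGE_SIZE = 4096;

struct PageAccounting {
    std::vector<bool> dirty_pages;     // stand-in for the kernel's m_dirty_pages bitmap
    std::vector<void*> physical_pages; // stand-in for m_physical_pages (nullptr = not resident)

    size_t amount_dirty() const
    {
        size_t count = 0;
        for (size_t i = 0; i < dirty_pages.size(); ++i) {
            if (dirty_pages[i])
                ++count;
        }
        return count * PAGE_SIZE;
    }

    size_t amount_clean() const
    {
        size_t count = 0;
        for (size_t i = 0; i < dirty_pages.size(); ++i) {
            if (!dirty_pages[i] && physical_pages[i])
                ++count;
        }
        return count * PAGE_SIZE;
    }
};

int main()
{
    int page_a = 0;
    int page_b = 0;
    PageAccounting accounting {
        { true, false, false },        // page 0 is dirty
        { &page_a, &page_b, nullptr }, // pages 0 and 1 are resident, page 2 is not
    };
    std::printf("dirty: %zu bytes, clean: %zu bytes\n",
                accounting.amount_dirty(), accounting.amount_clean());
    // Prints "dirty: 4096 bytes, clean: 4096 bytes": page 0 is dirty,
    // page 1 is clean and resident, page 2 is not resident at all.
}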