diff --git a/Kernel/MemoryManager.cpp b/Kernel/MemoryManager.cpp
index e404560c65..0a401eddbd 100644
--- a/Kernel/MemoryManager.cpp
+++ b/Kernel/MemoryManager.cpp
@@ -5,6 +5,7 @@
 #include "i386.h"
 #include "StdLib.h"
 #include "Process.h"
+#include
 
 //#define MM_DEBUG
 //#define PAGE_FAULT_DEBUG
@@ -226,7 +227,8 @@ Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
 bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    if (region.physical_pages[page_index_in_region]->retain_count() == 1) {
+    auto& vmo = region.vmo();
+    if (vmo.physical_pages()[page_index_in_region]->retain_count() == 1) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
 #endif
@@ -238,7 +240,7 @@ bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned pag
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> It's a COW page and it's time to COW!\n");
 #endif
-    auto physical_page_to_copy = move(region.physical_pages[page_index_in_region]);
+    auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
     auto ppages = allocate_physical_pages(1);
     ASSERT(ppages.size() == 1);
     byte* dest_ptr = quickmap_page(*ppages[0]);
@@ -247,7 +249,7 @@ bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned pag
     dbgprintf(" >> COW P%x <- P%x\n", ppages[0]->paddr().get(), physical_page_to_copy->paddr().get());
 #endif
     memcpy(dest_ptr, src_ptr, PAGE_SIZE);
-    region.physical_pages[page_index_in_region] = move(ppages[0]);
+    vmo.physical_pages()[page_index_in_region] = move(ppages[0]);
     unquickmap_page();
     region.cow_map.set(page_index_in_region, false);
     remap_region_page(process.m_page_directory, region, page_index_in_region, true);
@@ -256,9 +258,13 @@ bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned pag
 
 bool MemoryManager::page_in_from_vnode(Process& process, Region& region, unsigned page_index_in_region)
 {
-    ASSERT(region.physical_pages[page_index_in_region].is_null());
-    region.physical_pages[page_index_in_region] = allocate_physical_page();
-    if (region.physical_pages[page_index_in_region].is_null()) {
+    auto& vmo = region.vmo();
+    ASSERT(!vmo.is_anonymous());
+    ASSERT(vmo.vnode());
+    auto& vnode = *vmo.vnode();
+    ASSERT(vmo.physical_pages()[page_index_in_region].is_null());
+    vmo.physical_pages()[page_index_in_region] = allocate_physical_page();
+    if (vmo.physical_pages()[page_index_in_region].is_null()) {
         kprintf("MM: page_in_from_vnode was unable to allocate a physical page\n");
         return false;
     }
@@ -266,7 +272,7 @@ bool MemoryManager::page_in_from_vnode(Process& process, Region& region, unsigne
     byte* dest_ptr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE).asPtr();
     dbgprintf("MM: page_in_from_vnode ready to read from vnode, will write to L%x!\n", dest_ptr);
     sti(); // Oh god here we go...
-    auto nread = region.m_vnode->fileSystem()->readInodeBytes(region.m_vnode->inode, region.m_file_offset, PAGE_SIZE, dest_ptr, nullptr);
+    auto nread = vnode.fileSystem()->readInodeBytes(vnode.inode, vmo.vnode_offset(), PAGE_SIZE, dest_ptr, nullptr);
     if (nread < 0) {
         kprintf("MM: page_in_form_vnode had error (%d) while reading!\n", nread);
         return false;
@@ -289,7 +295,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     ASSERT(region);
     auto page_index_in_region = region->page_index_from_address(fault.laddr());
     if (fault.is_not_present()) {
-        if (region->m_vnode) {
+        if (region->vmo().vnode()) {
             dbgprintf("NP(vnode) fault in Region{%p}[%u]\n", region, page_index_in_region);
             page_in_from_vnode(*current, *region, page_index_in_region);
             return PageFaultResponse::Continue;
@@ -407,7 +413,7 @@ void MemoryManager::remap_region_page(PageDirectory* page_directory, Region& reg
     InterruptDisabler disabler;
     auto page_laddr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE);
     auto pte = ensurePTE(page_directory, page_laddr);
-    auto& physical_page = region.physical_pages[page_index_in_region];
+    auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
     ASSERT(physical_page);
     pte.setPhysicalPageBase(physical_page->paddr().get());
     pte.setPresent(true); // FIXME: Maybe we should use the is_readable flag here?
@@ -431,10 +437,11 @@ void MemoryManager::remap_region(Process& process, Region& region)
 void MemoryManager::map_region_at_address(PageDirectory* page_directory, Region& region, LinearAddress laddr, bool user_allowed)
 {
     InterruptDisabler disabler;
-    for (size_t i = 0; i < region.physical_pages.size(); ++i) {
+    auto& vmo = region.vmo();
+    for (size_t i = 0; i < vmo.page_count(); ++i) {
         auto page_laddr = laddr.offset(i * PAGE_SIZE);
         auto pte = ensurePTE(page_directory, page_laddr);
-        auto& physical_page = region.physical_pages[i];
+        auto& physical_page = vmo.physical_pages()[i];
         if (physical_page) {
             pte.setPhysicalPageBase(physical_page->paddr().get());
             pte.setPresent(true); // FIXME: Maybe we should use the is_readable flag here?
@@ -510,7 +517,8 @@ void MemoryManager::remove_kernel_alias_for_region(Region& region, byte* addr)
 bool MemoryManager::unmapRegion(Process& process, Region& region)
 {
     InterruptDisabler disabler;
-    for (size_t i = 0; i < region.physical_pages.size(); ++i) {
+    auto& vmo = region.vmo();
+    for (size_t i = 0; i < vmo.page_count(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
         auto pte = ensurePTE(process.m_page_directory, laddr);
         pte.setPhysicalPageBase(0);
@@ -519,7 +527,7 @@ bool MemoryManager::unmapRegion(Process& process, Region& region)
         pte.setUserAllowed(false);
         flushTLB(laddr);
 #ifdef MM_DEBUG
-        auto& physical_page = region.physical_pages[i];
+        auto& physical_page = vmo.physical_pages()[i];
         dbgprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
@@ -569,26 +577,51 @@ RetainPtr<Region> Region::clone()
 {
     InterruptDisabler disabler;
     if (is_readable && !is_writable) {
-        // Create a new region backed by the same physical pages.
-        return adopt(*new Region(linearAddress, size, physical_pages, String(name), is_readable, is_writable));
+        // Create a new region backed by the same VMObject.
+        return adopt(*new Region(linearAddress, size, m_vmo.copyRef(), String(name), is_readable, is_writable));
     }
 
     // Set up a COW region. The parent (this) region becomes COW as well!
-    for (size_t i = 0; i < physical_pages.size(); ++i)
+    for (size_t i = 0; i < vmo().page_count(); ++i)
         cow_map.set(i, true);
     MM.remap_region(*current, *this);
-    return adopt(*new Region(linearAddress, size, physical_pages, String(name), is_readable, is_writable, true));
+    return adopt(*new Region(linearAddress, size, m_vmo->clone(), String(name), is_readable, is_writable, true));
 }
 
-Region::Region(LinearAddress a, size_t s, Vector<RetainPtr<PhysicalPage>> pp, String&& n, bool r, bool w, bool cow)
+Region::Region(LinearAddress a, size_t s, String&& n, bool r, bool w, bool cow)
     : linearAddress(a)
     , size(s)
-    , physical_pages(move(pp))
+    , m_vmo(VMObject::create_anonymous(s))
     , name(move(n))
     , is_readable(r)
     , is_writable(w)
-    , cow_map(Bitmap::create(physical_pages.size(), cow))
+    , cow_map(Bitmap::create(m_vmo->page_count(), cow))
 {
+    m_vmo->set_name(name);
+}
+
+Region::Region(LinearAddress a, size_t s, RetainPtr<VirtualFileSystem::Node>&& vnode, String&& n, bool r, bool w)
+    : linearAddress(a)
+    , size(s)
+    , m_vmo(VMObject::create_file_backed(move(vnode), s))
+    , name(move(n))
+    , is_readable(r)
+    , is_writable(w)
+    , cow_map(Bitmap::create(m_vmo->page_count()))
+{
+    m_vmo->set_name(name);
+}
+
+Region::Region(LinearAddress a, size_t s, RetainPtr<VMObject>&& vmo, String&& n, bool r, bool w, bool cow)
+    : linearAddress(a)
+    , size(s)
+    , m_vmo(move(vmo))
+    , name(move(n))
+    , is_readable(r)
+    , is_writable(w)
+    , cow_map(Bitmap::create(m_vmo->page_count(), cow))
+{
+    m_vmo->set_name(name);
 }
 
 Region::~Region()
@@ -604,3 +637,66 @@ void PhysicalPage::return_to_freelist()
     dbgprintf("MM: P%x released to freelist\n", m_paddr.get());
 #endif
 }
+
+RetainPtr<VMObject> VMObject::create_file_backed(RetainPtr<VirtualFileSystem::Node>&& vnode, size_t size)
+{
+    return adopt(*new VMObject(move(vnode), size));
+}
+
+RetainPtr<VMObject> VMObject::create_anonymous(size_t size)
+{
+    return adopt(*new VMObject(size));
+}
+
+RetainPtr<VMObject> VMObject::clone()
+{
+    return adopt(*new VMObject(*this));
+}
+
+VMObject::VMObject(VMObject& other)
+    : m_name(other.m_name)
+    , m_anonymous(other.m_anonymous)
+    , m_vnode_offset(other.m_vnode_offset)
+    , m_size(other.m_size)
+    , m_vnode(other.m_vnode)
+    , m_physical_pages(other.m_physical_pages)
+{
+}
+
+VMObject::VMObject(size_t size)
+    : m_anonymous(true)
+    , m_size(size)
+{
+    m_physical_pages.resize(page_count());
+}
+
+VMObject::VMObject(RetainPtr<VirtualFileSystem::Node>&& vnode, size_t size)
+    : m_size(size)
+    , m_vnode(move(vnode))
+{
+    m_physical_pages.resize(page_count());
+}
+
+VMObject::~VMObject()
+{
+}
+
+int Region::commit(Process& process)
+{
+    InterruptDisabler disabler;
+#ifdef MM_DEBUG
+    dbgprintf("MM: commit %u pages in at L%x\n", vmo().page_count(), linearAddress.get());
+#endif
+    for (size_t i = 0; i < vmo().page_count(); ++i) {
+        if (!vmo().physical_pages()[i].is_null())
+            continue;
+        auto physical_page = MM.allocate_physical_page();
+        if (!physical_page) {
+            kprintf("MM: page_in_from_vnode was unable to allocate a physical page\n");
+            return -ENOMEM;
+        }
+        vmo().physical_pages()[i] = move(physical_page);
+        MM.remap_region_page(process.m_page_directory, *this, i, true);
+    }
+    return 0;
+}
diff --git a/Kernel/MemoryManager.h b/Kernel/MemoryManager.h
index 946b1ba244..4feb0a0103 100644
--- a/Kernel/MemoryManager.h
+++ b/Kernel/MemoryManager.h
@@ -57,10 +57,47 @@ struct PageDirectory {
     RetainPtr<PhysicalPage> physical_pages[1024];
 };
 
+class VMObject : public Retainable<VMObject> {
+public:
+    static RetainPtr<VMObject> create_file_backed(RetainPtr<VirtualFileSystem::Node>&&, size_t);
+    static RetainPtr<VMObject> create_anonymous(size_t);
+    RetainPtr<VMObject> clone();
+
+    ~VMObject();
+    bool is_anonymous() const { return m_anonymous; }
+
+    VirtualFileSystem::Node* vnode() { return m_vnode.ptr(); }
+    const VirtualFileSystem::Node* vnode() const { return m_vnode.ptr(); }
+    size_t vnode_offset() const { return m_vnode_offset; }
+
+    String name() const { return m_name; }
+    void set_name(const String& name) { m_name = name; }
+
+    size_t page_count() const { return m_size / PAGE_SIZE; }
+    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
+    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
+
+private:
+    VMObject(RetainPtr<VirtualFileSystem::Node>&&, size_t);
+    explicit VMObject(VMObject&);
+    explicit VMObject(size_t);
+    String m_name;
+    bool m_anonymous { false };
+    Unix::off_t m_vnode_offset { 0 };
+    size_t m_size { 0 };
+    RetainPtr<VirtualFileSystem::Node> m_vnode;
+    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
+};
+
 struct Region : public Retainable<Region> {
-    Region(LinearAddress, size_t, Vector<RetainPtr<PhysicalPage>>, String&&, bool r, bool w, bool cow = false);
+    Region(LinearAddress, size_t, String&&, bool r, bool w, bool cow = false);
+    Region(LinearAddress, size_t, RetainPtr<VMObject>&&, String&&, bool r, bool w, bool cow = false);
+    Region(LinearAddress, size_t, RetainPtr<VirtualFileSystem::Node>&&, String&&, bool r, bool w);
     ~Region();
 
+    const VMObject& vmo() const { return *m_vmo; }
+    VMObject& vmo() { return *m_vmo; }
+
     RetainPtr<Region> clone();
     bool contains(LinearAddress laddr) const
     {
@@ -72,12 +109,12 @@ struct Region : public Retainable<Region> {
         return (laddr - linearAddress).get() / PAGE_SIZE;
     }
 
-    RetainPtr<VirtualFileSystem::Node> m_vnode;
-    Unix::off_t m_file_offset { 0 };
+    int commit(Process&);
+    int decommit(Process&);
 
     LinearAddress linearAddress;
     size_t size { 0 };
-    Vector<RetainPtr<PhysicalPage>> physical_pages;
+    RetainPtr<VMObject> m_vmo;
     String name;
     bool is_readable { true };
    bool is_writable { true };
@@ -89,6 +126,7 @@
 class MemoryManager {
     AK_MAKE_ETERNAL
     friend class PhysicalPage;
+    friend class Region;
     friend ByteBuffer procfs$mm();
 public:
     static MemoryManager& the() PURE;
diff --git a/Kernel/ProcFileSystem.cpp b/Kernel/ProcFileSystem.cpp
index 1fb5681da5..b704dd3ee1 100644
--- a/Kernel/ProcFileSystem.cpp
+++ b/Kernel/ProcFileSystem.cpp
@@ -59,8 +59,8 @@ ByteBuffer procfs$pid_vm(Process& process)
             region->linearAddress.offset(region->size - 1).get(),
             region->size,
             region->name.characters());
-        for (size_t i = 0; i < region->physical_pages.size(); ++i) {
-            auto& physical_page = region->physical_pages[i];
+        for (size_t i = 0; i < region->vmo().page_count(); ++i) {
+            auto& physical_page = region->vmo().physical_pages()[i];
             ptr += ksprintf(ptr, "P%x%s(%u) ",
                 physical_page ? physical_page->paddr().get() : 0,
                 region->cow_map.get(i) ? "!" : "",
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index aa97ba053b..7b854decb9 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -100,8 +100,8 @@ Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name
     unsigned page_count = ceilDiv(size, PAGE_SIZE);
     auto physical_pages = MM.allocate_physical_pages(page_count);
     ASSERT(physical_pages.size() == page_count);
-
-    m_regions.append(adopt(*new Region(laddr, size, move(physical_pages), move(name), is_readable, is_writable)));
+    m_regions.append(adopt(*new Region(laddr, size, move(name), is_readable, is_writable)));
+    m_regions.last()->commit(*this);
     MM.mapRegion(*this, *m_regions.last());
     return m_regions.last().ptr();
 }
@@ -122,8 +122,7 @@ Region* Process::allocate_file_backed_region(LinearAddress laddr, size_t size, R
     Vector<RetainPtr<PhysicalPage>> physical_pages;
     physical_pages.resize(page_count); // Start out with no physical pages!
 
-    m_regions.append(adopt(*new Region(laddr, size, move(physical_pages), move(name), is_readable, is_writable)));
-    m_regions.last()->m_vnode = move(vnode);
+    m_regions.append(adopt(*new Region(laddr, size, move(vnode), move(name), is_readable, is_writable)));
     MM.mapRegion(*this, *m_regions.last());
     return m_regions.last().ptr();
 }
diff --git a/Kernel/Process.h b/Kernel/Process.h
index d926fb94da..6c5e8f29de 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -202,6 +202,7 @@ public:
 private:
     friend class MemoryManager;
     friend class Scheduler;
+    friend class Region;
 
     Process(String&& name, uid_t, gid_t, pid_t ppid, RingLevel, RetainPtr<VirtualFileSystem::Node>&& cwd = nullptr, RetainPtr<VirtualFileSystem::Node>&& executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
diff --git a/Kernel/i386.h b/Kernel/i386.h
index 714b78fdc2..10ad9ea4ba 100644
--- a/Kernel/i386.h
+++ b/Kernel/i386.h
@@ -2,7 +2,7 @@
 
 #include "types.h"
 
-#define PAGE_SIZE 4096u
+#define PAGE_SIZE 4096
 #define PAGE_MASK 0xfffff000
 
 union Descriptor {
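
Note (not part of the patch): after this change a Region no longer owns its Vector of physical pages directly; it points at a retain-counted VMObject that owns them. clone() either shares the VMObject outright (read-only regions) or clones it and marks both sides copy-on-write, and the write-fault path only copies a page while its retain count shows it is still shared. The following is a small standalone sketch of that ownership model, using std::shared_ptr in place of RetainPtr/PhysicalPage; all names and helpers here are simplified stand-ins, not SerenityOS code.

// Standalone sketch of the VMObject/Region ownership model introduced above.
// std::shared_ptr stands in for RetainPtr, a byte buffer stands in for a real
// physical page; none of this is actual kernel code.
#include <cstdio>
#include <memory>
#include <vector>

struct Page {
    unsigned char bytes[4096] = {};
};

struct VMObject {
    // One slot per page; a null slot means "not paged in yet".
    std::vector<std::shared_ptr<Page>> physical_pages;
    explicit VMObject(size_t page_count) : physical_pages(page_count) {}
};

struct Region {
    std::shared_ptr<VMObject> vmo;
    std::vector<bool> cow_map; // true = copy this page before the next write

    explicit Region(size_t page_count)
        : vmo(std::make_shared<VMObject>(page_count))
        , cow_map(page_count, false)
    {
    }

    // clone(): give the child its own VMObject whose slots point at the same
    // pages, and mark every page COW in both parent and child.
    Region clone()
    {
        Region child(cow_map.size());
        child.vmo->physical_pages = vmo->physical_pages;
        for (size_t i = 0; i < cow_map.size(); ++i) {
            cow_map[i] = true;
            child.cow_map[i] = true;
        }
        return child;
    }

    // Write fault on page i: same decision as MemoryManager::copy_on_write().
    void write_fault(size_t i)
    {
        auto& page = vmo->physical_pages[i];
        if (!page) {
            page = std::make_shared<Page>(); // demand allocation (cf. Region::commit)
        } else if (cow_map[i] && page.use_count() > 1) {
            page = std::make_shared<Page>(*page); // still shared: copy before writing
        }
        cow_map[i] = false; // page is now private to this region and writable
    }
};

int main()
{
    Region parent(4);
    parent.write_fault(0);         // parent touches page 0
    Region child = parent.clone(); // both regions now COW-share every page
    child.write_fault(0);          // child gets a private copy of page 0
    std::printf("page 0 still shared? %s\n",
        parent.vmo->physical_pages[0] == child.vmo->physical_pages[0] ? "yes" : "no");
    return 0;
}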