From c3451899bcbdb1b268ff862ae37263d6ee99a423 Mon Sep 17 00:00:00 2001
From: Tom
Date: Thu, 3 Sep 2020 15:06:25 -0600
Subject: [PATCH] Kernel: Add MAP_NORESERVE support to mmap

Rather than lazily committing regions by default, we now commit
the entire region unless MAP_NORESERVE is specified.

This solves random crashes in low-memory situations where e.g. the
malloc heap has allocated memory, but touching pages that haven't been
used before triggers a crash when no more physical memory is available.

Use this flag to create large regions without actually committing
the backing memory. madvise() can be used to commit arbitrary areas
of such regions after creating them.
---
 Kernel/Process.cpp              | 16 ++++++++++------
 Kernel/Process.h                |  4 ++--
 Kernel/Syscalls/mmap.cpp        |  6 +++---
 Kernel/UnixTypes.h              |  1 +
 Kernel/VM/PurgeableVMObject.cpp |  2 +-
 Kernel/VM/Region.cpp            | 13 +++++++++++--
 Kernel/VM/Region.h              |  3 ++-
 7 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index dd750f8889..9c74dd59da 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -146,8 +146,9 @@ Region* Process::allocate_region(const Range& range, const String& name, int pro
     ASSERT(range.is_valid());
     auto vmobject = AnonymousVMObject::create_with_size(range.size());
     auto region = Region::create_user_accessible(this, range, vmobject, 0, name, prot_to_region_access_flags(prot));
-    region->map(page_directory());
-    if (should_commit && !region->commit())
+    if (!region->map(page_directory()))
+        return nullptr;
+    if (should_commit && region->can_commit() && !region->commit())
         return nullptr;
     return &add_region(move(region));
 }
@@ -160,7 +161,7 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
     return allocate_region(range, name, prot, should_commit);
 }
 
-Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot)
+Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool should_commit)
 {
     ASSERT(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -178,16 +179,19 @@ Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr
     }
     offset_in_vmobject &= PAGE_MASK;
     auto& region = add_region(Region::create_user_accessible(this, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot)));
-    region.map(page_directory());
+    if (!region.map(page_directory()))
+        return nullptr;
+    if (should_commit && region.can_commit() && !region.commit())
+        return nullptr;
     return &region;
 }
 
-Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot)
+Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool should_commit)
 {
     auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
-    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot);
+    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot, should_commit);
 }
 
 bool Process::deallocate_region(Region& region)
diff --git a/Kernel/Process.h b/Kernel/Process.h
index d859dfd745..ac4670deab 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -436,9 +436,9 @@ public:
         return m_euid == 0;
     }
 
-    Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot);
+    Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool should_commit = true);
     Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool should_commit = true);
-    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot);
+    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool should_commit = true);
     Region* allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, bool should_commit = true);
 
     bool deallocate_region(Region& region);
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 20e9f379ae..429f2be7cd 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -120,6 +120,7 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
     bool map_private = flags & MAP_PRIVATE;
     bool map_stack = flags & MAP_STACK;
     bool map_fixed = flags & MAP_FIXED;
+    bool map_noreserve = flags & MAP_NORESERVE;
 
     if (map_shared && map_private)
         return (void*)-EINVAL;
@@ -148,10 +149,9 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
         if (!region && (!map_fixed && addr != 0))
             region = allocate_region_with_vmobject({}, size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
     } else if (map_anonymous) {
-
-        region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, false);
+        region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, !map_noreserve);
         if (!region && (!map_fixed && addr != 0))
-            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, false);
+            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, !map_noreserve);
     } else {
         if (offset < 0)
             return (void*)-EINVAL;
diff --git a/Kernel/UnixTypes.h b/Kernel/UnixTypes.h
index feb03e44f0..5b1830513d 100644
--- a/Kernel/UnixTypes.h
+++ b/Kernel/UnixTypes.h
@@ -93,6 +93,7 @@ enum {
 #define MAP_ANON MAP_ANONYMOUS
 #define MAP_STACK 0x40
 #define MAP_PURGEABLE 0x80
+#define MAP_NORESERVE 0x100
 
 #define PROT_READ 0x1
 #define PROT_WRITE 0x2
diff --git a/Kernel/VM/PurgeableVMObject.cpp b/Kernel/VM/PurgeableVMObject.cpp
index c944b3882d..8c67d401c4 100644
--- a/Kernel/VM/PurgeableVMObject.cpp
+++ b/Kernel/VM/PurgeableVMObject.cpp
@@ -291,7 +291,7 @@ int PurgeableVMObject::purge_impl()
             } else {
                 klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " (no ownership) at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
             }
-            region.remap_page_range(range.base, range.count);
+            region.remap_page_range(range.base, range.count, false);
         }
     });
 }
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 22d49c60a1..5f40c88b8e 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -187,13 +187,18 @@ auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, b
             // Attempt to remap the page range. We want to make sure we have
             // enough memory, if not we need to inform the caller of that
             // fact
-            if (!remap_page_range(first_page_index, last_page_index - first_page_index))
+            if (!remap_page_range(first_page_index, last_page_index - first_page_index, true))
                 return SetVolatileError::OutOfMemory;
         }
     }
     return SetVolatileError::Success;
 }
 
+bool Region::can_commit() const
+{
+    return vmobject().is_anonymous() || vmobject().is_purgeable();
+}
+
 bool Region::commit()
 {
     ScopedSpinLock lock(s_mm_lock);
@@ -339,7 +344,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     return true;
 }
 
-bool Region::remap_page_range(size_t page_index, size_t page_count)
+bool Region::remap_page_range(size_t page_index, size_t page_count, bool do_commit)
 {
     bool success = true;
     ScopedSpinLock lock(s_mm_lock);
@@ -347,6 +352,10 @@ bool Region::remap_page_range(size_t page_index, size_t page_count)
     ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t index = page_index;
     while (index < page_index + page_count) {
+        if (do_commit && !commit(index)) {
+            success = false;
+            break;
+        }
         if (!map_individual_page_impl(index)) {
             success = false;
             break;
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 46159d8c8c..e1ec47a6bc 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -159,6 +159,7 @@ public:
         return m_offset_in_vmobject;
     }
 
+    bool can_commit() const;
     bool commit();
 
     size_t amount_resident() const;
@@ -193,7 +194,7 @@ public:
 
     void set_inherit_mode(InheritMode inherit_mode) { m_inherit_mode = inherit_mode; }
 
-    bool remap_page_range(size_t page_index, size_t page_count);
+    bool remap_page_range(size_t page_index, size_t page_count, bool do_commit);
 
     bool is_volatile(VirtualAddress vaddr, size_t size) const;
     enum class SetVolatileError {
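
The sketch below is not part of the patch; it is a minimal userspace illustration of how MAP_NORESERVE is meant to be used, assuming the POSIX-style mmap()/munmap() wrappers exposed by LibC. The madvise()-based commit step mentioned in the commit message is only hinted at in a comment, since the exact advice value to use is not shown here.

#include <stdio.h>
#include <sys/mman.h>

int main()
{
    size_t size = 64 * 1024 * 1024;

    // Reserve 64 MiB of address space. With MAP_NORESERVE the kernel no
    // longer commits the backing physical memory at mmap() time.
    void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    // Per the commit message, madvise() can later be used to commit
    // arbitrary sub-ranges of the region before they are touched; a page
    // that is touched without being committed first may fault if no more
    // physical memory is available.

    munmap(base, size);
    return 0;
}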