From 4b96d9c8133527992b212314bf5b8dd1a53e6a5f Mon Sep 17 00:00:00 2001
From: int16
Date: Thu, 10 Mar 2022 11:53:40 +1100
Subject: [PATCH] Kernel: Move mmap validation functions to Process

---
 Kernel/Process.h         |  3 +++
 Kernel/Syscalls/mmap.cpp | 24 ++++++++++++------------
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/Kernel/Process.h b/Kernel/Process.h
index b90fb36225..63578db51d 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -524,6 +524,9 @@ public:
     ErrorOr<void> require_promise(Pledge);
     ErrorOr<void> require_no_promises() const;
 
+    bool validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr) const;
+    bool validate_inode_mmap_prot(int prot, const Inode& inode, bool map_shared) const;
+
 private:
     friend class MemoryManager;
     friend class Scheduler;
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 45527868ae..6f736a88e4 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -69,7 +69,7 @@ static bool should_make_executable_exception_for_dynamic_loader(bool make_readab
     return true;
 }
 
-static bool validate_mmap_prot(const Process& process, int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr)
+bool Process::validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region) const
 {
     bool make_readable = prot & PROT_READ;
     bool make_writable = prot & PROT_WRITE;
@@ -99,17 +99,17 @@ static bool validate_mmap_prot(const Process& process, int prot, bool map_stack,
     return true;
 }
 
-static bool validate_inode_mmap_prot(const Process& process, int prot, const Inode& inode, bool map_shared)
+bool Process::validate_inode_mmap_prot(int prot, const Inode& inode, bool map_shared) const
 {
     auto metadata = inode.metadata();
-    if ((prot & PROT_READ) && !metadata.may_read(process))
+    if ((prot & PROT_READ) && !metadata.may_read(*this))
         return false;
 
     if (map_shared) {
         // FIXME: What about readonly filesystem mounts? We cannot make a
         // decision here without knowing the mount flags, so we would need to
         // keep a Custody or something from mmap time.
-        if ((prot & PROT_WRITE) && !metadata.may_write(process))
+        if ((prot & PROT_WRITE) && !metadata.may_write(*this))
             return false;
         if (auto shared_vmobject = inode.shared_vmobject()) {
             if ((prot & PROT_EXEC) && shared_vmobject->writable_mappings())
@@ -180,7 +180,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
     if ((map_fixed || map_fixed_noreplace) && map_randomized)
         return EINVAL;
 
-    if (!validate_mmap_prot(*this, prot, map_stack, map_anonymous))
+    if (!validate_mmap_prot(prot, map_stack, map_anonymous))
         return EINVAL;
 
     if (map_stack && (!map_private || !map_anonymous))
@@ -232,7 +232,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
                 return EACCES;
         }
         if (description->inode()) {
-            if (!validate_inode_mmap_prot(*this, prot, *description->inode(), map_shared))
+            if (!validate_inode_mmap_prot(prot, *description->inode(), map_shared))
                 return EACCES;
         }
 
@@ -273,12 +273,12 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
     if (auto* whole_region = address_space().find_region_from_range(range_to_mprotect)) {
         if (!whole_region->is_mmap())
             return EPERM;
-        if (!validate_mmap_prot(*this, prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
+        if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
            return EINVAL;
         if (whole_region->access() == Memory::prot_to_region_access_flags(prot))
             return 0;
         if (whole_region->vmobject().is_inode()
-            && !validate_inode_mmap_prot(*this, prot, static_cast<Memory::InodeVMObject const&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
+            && !validate_inode_mmap_prot(prot, static_cast<Memory::InodeVMObject const&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
             return EACCES;
         }
         whole_region->set_readable(prot & PROT_READ);
@@ -293,12 +293,12 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
     if (auto* old_region = address_space().find_region_containing(range_to_mprotect)) {
         if (!old_region->is_mmap())
             return EPERM;
-        if (!validate_mmap_prot(*this, prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
+        if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
             return EINVAL;
         if (old_region->access() == Memory::prot_to_region_access_flags(prot))
             return 0;
         if (old_region->vmobject().is_inode()
-            && !validate_inode_mmap_prot(*this, prot, static_cast<Memory::InodeVMObject const&>(old_region->vmobject()).inode(), old_region->is_shared())) {
+            && !validate_inode_mmap_prot(prot, static_cast<Memory::InodeVMObject const&>(old_region->vmobject()).inode(), old_region->is_shared())) {
             return EACCES;
         }
 
@@ -333,10 +333,10 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
         for (const auto* region : regions) {
             if (!region->is_mmap())
                 return EPERM;
-            if (!validate_mmap_prot(*this, prot, region->is_stack(), region->vmobject().is_anonymous(), region))
+            if (!validate_mmap_prot(prot, region->is_stack(), region->vmobject().is_anonymous(), region))
                 return EINVAL;
             if (region->vmobject().is_inode()
-                && !validate_inode_mmap_prot(*this, prot, static_cast<Memory::InodeVMObject const&>(region->vmobject()).inode(), region->is_shared())) {
+                && !validate_inode_mmap_prot(prot, static_cast<Memory::InodeVMObject const&>(region->vmobject()).inode(), region->is_shared())) {
                 return EACCES;
             }
         }