From b7476d7a1b71ecee6488b6daa4cf41753b699646 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Fri, 6 Aug 2021 13:57:39 +0200
Subject: [PATCH] Kernel: Rename Memory::Space => Memory::AddressSpace

---
 Kernel/CMakeLists.txt                         |  2 +-
 Kernel/Forward.h                              |  6 +--
 Kernel/Memory/{Space.cpp => AddressSpace.cpp} | 52 +++++++++----------
 Kernel/Memory/{Space.h => AddressSpace.h}     |  8 +--
 Kernel/Memory/MemoryManager.cpp               | 12 ++---
 Kernel/Memory/MemoryManager.h                 | 12 ++---
 Kernel/Memory/PageDirectory.h                 |  8 +--
 Kernel/Process.cpp                            |  2 +-
 Kernel/Process.h                              |  8 +--
 Kernel/Syscalls/execve.cpp                    |  6 +--
 10 files changed, 58 insertions(+), 58 deletions(-)
 rename Kernel/Memory/{Space.cpp => AddressSpace.cpp} (87%)
 rename Kernel/Memory/{Space.h => AddressSpace.h} (93%)

diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index 6e37718508..79c9093aaa 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -131,6 +131,7 @@ set(KERNEL_SOURCES
     KLexicalPath.cpp
     KString.cpp
     KSyms.cpp
+    Memory/AddressSpace.cpp
     Memory/AnonymousVMObject.cpp
     Memory/InodeVMObject.cpp
     Memory/MemoryManager.cpp
@@ -144,7 +145,6 @@ set(KERNEL_SOURCES
     Memory/RingBuffer.cpp
     Memory/ScatterGatherList.cpp
     Memory/SharedInodeVMObject.cpp
-    Memory/Space.cpp
     Memory/VMObject.cpp
     Memory/VirtualRange.cpp
     Memory/VirtualRangeAllocator.cpp
diff --git a/Kernel/Forward.h b/Kernel/Forward.h
index c44087389b..ecb4d5e5f6 100644
--- a/Kernel/Forward.h
+++ b/Kernel/Forward.h
@@ -66,6 +66,7 @@ class WaitQueue;
 class WorkQueue;
 
 namespace Memory {
+class AddressSpace;
 class AnonymousVMObject;
 class InodeVMObject;
 class MappedROM;
@@ -74,12 +75,11 @@ class PageDirectory;
 class PhysicalPage;
 class PhysicalRegion;
 class PrivateInodeVMObject;
-class VirtualRange;
-class VirtualRangeAllocator;
 class Region;
 class SharedInodeVMObject;
-class Space;
 class VMObject;
+class VirtualRange;
+class VirtualRangeAllocator;
 }
 
 template<typename BaseType>
diff --git a/Kernel/Memory/Space.cpp b/Kernel/Memory/AddressSpace.cpp
similarity index 87%
rename from Kernel/Memory/Space.cpp
rename to Kernel/Memory/AddressSpace.cpp
index 97c48100c1..1f939e30d3 100644
--- a/Kernel/Memory/Space.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -5,39 +5,39 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/InodeVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
-#include <Kernel/Memory/Space.h>
 #include <Kernel/PerformanceManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/SpinLock.h>
 
 namespace Kernel::Memory {
 
-OwnPtr<Space> Space::try_create(Process& process, Space const* parent)
+OwnPtr<AddressSpace> AddressSpace::try_create(Process& process, AddressSpace const* parent)
 {
     auto page_directory = PageDirectory::try_create_for_userspace(parent ? &parent->page_directory().range_allocator() : nullptr);
     if (!page_directory)
         return {};
-    auto space = adopt_own_if_nonnull(new (nothrow) Space(process, page_directory.release_nonnull()));
+    auto space = adopt_own_if_nonnull(new (nothrow) AddressSpace(process, page_directory.release_nonnull()));
     if (!space)
         return {};
     space->page_directory().set_space({}, *space);
     return space;
 }
 
-Space::Space(Process& process, NonnullRefPtr<PageDirectory> page_directory)
+AddressSpace::AddressSpace(Process& process, NonnullRefPtr<PageDirectory> page_directory)
     : m_process(&process)
     , m_page_directory(move(page_directory))
 {
 }
 
-Space::~Space()
+AddressSpace::~AddressSpace()
 {
 }
 
-KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
+KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
 {
     if (!size)
         return EINVAL;
@@ -139,7 +139,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }
 
-Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+Optional<VirtualRange> AddressSpace::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
@@ -148,7 +148,7 @@ Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size,
     return page_directory().range_allocator().allocate_specific(vaddr, size);
 }
 
-KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
+KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     auto new_region = Region::try_create_user_accessible(
         range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
@@ -168,7 +168,7 @@ KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region,
     return region;
 }
 
-KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
+KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
     auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
@@ -185,7 +185,7 @@ KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView
     return added_region;
 }
 
-KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+KResultOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
     VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -215,12 +215,12 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& ran
     return added_region;
 }
 
-void Space::deallocate_region(Region& region)
+void AddressSpace::deallocate_region(Region& region)
 {
     take_region(region);
 }
 
-NonnullOwnPtr<Region> Space::take_region(Region& region)
+NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
     ScopedSpinLock lock(m_lock);
 
@@ -232,7 +232,7 @@ NonnullOwnPtr<Region> Space::take_region(Region& region)
     return found_region;
 }
 
-Region* Space::find_region_from_range(VirtualRange const& range)
+Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
@@ -250,7 +250,7 @@ Region* Space::find_region_from_range(VirtualRange const& range)
     return region;
 }
 
-Region* Space::find_region_containing(VirtualRange const& range)
+Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
@@ -259,7 +259,7 @@ Region* Space::find_region_containing(VirtualRange const& range)
     return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
 }
 
-Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
+Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& range)
 {
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
@@ -282,7 +282,7 @@ Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
     return regions;
 }
 
-Region* Space::add_region(NonnullOwnPtr<Region> region)
+Region* AddressSpace::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
     ScopedSpinLock lock(m_lock);
@@ -291,7 +291,7 @@ Region* Space::add_region(NonnullOwnPtr<Region> region)
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
+KResultOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
 {
     VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
@@ -312,7 +312,7 @@ KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region&
     return new_regions;
 }
 
-void Space::dump_regions()
+void AddressSpace::dump_regions()
 {
     dbgln("Process regions:");
 #if ARCH(I386)
@@ -339,13 +339,13 @@ void Space::dump_regions()
     MM.dump_kernel_regions();
 }
 
-void Space::remove_all_regions(Badge<Process>)
+void AddressSpace::remove_all_regions(Badge<Process>)
 {
     ScopedSpinLock lock(m_lock);
     m_regions.clear();
 }
 
-size_t Space::amount_dirty_private() const
+size_t AddressSpace::amount_dirty_private() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
@@ -359,7 +359,7 @@ size_t Space::amount_dirty_private() const
     return amount;
 }
 
-size_t Space::amount_clean_inode() const
+size_t AddressSpace::amount_clean_inode() const
 {
     ScopedSpinLock lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
@@ -373,7 +373,7 @@ size_t Space::amount_clean_inode() const
     return amount;
 }
 
-size_t Space::amount_virtual() const
+size_t AddressSpace::amount_virtual() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
@@ -383,7 +383,7 @@ size_t Space::amount_virtual() const
     return amount;
 }
 
-size_t Space::amount_resident() const
+size_t AddressSpace::amount_resident() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
@@ -394,7 +394,7 @@ size_t Space::amount_resident() const
     return amount;
 }
 
-size_t Space::amount_shared() const
+size_t AddressSpace::amount_shared() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
@@ -408,7 +408,7 @@ size_t Space::amount_shared() const
     return amount;
 }
 
-size_t Space::amount_purgeable_volatile() const
+size_t AddressSpace::amount_purgeable_volatile() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
@@ -422,7 +422,7 @@ size_t Space::amount_purgeable_volatile() const
     return amount;
 }
 
-size_t Space::amount_purgeable_nonvolatile() const
+size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
diff --git a/Kernel/Memory/Space.h b/Kernel/Memory/AddressSpace.h
similarity index 93%
rename from Kernel/Memory/Space.h
rename to Kernel/Memory/AddressSpace.h
index b8bf401779..973a7f4fe3 100644
--- a/Kernel/Memory/Space.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -16,10 +16,10 @@
 
 namespace Kernel::Memory {
 
-class Space {
+class AddressSpace {
 public:
-    static OwnPtr<Space> try_create(Process&, Space const* parent);
-    ~Space();
+    static OwnPtr<AddressSpace> try_create(Process&, AddressSpace const* parent);
+    ~AddressSpace();
 
     PageDirectory& page_directory() { return *m_page_directory; }
     const PageDirectory& page_directory() const { return *m_page_directory; }
@@ -66,7 +66,7 @@ public:
     size_t amount_purgeable_nonvolatile() const;
 
 private:
-    Space(Process&, NonnullRefPtr<PageDirectory>);
+    AddressSpace(Process&, NonnullRefPtr<PageDirectory>);
 
     Process* m_process { nullptr };
     mutable RecursiveSpinLock m_lock;
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 95c61f9e25..7623408abc 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -612,19 +612,19 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
     return nullptr;
 }
 
-Region* MemoryManager::find_user_region_from_vaddr_no_lock(Space& space, VirtualAddress vaddr)
+Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
 {
     VERIFY(space.get_lock().own_lock());
     return space.find_region_containing({ vaddr, 1 });
 }
 
-Region* MemoryManager::find_user_region_from_vaddr(Space& space, VirtualAddress vaddr)
+Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
 {
     ScopedSpinLock lock(space.get_lock());
     return find_user_region_from_vaddr_no_lock(space, vaddr);
 }
 
-void MemoryManager::validate_syscall_preconditions(Space& space, RegisterState const& regs)
+void MemoryManager::validate_syscall_preconditions(AddressSpace& space, RegisterState const& regs)
 {
     // We take the space lock once here and then use the no_lock variants
     // to avoid excessive spinlock recursion in this extemely common path.
@@ -933,7 +933,7 @@ void MemoryManager::enter_process_paging_scope(Process& process)
     enter_space(process.space());
 }
 
-void MemoryManager::enter_space(Space& space)
+void MemoryManager::enter_space(AddressSpace& space)
 {
     auto current_thread = Thread::current();
     VERIFY(current_thread != nullptr);
@@ -1039,7 +1039,7 @@ void MemoryManager::unquickmap_page()
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }
 
-bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
 {
     VERIFY(space.get_lock().own_lock());
 
@@ -1050,7 +1050,7 @@ bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vad
     return region && region->is_user() && region->is_stack();
 }
 
-bool MemoryManager::validate_user_stack(Space& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
 {
     ScopedSpinLock lock(space.get_lock());
     return validate_user_stack_no_lock(space, vaddr);
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 96a209852b..5070b6706e 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -161,10 +161,10 @@ public:
     void unmap_ksyms_after_init();
 
     static void enter_process_paging_scope(Process&);
-    static void enter_space(Space&);
+    static void enter_space(AddressSpace&);
 
-    bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
-    bool validate_user_stack(Space&, VirtualAddress) const;
+    bool validate_user_stack_no_lock(AddressSpace&, VirtualAddress) const;
+    bool validate_user_stack(AddressSpace&, VirtualAddress) const;
 
     enum class ShouldZeroFill {
         No,
@@ -219,9 +219,9 @@ public:
         callback(vmobject);
     }
 
-    static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
-    static Region* find_user_region_from_vaddr_no_lock(Space&, VirtualAddress);
-    static void validate_syscall_preconditions(Space&, RegisterState const&);
+    static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress);
+    static Region* find_user_region_from_vaddr_no_lock(AddressSpace&, VirtualAddress);
+    static void validate_syscall_preconditions(AddressSpace&, RegisterState const&);
 
     void dump_kernel_regions();
 
diff --git a/Kernel/Memory/PageDirectory.h b/Kernel/Memory/PageDirectory.h
index 9141fc4329..72d1249c7a 100644
--- a/Kernel/Memory/PageDirectory.h
+++ b/Kernel/Memory/PageDirectory.h
@@ -41,17 +41,17 @@ public:
 
     VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
 
-    Space* space() { return m_space; }
-    const Space* space() const { return m_space; }
+    AddressSpace* space() { return m_space; }
+    const AddressSpace* space() const { return m_space; }
 
-    void set_space(Badge<Space>, Space& space) { m_space = &space; }
+    void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }
 
     RecursiveSpinLock& get_lock() { return m_lock; }
 
 private:
     PageDirectory();
 
-    Space* m_space { nullptr };
+    AddressSpace* m_space { nullptr };
     VirtualRangeAllocator m_range_allocator;
     VirtualRangeAllocator m_identity_range_allocator;
 #if ARCH(X86_64)
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index f2ebe0ac4a..1a73da758d 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -267,7 +267,7 @@ Process::Process(const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool
 
 KResult Process::attach_resources(RefPtr<Thread>& first_thread, Process* fork_parent)
 {
-    m_space = Memory::Space::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
+    m_space = Memory::AddressSpace::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
     if (!m_space)
         return ENOMEM;
 
diff --git a/Kernel/Process.h b/Kernel/Process.h
index eef1c5c765..b298fa7189 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -20,7 +20,7 @@
 #include <Kernel/Forward.h>
 #include <Kernel/FutexQueue.h>
 #include <Kernel/Locking/Mutex.h>
-#include <Kernel/Memory/Space.h>
+#include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/PerformanceEventBuffer.h>
 #include <Kernel/ProcessGroup.h>
 #include <Kernel/StdLib.h>
@@ -515,8 +515,8 @@ public:
 
     PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
 
-    Memory::Space& space() { return *m_space; }
-    Memory::Space const& space() const { return *m_space; }
+    Memory::AddressSpace& space() { return *m_space; }
+    Memory::AddressSpace const& space() const { return *m_space; }
 
     VirtualAddress signal_trampoline() const { return m_signal_trampoline; }
 
@@ -582,7 +582,7 @@ private:
 
     String m_name;
 
-    OwnPtr<Memory::Space> m_space;
+    OwnPtr<Memory::AddressSpace> m_space;
 
     RefPtr<ProcessGroup> m_pg;
 
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 692b2e674b..8d6b865c05 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -30,7 +30,7 @@ namespace Kernel {
 extern Memory::Region* g_signal_trampoline_region;
 
 struct LoadResult {
-    OwnPtr<Memory::Space> space;
+    OwnPtr<Memory::AddressSpace> space;
     FlatPtr load_base { 0 };
     FlatPtr entry_eip { 0 };
     size_t size { 0 };
@@ -263,7 +263,7 @@ enum class ShouldAllowSyscalls {
     Yes,
 };
 
-static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_space, FileDescription& object_description,
+static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> new_space, FileDescription& object_description,
     FlatPtr load_offset, ShouldAllocateTls should_allocate_tls, ShouldAllowSyscalls should_allow_syscalls)
 {
     auto& inode = *(object_description.inode());
@@ -453,7 +453,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_sp
 KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_description,
     RefPtr<FileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header)
 {
-    auto new_space = Memory::Space::try_create(*this, nullptr);
+    auto new_space = Memory::AddressSpace::try_create(*this, nullptr);
     if (!new_space)
         return ENOMEM;