Kernel: Rename Memory::Space => Memory::AddressSpace

Author: Andreas Kling
commit: b7476d7a1b
parent: cd5faf4e42

10 changed files with 58 additions and 58 deletions
Kernel/CMakeLists.txt

@@ -131,6 +131,7 @@ set(KERNEL_SOURCES
     KLexicalPath.cpp
     KString.cpp
     KSyms.cpp
+    Memory/AddressSpace.cpp
     Memory/AnonymousVMObject.cpp
     Memory/InodeVMObject.cpp
     Memory/MemoryManager.cpp
@@ -144,7 +145,6 @@ set(KERNEL_SOURCES
     Memory/RingBuffer.cpp
     Memory/ScatterGatherList.cpp
     Memory/SharedInodeVMObject.cpp
-    Memory/Space.cpp
     Memory/VMObject.cpp
     Memory/VirtualRange.cpp
     Memory/VirtualRangeAllocator.cpp
Kernel/Forward.h

@@ -66,6 +66,7 @@ class WaitQueue;
 class WorkQueue;
 
 namespace Memory {
+class AddressSpace;
 class AnonymousVMObject;
 class InodeVMObject;
 class MappedROM;
@@ -74,12 +75,11 @@ class PageDirectory;
 class PhysicalPage;
 class PhysicalRegion;
 class PrivateInodeVMObject;
-class VirtualRange;
-class VirtualRangeAllocator;
 class Region;
 class SharedInodeVMObject;
-class Space;
 class VMObject;
+class VirtualRange;
+class VirtualRangeAllocator;
 }
 
 template<typename BaseType>
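Forward.h exists so other headers can name kernel types without pulling in their full definitions; a pointer or reference member only needs the class to be declared. A small illustration of why the forward declaration added here suffices for consumers such as Process.h (a simplified sketch, not the actual kernel headers):

    namespace Kernel::Memory {
    class AddressSpace; // forward declaration: the name is known, the layout is not
    }

    class ProcessSketch {
        // A pointer member compiles against the incomplete type; only code
        // that dereferences it needs the full class definition.
        Kernel::Memory::AddressSpace* m_space { nullptr };
    };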
Kernel/Memory/Space.cpp => Kernel/Memory/AddressSpace.cpp

@@ -5,39 +5,39 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/InodeVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
-#include <Kernel/Memory/Space.h>
 #include <Kernel/PerformanceManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/SpinLock.h>
 
 namespace Kernel::Memory {
 
-OwnPtr<Space> Space::try_create(Process& process, Space const* parent)
+OwnPtr<AddressSpace> AddressSpace::try_create(Process& process, AddressSpace const* parent)
 {
     auto page_directory = PageDirectory::try_create_for_userspace(parent ? &parent->page_directory().range_allocator() : nullptr);
     if (!page_directory)
         return {};
-    auto space = adopt_own_if_nonnull(new (nothrow) Space(process, page_directory.release_nonnull()));
+    auto space = adopt_own_if_nonnull(new (nothrow) AddressSpace(process, page_directory.release_nonnull()));
     if (!space)
         return {};
     space->page_directory().set_space({}, *space);
     return space;
 }
 
-Space::Space(Process& process, NonnullRefPtr<PageDirectory> page_directory)
+AddressSpace::AddressSpace(Process& process, NonnullRefPtr<PageDirectory> page_directory)
     : m_process(&process)
     , m_page_directory(move(page_directory))
 {
 }
 
-Space::~Space()
+AddressSpace::~AddressSpace()
 {
 }
 
-KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
+KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
 {
     if (!size)
         return EINVAL;
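The try_create factory in this hunk shows the kernel's OOM-tolerant construction idiom: allocation uses new (nothrow), a null result is propagated as an empty OwnPtr, and the caller decides how to fail. A minimal standalone sketch of the same idiom, using std::unique_ptr and a hypothetical stand-in type rather than the kernel's OwnPtr/adopt_own_if_nonnull:

    #include <memory>
    #include <new>

    // Stand-in for Memory::AddressSpace; not the kernel class.
    class AddressSpaceSketch {
    public:
        static std::unique_ptr<AddressSpaceSketch> try_create()
        {
            // new (std::nothrow) returns nullptr on allocation failure instead
            // of throwing, mirroring adopt_own_if_nonnull(new (nothrow) ...).
            auto* raw = new (std::nothrow) AddressSpaceSketch;
            if (!raw)
                return {}; // empty pointer signals OOM to the caller
            return std::unique_ptr<AddressSpaceSketch>(raw);
        }

    private:
        AddressSpaceSketch() = default; // construction only via try_create
    };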
@@ -139,7 +139,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }
 
-Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+Optional<VirtualRange> AddressSpace::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
@@ -148,7 +148,7 @@ Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
     return page_directory().range_allocator().allocate_specific(vaddr, size);
 }
 
-KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
+KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     auto new_region = Region::try_create_user_accessible(
         range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
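The allocate_range() hunk above masks the requested base down to a page boundary and rounds the size up to whole pages before consulting the range allocator. A sketch of that arithmetic, assuming 4 KiB pages (PAGE_MASK and page_round_up are kernel helpers; these are illustrative reimplementations):

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t PAGE_SIZE = 4096;
    constexpr uintptr_t PAGE_MASK = ~(PAGE_SIZE - 1);

    // vaddr.mask(PAGE_MASK): clear the low bits so the base is page-aligned.
    constexpr uintptr_t page_base(uintptr_t vaddr) { return vaddr & PAGE_MASK; }

    // page_round_up(size): round up to the next multiple of PAGE_SIZE.
    constexpr size_t page_round_up(size_t size) { return (size + PAGE_SIZE - 1) & PAGE_MASK; }

    static_assert(page_base(0x12345) == 0x12000);
    static_assert(page_round_up(1) == 0x1000);
    static_assert(page_round_up(0x1000) == 0x1000);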
@@ -168,7 +168,7 @@ KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region,
     return region;
 }
 
-KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
+KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
     auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
@@ -185,7 +185,7 @@ KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView
     return added_region;
 }
 
-KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+KResultOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
     VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -215,12 +215,12 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
     return added_region;
 }
 
-void Space::deallocate_region(Region& region)
+void AddressSpace::deallocate_region(Region& region)
 {
     take_region(region);
 }
 
-NonnullOwnPtr<Region> Space::take_region(Region& region)
+NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
     ScopedSpinLock lock(m_lock);
 
@@ -232,7 +232,7 @@ NonnullOwnPtr<Region> Space::take_region(Region& region)
     return found_region;
 }
 
-Region* Space::find_region_from_range(VirtualRange const& range)
+Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
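deallocate_region() and take_region() in this hunk split removal from destruction: take_region detaches the region from the space and returns it as an owning pointer, while deallocate_region simply lets that owner go out of scope. A sketch of the ownership transfer with standard types (a hypothetical Region stand-in and a vector instead of the kernel's region tree):

    #include <memory>
    #include <vector>

    struct RegionSketch { };

    using RegionList = std::vector<std::unique_ptr<RegionSketch>>;

    // Detach the region and hand ownership back to the caller.
    std::unique_ptr<RegionSketch> take_region(RegionList& regions, RegionSketch& region)
    {
        for (auto it = regions.begin(); it != regions.end(); ++it) {
            if (it->get() == &region) {
                auto owned = std::move(*it); // ownership leaves the list
                regions.erase(it);
                return owned;
            }
        }
        return nullptr; // the kernel version asserts the region exists
    }

    // Dropping the returned owner destroys the region immediately.
    void deallocate_region(RegionList& regions, RegionSketch& region)
    {
        (void)take_region(regions, region);
    }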
@@ -250,7 +250,7 @@ Region* Space::find_region_from_range(VirtualRange const& range)
     return region;
 }
 
-Region* Space::find_region_containing(VirtualRange const& range)
+Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
@@ -259,7 +259,7 @@ Region* Space::find_region_containing(VirtualRange const& range)
     return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
 }
 
-Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
+Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& range)
 {
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
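find_region_containing() leans on the region tree's find_largest_not_above(): the only possible match is the region with the greatest base address that does not exceed the query's base, and it counts only if it fully contains the queried range. The same lookup sketched with std::map (keyed by base address) standing in for the kernel's tree:

    #include <cstddef>
    #include <cstdint>
    #include <map>

    struct RangeSketch {
        uintptr_t base { 0 };
        size_t size { 0 };
        uintptr_t end() const { return base + size; }
        bool contains(RangeSketch const& other) const
        {
            return other.base >= base && other.end() <= end();
        }
    };

    RangeSketch const* find_region_containing(std::map<uintptr_t, RangeSketch> const& regions, RangeSketch const& range)
    {
        // upper_bound: first region whose base is strictly greater than
        // range.base; the entry before it has the largest base not above it.
        auto it = regions.upper_bound(range.base);
        if (it == regions.begin())
            return nullptr; // no candidate at or below the query base
        --it;
        return it->second.contains(range) ? &it->second : nullptr;
    }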
@@ -282,7 +282,7 @@ Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
     return regions;
 }
 
-Region* Space::add_region(NonnullOwnPtr<Region> region)
+Region* AddressSpace::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
     ScopedSpinLock lock(m_lock);
@@ -291,7 +291,7 @@ Region* Space::add_region(NonnullOwnPtr<Region> region)
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
+KResultOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
 {
     VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
@@ -312,7 +312,7 @@ KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
     return new_regions;
 }
 
-void Space::dump_regions()
+void AddressSpace::dump_regions()
 {
     dbgln("Process regions:");
 #if ARCH(I386)
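try_split_region_around_range() builds on VirtualRange::carve(), which removes an inner range from an outer one and yields at most two leftover pieces, which is why the function returns Vector<Region*, 2>. A sketch of the carve step itself, with a hypothetical RangeSketch type:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct RangeSketch {
        uintptr_t base { 0 };
        size_t size { 0 };
        uintptr_t end() const { return base + size; }
    };

    // Cut `inner` out of `outer`; whatever remains on each side survives.
    // Assumes inner is fully contained within outer.
    std::vector<RangeSketch> carve(RangeSketch const& outer, RangeSketch const& inner)
    {
        std::vector<RangeSketch> remainders;
        if (inner.base > outer.base)
            remainders.push_back({ outer.base, inner.base - outer.base }); // left piece
        if (inner.end() < outer.end())
            remainders.push_back({ inner.end(), outer.end() - inner.end() }); // right piece
        return remainders;
    }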
@@ -339,13 +339,13 @@ void Space::dump_regions()
     MM.dump_kernel_regions();
 }
 
-void Space::remove_all_regions(Badge<Process>)
+void AddressSpace::remove_all_regions(Badge<Process>)
 {
     ScopedSpinLock lock(m_lock);
     m_regions.clear();
 }
 
-size_t Space::amount_dirty_private() const
+size_t AddressSpace::amount_dirty_private() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
@@ -359,7 +359,7 @@ size_t Space::amount_dirty_private() const
     return amount;
 }
 
-size_t Space::amount_clean_inode() const
+size_t AddressSpace::amount_clean_inode() const
 {
     ScopedSpinLock lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
@@ -373,7 +373,7 @@ size_t Space::amount_clean_inode() const
     return amount;
 }
 
-size_t Space::amount_virtual() const
+size_t AddressSpace::amount_virtual() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
@@ -383,7 +383,7 @@ size_t Space::amount_virtual() const
     return amount;
 }
 
-size_t Space::amount_resident() const
+size_t AddressSpace::amount_resident() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
@@ -394,7 +394,7 @@ size_t Space::amount_resident() const
     return amount;
 }
 
-size_t Space::amount_shared() const
+size_t AddressSpace::amount_shared() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
@@ -408,7 +408,7 @@ size_t Space::amount_shared() const
     return amount;
 }
 
-size_t Space::amount_purgeable_volatile() const
+size_t AddressSpace::amount_purgeable_volatile() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
@@ -422,7 +422,7 @@ size_t Space::amount_purgeable_volatile() const
     return amount;
 }
 
-size_t Space::amount_purgeable_nonvolatile() const
+size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
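The amount_*() accessors all follow the same shape: take the space lock, then fold one statistic over every region (the FIXME comments note that pages shared between regions are double counted). A sketch of that accounting walk with hypothetical per-region counters:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct RegionStats {
        size_t size { 0 };           // virtual size of the region
        size_t resident_bytes { 0 }; // bytes backed by physical pages
    };

    struct SpaceStatsSketch {
        mutable std::mutex lock;
        std::vector<RegionStats> regions;

        size_t amount_virtual() const
        {
            std::lock_guard guard(lock);
            size_t amount = 0;
            for (auto const& region : regions)
                amount += region.size; // sum of all mapped virtual ranges
            return amount;
        }

        size_t amount_resident() const
        {
            std::lock_guard guard(lock);
            // Like the kernel version, this double counts physical pages
            // that back more than one region.
            size_t amount = 0;
            for (auto const& region : regions)
                amount += region.resident_bytes;
            return amount;
        }
    };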
Kernel/Memory/Space.h => Kernel/Memory/AddressSpace.h

@@ -16,10 +16,10 @@
 
 namespace Kernel::Memory {
 
-class Space {
+class AddressSpace {
 public:
-    static OwnPtr<Space> try_create(Process&, Space const* parent);
-    ~Space();
+    static OwnPtr<AddressSpace> try_create(Process&, AddressSpace const* parent);
+    ~AddressSpace();
 
     PageDirectory& page_directory() { return *m_page_directory; }
     const PageDirectory& page_directory() const { return *m_page_directory; }
@@ -66,7 +66,7 @@ public:
     size_t amount_purgeable_nonvolatile() const;
 
 private:
-    Space(Process&, NonnullRefPtr<PageDirectory>);
+    AddressSpace(Process&, NonnullRefPtr<PageDirectory>);
 
     Process* m_process { nullptr };
     mutable RecursiveSpinLock m_lock;
Kernel/Memory/MemoryManager.cpp

@@ -612,19 +612,19 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
     return nullptr;
 }
 
-Region* MemoryManager::find_user_region_from_vaddr_no_lock(Space& space, VirtualAddress vaddr)
+Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
 {
     VERIFY(space.get_lock().own_lock());
     return space.find_region_containing({ vaddr, 1 });
 }
 
-Region* MemoryManager::find_user_region_from_vaddr(Space& space, VirtualAddress vaddr)
+Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
 {
     ScopedSpinLock lock(space.get_lock());
     return find_user_region_from_vaddr_no_lock(space, vaddr);
 }
 
-void MemoryManager::validate_syscall_preconditions(Space& space, RegisterState const& regs)
+void MemoryManager::validate_syscall_preconditions(AddressSpace& space, RegisterState const& regs)
 {
     // We take the space lock once here and then use the no_lock variants
     // to avoid excessive spinlock recursion in this extremely common path.
@@ -933,7 +933,7 @@ void MemoryManager::enter_process_paging_scope(Process& process)
     enter_space(process.space());
 }
 
-void MemoryManager::enter_space(Space& space)
+void MemoryManager::enter_space(AddressSpace& space)
 {
     auto current_thread = Thread::current();
     VERIFY(current_thread != nullptr);
@@ -1039,7 +1039,7 @@ void MemoryManager::unquickmap_page()
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }
 
-bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
 {
     VERIFY(space.get_lock().own_lock());
 
@@ -1050,7 +1050,7 @@ bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr) const
     return region && region->is_user() && region->is_stack();
 }
 
-bool MemoryManager::validate_user_stack(Space& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
 {
     ScopedSpinLock lock(space.get_lock());
     return validate_user_stack_no_lock(space, vaddr);
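validate_user_stack() and its _no_lock twin show the locking convention used throughout this file: the public entry point acquires the space's lock exactly once, then calls a _no_lock variant that merely asserts the lock is held, so already-locked callers (such as validate_syscall_preconditions) can reuse the logic without recursive lock acquisition. A standalone sketch of the pattern, with std::mutex standing in for the kernel's RecursiveSpinLock:

    #include <cstdint>
    #include <mutex>

    class SpaceLockSketch {
    public:
        bool validate_user_stack(uintptr_t vaddr) const
        {
            std::lock_guard guard(m_lock); // lock taken exactly once here
            return validate_user_stack_no_lock(vaddr);
        }

        // Caller must already hold the lock; the kernel VERIFYs this via
        // get_lock().own_lock(), which std::mutex cannot express.
        bool validate_user_stack_no_lock(uintptr_t vaddr) const
        {
            return vaddr != 0; // stand-in for the real region checks
        }

    private:
        mutable std::mutex m_lock;
    };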
Kernel/Memory/MemoryManager.h

@@ -161,10 +161,10 @@ public:
     void unmap_ksyms_after_init();
 
     static void enter_process_paging_scope(Process&);
-    static void enter_space(Space&);
+    static void enter_space(AddressSpace&);
 
-    bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
-    bool validate_user_stack(Space&, VirtualAddress) const;
+    bool validate_user_stack_no_lock(AddressSpace&, VirtualAddress) const;
+    bool validate_user_stack(AddressSpace&, VirtualAddress) const;
 
     enum class ShouldZeroFill {
         No,
@@ -219,9 +219,9 @@ public:
             callback(vmobject);
     }
 
-    static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
-    static Region* find_user_region_from_vaddr_no_lock(Space&, VirtualAddress);
-    static void validate_syscall_preconditions(Space&, RegisterState const&);
+    static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress);
+    static Region* find_user_region_from_vaddr_no_lock(AddressSpace&, VirtualAddress);
+    static void validate_syscall_preconditions(AddressSpace&, RegisterState const&);
 
     void dump_kernel_regions();
Kernel/Memory/PageDirectory.h

@@ -41,17 +41,17 @@ public:
 
     VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
 
-    Space* space() { return m_space; }
-    const Space* space() const { return m_space; }
+    AddressSpace* space() { return m_space; }
+    const AddressSpace* space() const { return m_space; }
 
-    void set_space(Badge<Space>, Space& space) { m_space = &space; }
+    void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }
 
     RecursiveSpinLock& get_lock() { return m_lock; }
 
 private:
     PageDirectory();
 
-    Space* m_space { nullptr };
+    AddressSpace* m_space { nullptr };
     VirtualRangeAllocator m_range_allocator;
     VirtualRangeAllocator m_identity_range_allocator;
 #if ARCH(X86_64)
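set_space(Badge<AddressSpace>, ...) uses SerenityOS's Badge pattern: the first parameter can only be constructed by the badged class, so even though the setter is public, only AddressSpace can call it; that is what the `{}` argument in try_create earlier constructs. A compact sketch of the mechanism, with hypothetical stand-in classes:

    // Minimal re-creation of AK::Badge: only T can make a Badge<T>.
    template<typename T>
    class Badge {
        friend T;
        Badge() = default;
    };

    class AddressSpaceSketch;

    class PageDirectorySketch {
    public:
        // Public, but callable only with a Badge<AddressSpaceSketch>,
        // which nobody except AddressSpaceSketch can construct.
        void set_space(Badge<AddressSpaceSketch>, AddressSpaceSketch& space) { m_space = &space; }

    private:
        AddressSpaceSketch* m_space { nullptr };
    };

    class AddressSpaceSketch {
    public:
        void adopt(PageDirectorySketch& directory) { directory.set_space({}, *this); }
    };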
Kernel/Process.cpp

@@ -267,7 +267,7 @@ Process::Process(const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool
 
 KResult Process::attach_resources(RefPtr<Thread>& first_thread, Process* fork_parent)
 {
-    m_space = Memory::Space::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
+    m_space = Memory::AddressSpace::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
     if (!m_space)
         return ENOMEM;
Kernel/Process.h

@@ -20,7 +20,7 @@
 #include <Kernel/FileSystem/InodeMetadata.h>
 #include <Kernel/Forward.h>
 #include <Kernel/FutexQueue.h>
-#include <Kernel/Memory/Space.h>
+#include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/Mutex.h>
 #include <Kernel/PerformanceEventBuffer.h>
 #include <Kernel/ProcessGroup.h>
@@ -515,8 +515,8 @@ public:
 
     PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
 
-    Memory::Space& space() { return *m_space; }
-    Memory::Space const& space() const { return *m_space; }
+    Memory::AddressSpace& space() { return *m_space; }
+    Memory::AddressSpace const& space() const { return *m_space; }
 
     VirtualAddress signal_trampoline() const { return m_signal_trampoline; }
 
@@ -582,7 +582,7 @@ private:
 
     String m_name;
 
-    OwnPtr<Memory::Space> m_space;
+    OwnPtr<Memory::AddressSpace> m_space;
 
     RefPtr<ProcessGroup> m_pg;
Kernel/Syscalls/execve.cpp

@@ -30,7 +30,7 @@ namespace Kernel {
 extern Memory::Region* g_signal_trampoline_region;
 
 struct LoadResult {
-    OwnPtr<Memory::Space> space;
+    OwnPtr<Memory::AddressSpace> space;
     FlatPtr load_base { 0 };
     FlatPtr entry_eip { 0 };
     size_t size { 0 };
@@ -263,7 +263,7 @@ enum class ShouldAllowSyscalls {
     Yes,
 };
 
-static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_space, FileDescription& object_description,
+static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> new_space, FileDescription& object_description,
     FlatPtr load_offset, ShouldAllocateTls should_allocate_tls, ShouldAllowSyscalls should_allow_syscalls)
 {
     auto& inode = *(object_description.inode());
@@ -453,7 +453,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_space, FileDescription& object_description,
 KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_description,
     RefPtr<FileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header)
 {
-    auto new_space = Memory::Space::try_create(*this, nullptr);
+    auto new_space = Memory::AddressSpace::try_create(*this, nullptr);
     if (!new_space)
         return ENOMEM;
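Process::load() above hints at why LoadResult carries an owning pointer to the new address space: exec builds and populates a fresh AddressSpace off to the side, and the process's old space is replaced only once loading has succeeded, so a failed exec leaves the existing mappings untouched. A sketch of that commit-on-success flow, with hypothetical stand-ins rather than the kernel's actual helpers:

    #include <memory>

    struct AddressSpaceSketch { /* page tables, regions, ... */ };

    // Stand-in for the ELF loading stage; may fail without touching
    // the process's current space.
    bool load_program_into(AddressSpaceSketch&) { return true; }

    struct ProcessSketch {
        std::unique_ptr<AddressSpaceSketch> space;

        bool exec()
        {
            auto new_space = std::make_unique<AddressSpaceSketch>();
            if (!load_program_into(*new_space))
                return false;             // old space still intact on failure
            space = std::move(new_space); // commit: swap in the new space
            return true;
        }
    };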