mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 18:57:35 +00:00
Kernel: Rename Process::space() => Process::address_space()
We commonly talk about "a process's address space" so let's nudge the code towards matching how we talk about it. :^)
This commit is contained in:
parent
b7476d7a1b
commit
208147c77c
24 changed files with 80 additions and 80 deletions
|
@ -672,7 +672,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
|
|||
regs.rip = load_result.entry_eip;
|
||||
regs.rsp = new_userspace_sp;
|
||||
#endif
|
||||
regs.cr3 = space().page_directory().cr3();
|
||||
regs.cr3 = address_space().page_directory().cr3();
|
||||
|
||||
{
|
||||
TemporaryChange profiling_disabler(m_profiling, was_profiling);
|
||||
|
|
|
@ -42,7 +42,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
|
|||
}
|
||||
|
||||
dbgln_if(FORK_DEBUG, "fork: child={}", child);
|
||||
child->space().set_enforces_syscall_regions(space().enforces_syscall_regions());
|
||||
child->address_space().set_enforces_syscall_regions(address_space().enforces_syscall_regions());
|
||||
|
||||
#if ARCH(I386)
|
||||
auto& child_regs = child_first_thread->m_regs;
|
||||
|
@ -92,8 +92,8 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
|
|||
#endif
|
||||
|
||||
{
|
||||
ScopedSpinLock lock(space().get_lock());
|
||||
for (auto& region : space().regions()) {
|
||||
ScopedSpinLock lock(address_space().get_lock());
|
||||
for (auto& region : address_space().regions()) {
|
||||
dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
|
||||
auto region_clone = region->clone();
|
||||
if (!region_clone) {
|
||||
|
@ -102,13 +102,13 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
|
|||
return ENOMEM;
|
||||
}
|
||||
|
||||
auto* child_region = child->space().add_region(region_clone.release_nonnull());
|
||||
auto* child_region = child->address_space().add_region(region_clone.release_nonnull());
|
||||
if (!child_region) {
|
||||
dbgln("fork: Cannot add region, insufficient memory");
|
||||
// TODO: tear down new process?
|
||||
return ENOMEM;
|
||||
}
|
||||
child_region->map(child->space().page_directory(), Memory::ShouldFlushTLB::No);
|
||||
child_region->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No);
|
||||
|
||||
if (region == m_master_tls_region.unsafe_ptr())
|
||||
child->m_master_tls_region = child_region;
|
||||
|
|
|
@ -129,7 +129,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
|
|||
// acquiring the queue lock
|
||||
RefPtr<Memory::VMObject> vmobject, vmobject2;
|
||||
if (!is_private) {
|
||||
auto region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
|
||||
auto region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
|
||||
if (!region)
|
||||
return EFAULT;
|
||||
vmobject = region->vmobject();
|
||||
|
@ -139,7 +139,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
|
|||
case FUTEX_REQUEUE:
|
||||
case FUTEX_CMP_REQUEUE:
|
||||
case FUTEX_WAKE_OP: {
|
||||
auto region2 = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
|
||||
auto region2 = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
|
||||
if (!region2)
|
||||
return EFAULT;
|
||||
vmobject2 = region2->vmobject();
|
||||
|
|
|
@ -14,7 +14,7 @@ KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_
|
|||
VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
|
||||
auto& regs = Thread::current()->get_register_dump_from_stack();
|
||||
FlatPtr stack_pointer = regs.userspace_sp();
|
||||
auto* stack_region = space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
|
||||
auto* stack_region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
|
||||
|
||||
// The syscall handler should have killed us if we had an invalid stack pointer.
|
||||
VERIFY(stack_region);
|
||||
|
|
|
@ -202,13 +202,13 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
|
|||
Optional<Memory::VirtualRange> range;
|
||||
|
||||
if (map_randomized) {
|
||||
range = space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
|
||||
range = address_space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
|
||||
} else {
|
||||
range = space().allocate_range(VirtualAddress(addr), size, alignment);
|
||||
range = address_space().allocate_range(VirtualAddress(addr), size, alignment);
|
||||
if (!range.has_value()) {
|
||||
if (addr && !map_fixed) {
|
||||
// If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
|
||||
range = space().allocate_range({}, size, alignment);
|
||||
range = address_space().allocate_range({}, size, alignment);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
|
|||
vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
|
||||
if (!vmobject)
|
||||
return ENOMEM;
|
||||
auto region_or_error = space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
|
||||
auto region_or_error = address_space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
|
||||
if (region_or_error.is_error())
|
||||
return region_or_error.error().error();
|
||||
region = region_or_error.value();
|
||||
|
@ -309,7 +309,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
if (!is_user_range(range_to_mprotect))
|
||||
return EFAULT;
|
||||
|
||||
if (auto* whole_region = space().find_region_from_range(range_to_mprotect)) {
|
||||
if (auto* whole_region = address_space().find_region_from_range(range_to_mprotect)) {
|
||||
if (!whole_region->is_mmap())
|
||||
return EPERM;
|
||||
if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
|
||||
|
@ -329,7 +329,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
}
|
||||
|
||||
// Check if we can carve out the desired range from an existing region
|
||||
if (auto* old_region = space().find_region_containing(range_to_mprotect)) {
|
||||
if (auto* old_region = address_space().find_region_containing(range_to_mprotect)) {
|
||||
if (!old_region->is_mmap())
|
||||
return EPERM;
|
||||
if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
|
||||
|
@ -343,20 +343,20 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
|
||||
// Remove the old region from our regions tree, since we're going to add another region
|
||||
// with the exact same start address, but don't deallocate it yet
|
||||
auto region = space().take_region(*old_region);
|
||||
auto region = address_space().take_region(*old_region);
|
||||
|
||||
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
|
||||
region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
|
||||
|
||||
// This vector is the region(s) adjacent to our range.
|
||||
// We need to allocate a new region for the range we wanted to change permission bits on.
|
||||
auto adjacent_regions_or_error = space().try_split_region_around_range(*region, range_to_mprotect);
|
||||
auto adjacent_regions_or_error = address_space().try_split_region_around_range(*region, range_to_mprotect);
|
||||
if (adjacent_regions_or_error.is_error())
|
||||
return adjacent_regions_or_error.error();
|
||||
auto& adjacent_regions = adjacent_regions_or_error.value();
|
||||
|
||||
size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
|
||||
auto new_region_or_error = space().try_allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
|
||||
auto new_region_or_error = address_space().try_allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
|
||||
if (new_region_or_error.is_error())
|
||||
return new_region_or_error.error();
|
||||
auto& new_region = *new_region_or_error.value();
|
||||
|
@ -366,13 +366,13 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
|
||||
// Map the new regions using our page directory (they were just allocated and don't have one).
|
||||
for (auto* adjacent_region : adjacent_regions) {
|
||||
adjacent_region->map(space().page_directory());
|
||||
adjacent_region->map(address_space().page_directory());
|
||||
}
|
||||
new_region.map(space().page_directory());
|
||||
new_region.map(address_space().page_directory());
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (const auto& regions = space().find_regions_intersecting(range_to_mprotect); regions.size()) {
|
||||
if (const auto& regions = address_space().find_regions_intersecting(range_to_mprotect); regions.size()) {
|
||||
size_t full_size_found = 0;
|
||||
// first check before doing anything
|
||||
for (const auto* region : regions) {
|
||||
|
@ -406,14 +406,14 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
}
|
||||
// Remove the old region from our regions tree, since we're going to add another region
|
||||
// with the exact same start address, but don't deallocate it yet
|
||||
auto region = space().take_region(*old_region);
|
||||
auto region = address_space().take_region(*old_region);
|
||||
|
||||
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
|
||||
region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
|
||||
|
||||
// This vector is the region(s) adjacent to our range.
|
||||
// We need to allocate a new region for the range we wanted to change permission bits on.
|
||||
auto adjacent_regions_or_error = space().try_split_region_around_range(*old_region, intersection_to_mprotect);
|
||||
auto adjacent_regions_or_error = address_space().try_split_region_around_range(*old_region, intersection_to_mprotect);
|
||||
if (adjacent_regions_or_error.is_error())
|
||||
return adjacent_regions_or_error.error();
|
||||
auto& adjacent_regions = adjacent_regions_or_error.value();
|
||||
|
@ -422,7 +422,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
VERIFY(adjacent_regions.size() == 1);
|
||||
|
||||
size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_mprotect.base().get() - old_region->range().base().get());
|
||||
auto new_region_or_error = space().try_allocate_split_region(*region, intersection_to_mprotect, new_range_offset_in_vmobject);
|
||||
auto new_region_or_error = address_space().try_allocate_split_region(*region, intersection_to_mprotect, new_range_offset_in_vmobject);
|
||||
if (new_region_or_error.is_error())
|
||||
return new_region_or_error.error();
|
||||
|
||||
|
@ -433,9 +433,9 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
|
|||
|
||||
// Map the new region using our page directory (they were just allocated and don't have one) if any.
|
||||
if (adjacent_regions.size())
|
||||
adjacent_regions[0]->map(space().page_directory());
|
||||
adjacent_regions[0]->map(address_space().page_directory());
|
||||
|
||||
new_region.map(space().page_directory());
|
||||
new_region.map(address_space().page_directory());
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -461,7 +461,7 @@ KResultOr<FlatPtr> Process::sys$madvise(Userspace<void*> address, size_t size, i
|
|||
if (!is_user_range(range_to_madvise))
|
||||
return EFAULT;
|
||||
|
||||
auto* region = space().find_region_from_range(range_to_madvise);
|
||||
auto* region = address_space().find_region_from_range(range_to_madvise);
|
||||
if (!region)
|
||||
return EINVAL;
|
||||
if (!region->is_mmap())
|
||||
|
@ -508,7 +508,7 @@ KResultOr<FlatPtr> Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mm
|
|||
|
||||
auto range = range_or_error.value();
|
||||
|
||||
auto* region = space().find_region_from_range(range);
|
||||
auto* region = address_space().find_region_from_range(range);
|
||||
if (!region)
|
||||
return EINVAL;
|
||||
if (!region->is_mmap())
|
||||
|
@ -525,7 +525,7 @@ KResultOr<FlatPtr> Process::sys$munmap(Userspace<void*> addr, size_t size)
|
|||
VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
|
||||
REQUIRE_PROMISE(stdio);
|
||||
|
||||
auto result = space().unmap_mmap_range(VirtualAddress { addr }, size);
|
||||
auto result = address_space().unmap_mmap_range(VirtualAddress { addr }, size);
|
||||
if (result.is_error())
|
||||
return result;
|
||||
return 0;
|
||||
|
@ -546,7 +546,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
|
|||
|
||||
auto old_range = range_or_error.value();
|
||||
|
||||
auto* old_region = space().find_region_from_range(old_range);
|
||||
auto* old_region = address_space().find_region_from_range(old_range);
|
||||
if (!old_region)
|
||||
return EINVAL;
|
||||
|
||||
|
@ -567,9 +567,9 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
|
|||
|
||||
// Unmap without deallocating the VM range since we're going to reuse it.
|
||||
old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
|
||||
space().deallocate_region(*old_region);
|
||||
address_space().deallocate_region(*old_region);
|
||||
|
||||
auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
|
||||
auto new_region_or_error = address_space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
|
||||
if (new_region_or_error.is_error())
|
||||
return new_region_or_error.error().error();
|
||||
auto& new_region = *new_region_or_error.value();
|
||||
|
@ -608,11 +608,11 @@ KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data
|
|||
if (multiple_threads)
|
||||
return EINVAL;
|
||||
|
||||
auto range = space().allocate_range({}, size);
|
||||
auto range = address_space().allocate_range({}, size);
|
||||
if (!range.has_value())
|
||||
return ENOMEM;
|
||||
|
||||
auto region_or_error = space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE);
|
||||
auto region_or_error = address_space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE);
|
||||
if (region_or_error.is_error())
|
||||
return region_or_error.error().error();
|
||||
|
||||
|
@ -646,18 +646,18 @@ KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data
|
|||
KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
|
||||
{
|
||||
VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
|
||||
if (space().enforces_syscall_regions())
|
||||
if (address_space().enforces_syscall_regions())
|
||||
return EPERM;
|
||||
|
||||
if (!address) {
|
||||
space().set_enforces_syscall_regions(true);
|
||||
address_space().set_enforces_syscall_regions(true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!Memory::is_user_address(VirtualAddress { address }))
|
||||
return EFAULT;
|
||||
|
||||
auto* region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
|
||||
auto* region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
|
||||
if (!region)
|
||||
return EINVAL;
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
|
|||
KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
|
||||
{
|
||||
Memory::VirtualRange range = { VirtualAddress(address), sizeof(u32) };
|
||||
auto* region = space().find_region_containing(range);
|
||||
auto* region = address_space().find_region_containing(range);
|
||||
if (!region)
|
||||
return EFAULT;
|
||||
ProcessPagingScope scope(*this);
|
||||
|
|
|
@ -31,7 +31,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
|
|||
if (user_sp.has_overflow())
|
||||
return EOVERFLOW;
|
||||
|
||||
if (!MM.validate_user_stack(this->space(), VirtualAddress(user_sp.value() - 4)))
|
||||
if (!MM.validate_user_stack(this->address_space(), VirtualAddress(user_sp.value() - 4)))
|
||||
return EFAULT;
|
||||
|
||||
// FIXME: return EAGAIN if Thread::all_threads().size() is greater than PTHREAD_THREADS_MAX
|
||||
|
@ -73,7 +73,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
|
|||
regs.rdx = params.rdx;
|
||||
regs.rcx = params.rcx;
|
||||
#endif
|
||||
regs.cr3 = space().page_directory().cr3();
|
||||
regs.cr3 = address_space().page_directory().cr3();
|
||||
|
||||
auto tsr_result = thread->make_thread_specific_region({});
|
||||
if (tsr_result.is_error())
|
||||
|
@ -102,7 +102,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value, Userspace<void*> stac
|
|||
PerformanceManager::add_thread_exit_event(*current_thread);
|
||||
|
||||
if (stack_location) {
|
||||
auto unmap_result = space().unmap_mmap_range(VirtualAddress { stack_location }, stack_size);
|
||||
auto unmap_result = address_space().unmap_mmap_range(VirtualAddress { stack_location }, stack_size);
|
||||
if (unmap_result.is_error())
|
||||
dbgln("Failed to unmap thread stack, terminating thread anyway. Error code: {}", unmap_result.error());
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue