1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 00:37:45 +00:00

Kernel: Factor address space management out of the Process class

This patch adds Space, a class representing a process's address space.

- Each Process has a Space.
- The Space owns the PageDirectory and all Regions in the Process.

This allows us to reorganize sys$execve() so that it constructs and
populates a new Space fully before committing to it.

Previously, we would construct the new address space while still
running in the old one, and encountering an error meant we had to do
tedious and error-prone rollback.

Those problems are now gone, replaced by what's hopefully a set of much
smaller problems and missing cleanups. :^)
This commit is contained in:
Andreas Kling 2021-02-08 15:45:40 +01:00
parent b2cba3036e
commit f1b5def8fd
27 changed files with 494 additions and 404 deletions

View file

@@ -47,6 +47,19 @@
namespace Kernel {
struct LoadResult {
OwnPtr<Space> space;
FlatPtr load_base { 0 };
FlatPtr entry_eip { 0 };
size_t size { 0 };
FlatPtr program_headers { 0 };
size_t num_program_headers { 0 };
WeakPtr<Region> tls_region;
size_t tls_size { 0 };
size_t tls_alignment { 0 };
WeakPtr<Region> stack_region;
};
static Vector<ELF::AuxiliaryValue> generate_auxiliary_vector(FlatPtr load_base, FlatPtr entry_eip, uid_t uid, uid_t euid, gid_t gid, gid_t egid, String executable_path, int main_program_fd);
static bool validate_stack_size(const Vector<String>& arguments, const Vector<String>& environment)
@@ -142,7 +155,7 @@ static KResultOr<FlatPtr> make_userspace_stack_for_main_thread(Region& region, V
return new_esp;
}
KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_description, FlatPtr load_offset, ShouldAllocateTls should_allocate_tls)
static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, FileDescription& object_description, FlatPtr load_offset, Process::ShouldAllocateTls should_allocate_tls)
{
auto& inode = *(object_description.inode());
auto vmobject = SharedInodeVMObject::create_with_inode(inode);
@@ -172,10 +185,12 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
String elf_name = object_description.absolute_path();
ASSERT(!Processor::current().in_critical());
MemoryManager::enter_space(*new_space);
KResult ph_load_result = KSuccess;
elf_image.for_each_program_header([&](const ELF::Image::ProgramHeader& program_header) {
if (program_header.type() == PT_TLS) {
ASSERT(should_allocate_tls == ShouldAllocateTls::Yes);
ASSERT(should_allocate_tls == Process::ShouldAllocateTls::Yes);
ASSERT(program_header.size_in_memory());
if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
@@ -184,13 +199,13 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
return IterationDecision::Break;
}
auto range = allocate_range({}, program_header.size_in_memory());
auto range = new_space->allocate_range({}, program_header.size_in_memory());
if (!range.has_value()) {
ph_load_result = ENOMEM;
return IterationDecision::Break;
}
auto region_or_error = allocate_region(range.value(), String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
auto region_or_error = new_space->allocate_region(range.value(), String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
if (region_or_error.is_error()) {
ph_load_result = region_or_error.error();
return IterationDecision::Break;
@@ -225,12 +240,12 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
if (program_header.is_writable())
prot |= PROT_WRITE;
auto region_name = String::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "");
auto range = allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory());
auto range = new_space->allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory());
if (!range.has_value()) {
ph_load_result = ENOMEM;
return IterationDecision::Break;
}
auto region_or_error = allocate_region(range.value(), region_name, prot, AllocationStrategy::Reserve);
auto region_or_error = new_space->allocate_region(range.value(), region_name, prot, AllocationStrategy::Reserve);
if (region_or_error.is_error()) {
ph_load_result = region_or_error.error();
return IterationDecision::Break;
@@ -262,12 +277,12 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
prot |= PROT_WRITE;
if (program_header.is_executable())
prot |= PROT_EXEC;
auto range = allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory());
auto range = new_space->allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory());
if (!range.has_value()) {
ph_load_result = ENOMEM;
return IterationDecision::Break;
}
auto region_or_error = allocate_region_with_vmobject(range.value(), *vmobject, program_header.offset(), elf_name, prot, true);
auto region_or_error = new_space->allocate_region_with_vmobject(range.value(), *vmobject, program_header.offset(), elf_name, prot, true);
if (region_or_error.is_error()) {
ph_load_result = region_or_error.error();
return IterationDecision::Break;
@@ -287,19 +302,20 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
return ENOEXEC;
}
auto stack_range = allocate_range({}, Thread::default_userspace_stack_size);
auto stack_range = new_space->allocate_range({}, Thread::default_userspace_stack_size);
if (!stack_range.has_value()) {
dbgln("do_exec: Failed to allocate VM range for stack");
return ENOMEM;
}
auto stack_region_or_error = allocate_region(stack_range.value(), "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
auto stack_region_or_error = new_space->allocate_region(stack_range.value(), "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
if (stack_region_or_error.is_error())
return stack_region_or_error.error();
auto& stack_region = *stack_region_or_error.value();
stack_region.set_stack(true);
return LoadResult {
move(new_space),
load_base_address,
elf_image.entry().offset(load_offset).get(),
executable_size,
@@ -312,44 +328,20 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
};
}
KResultOr<Process::LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_description, RefPtr<FileDescription> interpreter_description, const Elf32_Ehdr& main_program_header)
KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_description, RefPtr<FileDescription> interpreter_description, const Elf32_Ehdr& main_program_header)
{
RefPtr<PageDirectory> old_page_directory;
NonnullOwnPtrVector<Region> old_regions;
auto new_space = Space::create(*this, nullptr);
if (!new_space)
return ENOMEM;
{
auto page_directory = PageDirectory::create_for_userspace(*this);
if (!page_directory)
return ENOMEM;
// Need to make sure we don't swap contexts in the middle
ScopedCritical critical;
old_page_directory = move(m_page_directory);
old_regions = move(m_regions);
m_page_directory = page_directory.release_nonnull();
MM.enter_process_paging_scope(*this);
}
ArmedScopeGuard rollback_regions_guard([&]() {
ASSERT(Process::current() == this);
// Need to make sure we don't swap contexts in the middle
ScopedCritical critical;
// Explicitly clear m_regions *before* restoring the page directory,
// otherwise we may silently corrupt memory!
m_regions.clear();
// Now that we freed the regions, revert to the original page directory
// and restore the original regions
m_page_directory = move(old_page_directory);
MM.enter_process_paging_scope(*this);
m_regions = move(old_regions);
ScopeGuard space_guard([&]() {
MemoryManager::enter_process_paging_scope(*this);
});
if (interpreter_description.is_null()) {
auto result = load_elf_object(main_program_description, FlatPtr { 0 }, ShouldAllocateTls::Yes);
auto result = load_elf_object(new_space.release_nonnull(), main_program_description, FlatPtr { 0 }, ShouldAllocateTls::Yes);
if (result.is_error())
return result.error();
rollback_regions_guard.disarm();
return result;
}
@@ -358,7 +350,7 @@ KResultOr<Process::LoadResult> Process::load(NonnullRefPtr<FileDescription> main
return interpreter_load_offset.error();
}
auto interpreter_load_result = load_elf_object(*interpreter_description, interpreter_load_offset.value(), ShouldAllocateTls::No);
auto interpreter_load_result = load_elf_object(new_space.release_nonnull(), *interpreter_description, interpreter_load_offset.value(), ShouldAllocateTls::No);
if (interpreter_load_result.is_error())
return interpreter_load_result.error();
@@ -368,7 +360,6 @@ KResultOr<Process::LoadResult> Process::load(NonnullRefPtr<FileDescription> main
ASSERT(!interpreter_load_result.value().tls_alignment);
ASSERT(!interpreter_load_result.value().tls_size);
rollback_regions_guard.disarm();
return interpreter_load_result;
}
@@ -481,34 +472,22 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
if (parts.is_empty())
return -ENOENT;
auto main_program_metadata = main_program_description->metadata();
auto load_result_or_error = load(main_program_description, interpreter_description, main_program_header);
if (load_result_or_error.is_error()) {
dbgln("do_exec({}): Failed to load main program or interpreter", path);
return load_result_or_error.error();
}
// We commit to the new executable at this point. There is no turning back!
// Disable profiling temporarily in case it's running on this process.
TemporaryChange profiling_disabler(m_profiling, false);
// Mark this thread as the current thread that does exec
// No other thread from this process will be scheduled to run
auto current_thread = Thread::current();
m_exec_tid = current_thread->tid();
// NOTE: We switch credentials before altering the memory layout of the process.
// This ensures that ptrace access control takes the right credentials into account.
// FIXME: This still feels rickety. Perhaps it would be better to simply block ptrace
// clients until we're ready to be traced? Or reject them with EPERM?
auto main_program_metadata = main_program_description->metadata();
auto old_euid = m_euid;
auto old_suid = m_suid;
auto old_egid = m_egid;
auto old_sgid = m_sgid;
ArmedScopeGuard cred_restore_guard = [&] {
m_euid = old_euid;
m_suid = old_suid;
m_egid = old_egid;
m_sgid = old_sgid;
};
kill_threads_except_self();
auto& load_result = load_result_or_error.value();
bool executable_is_setid = false;
if (!(main_program_description->custody()->mount_flags() & MS_NOSUID)) {
@@ -522,17 +501,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
}
}
auto load_result_or_error = load(main_program_description, interpreter_description, main_program_header);
if (load_result_or_error.is_error()) {
dbgln("do_exec({}): Failed to load main program or interpreter", path);
return load_result_or_error.error();
}
auto& load_result = load_result_or_error.value();
// We can commit to the new credentials at this point.
cred_restore_guard.disarm();
kill_threads_except_self();
m_space = load_result.space.release_nonnull();
MemoryManager::enter_space(*m_space);
#if EXEC_DEBUG
dbgln("Memory layout after ELF load:");
@@ -549,20 +519,17 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
m_execpromises = 0;
m_has_execpromises = false;
m_enforces_syscall_regions = false;
m_veil_state = VeilState::None;
m_unveiled_paths.clear();
m_coredump_metadata.clear();
auto current_thread = Thread::current();
current_thread->set_default_signal_dispositions();
current_thread->clear_signals();
clear_futex_queues_on_exec();
m_region_lookup_cache = {};
set_dumpable(!executable_is_setid);
for (size_t i = 0; i < m_fds.size(); ++i) {
@@ -616,8 +583,10 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
// FIXME: PID/TID ISSUE
m_pid = new_main_thread->tid().value();
auto tsr_result = new_main_thread->make_thread_specific_region({});
if (tsr_result.is_error())
return tsr_result.error();
if (tsr_result.is_error()) {
// FIXME: We cannot fail this late. Refactor this so the allocation happens before we commit to the new executable.
ASSERT_NOT_REACHED();
}
new_main_thread->reset_fpu_state();
auto& tss = new_main_thread->m_tss;
@@ -629,7 +598,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
tss.gs = GDT_SELECTOR_TLS | 3;
tss.eip = load_result.entry_eip;
tss.esp = new_userspace_esp;
tss.cr3 = m_page_directory->cr3();
tss.cr3 = space().page_directory().cr3();
tss.ss2 = m_pid.value();
// Throw away any recorded performance events in this process.
@@ -870,8 +839,6 @@ int Process::exec(String path, Vector<String> arguments, Vector<String> environm
u32 prev_flags = 0;
int rc = do_exec(move(description), move(arguments), move(environment), move(interpreter_description), new_main_thread, prev_flags, *main_program_header);
m_exec_tid = 0;
if (rc < 0)
return rc;

View file

@@ -47,15 +47,14 @@ pid_t Process::sys$fork(RegisterState& regs)
child->m_has_execpromises = m_has_execpromises;
child->m_veil_state = m_veil_state;
child->m_unveiled_paths = m_unveiled_paths.deep_copy();
child->m_enforces_syscall_regions = m_enforces_syscall_regions;
child->m_fds = m_fds;
child->m_sid = m_sid;
child->m_pg = m_pg;
child->m_umask = m_umask;
child->m_extra_gids = m_extra_gids;
dbgln_if(FORK_DEBUG, "fork: child={}", child);
child->m_extra_gids = m_extra_gids;
child->space().set_enforces_syscall_regions(space().enforces_syscall_regions());
auto& child_tss = child_first_thread->m_tss;
child_tss.eax = 0; // fork() returns 0 in the child :^)
@@ -80,8 +79,8 @@ pid_t Process::sys$fork(RegisterState& regs)
#endif
{
ScopedSpinLock lock(m_lock);
for (auto& region : m_regions) {
ScopedSpinLock lock(space().get_lock());
for (auto& region : space().regions()) {
dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", &region, region.name(), region.vaddr());
auto region_clone = region.clone(*child);
if (!region_clone) {
@@ -90,8 +89,8 @@ pid_t Process::sys$fork(RegisterState& regs)
return -ENOMEM;
}
auto& child_region = child->add_region(region_clone.release_nonnull());
child_region.map(child->page_directory());
auto& child_region = child->space().add_region(region_clone.release_nonnull());
child_region.map(child->space().page_directory());
if (&region == m_master_tls_region.unsafe_ptr())
child->m_master_tls_region = child_region;

View file

@@ -147,7 +147,7 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
if (!is_private) {
if (!Kernel::is_user_range(VirtualAddress(user_address_or_offset), sizeof(u32)))
return -EFAULT;
auto region = MM.find_region_from_vaddr(*Process::current(), VirtualAddress(user_address_or_offset));
auto region = MM.find_region_from_vaddr(space(), VirtualAddress(user_address_or_offset));
if (!region)
return -EFAULT;
vmobject = region->vmobject();
@@ -159,7 +159,7 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
case FUTEX_WAKE_OP: {
if (!Kernel::is_user_range(VirtualAddress(user_address_or_offset2), sizeof(u32)))
return -EFAULT;
auto region2 = MM.find_region_from_vaddr(*Process::current(), VirtualAddress(user_address_or_offset2));
auto region2 = MM.find_region_from_vaddr(space(), VirtualAddress(user_address_or_offset2));
if (!region2)
return -EFAULT;
vmobject2 = region2->vmobject();

View file

@@ -32,7 +32,7 @@ namespace Kernel {
int Process::sys$get_stack_bounds(FlatPtr* user_stack_base, size_t* user_stack_size)
{
FlatPtr stack_pointer = Thread::current()->get_register_dump_from_stack().userspace_esp;
auto* stack_region = MM.find_region_from_vaddr(*this, VirtualAddress(stack_pointer));
auto* stack_region = MM.find_region_from_vaddr(space(), VirtualAddress(stack_pointer));
if (!stack_region) {
ASSERT_NOT_REACHED();
return -EINVAL;

View file

@@ -204,13 +204,13 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
Optional<Range> range;
if (map_randomized) {
range = page_directory().range_allocator().allocate_randomized(PAGE_ROUND_UP(size), alignment);
range = space().page_directory().range_allocator().allocate_randomized(PAGE_ROUND_UP(size), alignment);
} else {
range = allocate_range(VirtualAddress(addr), size, alignment);
range = space().allocate_range(VirtualAddress(addr), size, alignment);
if (!range.has_value()) {
if (addr && !map_fixed) {
// If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
range = allocate_range({}, size, alignment);
range = space().allocate_range({}, size, alignment);
}
}
}
@@ -220,7 +220,7 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
if (map_anonymous) {
auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
auto region_or_error = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, strategy);
auto region_or_error = space().allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, strategy);
if (region_or_error.is_error())
return (void*)region_or_error.error().error();
region = region_or_error.value();
@@ -280,7 +280,7 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
Range range_to_mprotect = { VirtualAddress(addr), size };
if (auto* whole_region = find_region_from_range(range_to_mprotect)) {
if (auto* whole_region = space().find_region_from_range(range_to_mprotect)) {
if (!whole_region->is_mmap())
return -EPERM;
if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
@@ -300,7 +300,7 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
}
// Check if we can carve out the desired range from an existing region
if (auto* old_region = find_region_containing(range_to_mprotect)) {
if (auto* old_region = space().find_region_containing(range_to_mprotect)) {
if (!old_region->is_mmap())
return -EPERM;
if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
@@ -314,23 +314,23 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
// This vector is the region(s) adjacent to our range.
// We need to allocate a new region for the range we wanted to change permission bits on.
auto adjacent_regions = split_region_around_range(*old_region, range_to_mprotect);
auto adjacent_regions = space().split_region_around_range(*old_region, range_to_mprotect);
size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (range_to_mprotect.base().get() - old_region->range().base().get());
auto& new_region = allocate_split_region(*old_region, range_to_mprotect, new_range_offset_in_vmobject);
auto& new_region = space().allocate_split_region(*old_region, range_to_mprotect, new_range_offset_in_vmobject);
new_region.set_readable(prot & PROT_READ);
new_region.set_writable(prot & PROT_WRITE);
new_region.set_executable(prot & PROT_EXEC);
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
deallocate_region(*old_region);
space().deallocate_region(*old_region);
// Map the new regions using our page directory (they were just allocated and don't have one).
for (auto* adjacent_region : adjacent_regions) {
adjacent_region->map(page_directory());
adjacent_region->map(space().page_directory());
}
new_region.map(page_directory());
new_region.map(space().page_directory());
return 0;
}
@@ -349,7 +349,7 @@ int Process::sys$madvise(void* address, size_t size, int advice)
if (!is_user_range(VirtualAddress(address), size))
return -EFAULT;
auto* region = find_region_from_range({ VirtualAddress(address), size });
auto* region = space().find_region_from_range({ VirtualAddress(address), size });
if (!region)
return -EINVAL;
if (!region->is_mmap())
@@ -397,7 +397,7 @@ int Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*
if (name.is_null())
return -EFAULT;
auto* region = find_region_from_range({ VirtualAddress(params.addr), params.size });
auto* region = space().find_region_from_range({ VirtualAddress(params.addr), params.size });
if (!region)
return -EINVAL;
if (!region->is_mmap())
@@ -406,24 +406,6 @@ int Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*
return 0;
}
// Carve out a virtual address range from a region and return the two regions on either side
Vector<Region*, 2> Process::split_region_around_range(const Region& source_region, const Range& desired_range)
{
Range old_region_range = source_region.range();
auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
ASSERT(!remaining_ranges_after_unmap.is_empty());
auto make_replacement_region = [&](const Range& new_range) -> Region& {
ASSERT(old_region_range.contains(new_range));
size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
return allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);
};
Vector<Region*, 2> new_regions;
for (auto& new_range : remaining_ranges_after_unmap) {
new_regions.unchecked_append(&make_replacement_region(new_range));
}
return new_regions;
}
int Process::sys$munmap(void* addr, size_t size)
{
REQUIRE_PROMISE(stdio);
@@ -435,30 +417,30 @@ int Process::sys$munmap(void* addr, size_t size)
return -EFAULT;
Range range_to_unmap { VirtualAddress(addr), size };
if (auto* whole_region = find_region_from_range(range_to_unmap)) {
if (auto* whole_region = space().find_region_from_range(range_to_unmap)) {
if (!whole_region->is_mmap())
return -EPERM;
bool success = deallocate_region(*whole_region);
bool success = space().deallocate_region(*whole_region);
ASSERT(success);
return 0;
}
if (auto* old_region = find_region_containing(range_to_unmap)) {
if (auto* old_region = space().find_region_containing(range_to_unmap)) {
if (!old_region->is_mmap())
return -EPERM;
auto new_regions = split_region_around_range(*old_region, range_to_unmap);
auto new_regions = space().split_region_around_range(*old_region, range_to_unmap);
// We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
deallocate_region(*old_region);
space().deallocate_region(*old_region);
// Instead we give back the unwanted VM manually.
page_directory().range_allocator().deallocate(range_to_unmap);
space().page_directory().range_allocator().deallocate(range_to_unmap);
// And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
for (auto* new_region : new_regions) {
new_region->map(page_directory());
new_region->map(space().page_directory());
}
return 0;
}
@@ -476,7 +458,7 @@ void* Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_param
if (!copy_from_user(&params, user_params))
return (void*)-EFAULT;
auto* old_region = find_region_from_range(Range { VirtualAddress(params.old_address), params.old_size });
auto* old_region = space().find_region_from_range(Range { VirtualAddress(params.old_address), params.old_size });
if (!old_region)
return (void*)-EINVAL;
@@ -491,11 +473,11 @@ void* Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_param
// Unmap without deallocating the VM range since we're going to reuse it.
old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
deallocate_region(*old_region);
space().deallocate_region(*old_region);
auto new_vmobject = PrivateInodeVMObject::create_with_inode(inode);
auto new_region_or_error = allocate_region_with_vmobject(range, new_vmobject, 0, old_name, old_prot, false);
auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject, 0, old_name, old_prot, false);
if (new_region_or_error.is_error())
return (void*)new_region_or_error.error().error();
auto& new_region = *new_region_or_error.value();
@@ -527,11 +509,11 @@ void* Process::sys$allocate_tls(size_t size)
});
ASSERT(main_thread);
auto range = allocate_range({}, size);
auto range = space().allocate_range({}, size);
if (!range.has_value())
return (void*)-ENOMEM;
auto region_or_error = allocate_region(range.value(), String(), PROT_READ | PROT_WRITE);
auto region_or_error = space().allocate_region(range.value(), String(), PROT_READ | PROT_WRITE);
if (region_or_error.is_error())
return (void*)region_or_error.error().error();
@@ -552,15 +534,15 @@ void* Process::sys$allocate_tls(size_t size)
int Process::sys$msyscall(void* address)
{
if (m_enforces_syscall_regions)
if (space().enforces_syscall_regions())
return -EPERM;
if (!address) {
m_enforces_syscall_regions = true;
space().set_enforces_syscall_regions(true);
return 0;
}
auto* region = find_region_containing(Range { VirtualAddress { address }, 1 });
auto* region = space().find_region_containing(Range { VirtualAddress { address }, 1 });
if (!region)
return -EINVAL;

View file

@@ -73,7 +73,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
{
Range range = { VirtualAddress(address), sizeof(u32) };
auto* region = find_region_containing(range);
auto* region = space().find_region_containing(range);
if (!region)
return EFAULT;
ProcessPagingScope scope(*this);

View file

@@ -80,7 +80,7 @@ int Process::sys$create_thread(void* (*entry)(void*), Userspace<const Syscall::S
auto& tss = thread->tss();
tss.eip = (FlatPtr)entry;
tss.eflags = 0x0202;
tss.cr3 = page_directory().cr3();
tss.cr3 = space().page_directory().cr3();
tss.esp = (u32)user_stack_address;
auto tsr_result = thread->make_thread_specific_region({});