
Kernel: Wrap process address spaces in SpinlockProtected

This forces anyone who wants to inspect and/or manipulate an address
space to lock it, and it replaces the previous, more flimsy manual
spinlock use.

Note that pointers *into* the address space are not safe to use after
you unlock the space. We've got many issues like this, and we'll have
to track those down as well.
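
A rough sketch of the access pattern this enables (not the kernel's actual
SpinlockProtected implementation; LockProtected, ToySpace, and main() below
are made-up stand-ins, and std::mutex fills in for the kernel spinlock so
the example builds in userspace):

#include <cstdio>
#include <mutex>
#include <utility>
#include <vector>

// Simplified stand-in for SpinlockProtected<T>: the wrapped value can only
// be reached through with(), which holds the lock for the duration of the
// callback.
template<typename T>
class LockProtected {
public:
    template<typename... Args>
    explicit LockProtected(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value;
};

// Made-up, heavily reduced "address space": a list of integers standing in
// for memory regions.
struct ToySpace {
    std::vector<int> regions { 1, 2, 3 };
};

int main()
{
    LockProtected<ToySpace> space;

    // Fine: everything derived from the space stays inside the locked scope.
    int total = space.with([](ToySpace& s) {
        int sum = 0;
        for (int region : s.regions)
            sum += region;
        return sum;
    });
    std::printf("sum of regions: %d\n", total);

    // Dangerous: the pointer escapes the locked scope, so another thread
    // could mutate or free the region before it is used; this is the class
    // of bug the note above warns about.
    int* escaped = space.with([](ToySpace& s) { return &s.regions[0]; });
    (void)escaped; // must not be dereferenced outside with()

    return 0;
}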
Andreas Kling 2022-08-23 17:58:05 +02:00
parent d6ef18f587
commit cf16b2c8e6
38 changed files with 708 additions and 627 deletions


@@ -14,16 +14,18 @@ ErrorOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_ba
     VERIFY_NO_PROCESS_BIG_LOCK(this);
     auto& regs = Thread::current()->get_register_dump_from_stack();
     FlatPtr stack_pointer = regs.userspace_sp();
-    auto* stack_region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
-    // The syscall handler should have killed us if we had an invalid stack pointer.
-    VERIFY(stack_region);
-    FlatPtr stack_base = stack_region->range().base().get();
-    size_t stack_size = stack_region->size();
-    TRY(copy_to_user(user_stack_base, &stack_base));
-    TRY(copy_to_user(user_stack_size, &stack_size));
-    return 0;
+    return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
+        auto* stack_region = space->find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
+        // The syscall handler should have killed us if we had an invalid stack pointer.
+        VERIFY(stack_region);
+        FlatPtr stack_base = stack_region->range().base().get();
+        size_t stack_size = stack_region->size();
+        TRY(copy_to_user(user_stack_base, &stack_base));
+        TRY(copy_to_user(user_stack_size, &stack_size));
+        return 0;
+    });
 }