
Kernel: No lock validate_user_stack variant, switch to Space as argument

The entire Process is not needed; just require the caller to pass in the
Space. Also provide a no_lock variant for use when the VM/Space lock is
already held, to avoid unnecessary recursive spinlock acquisitions.
Author:    Brian Gianforcaro, 2021-07-18 08:53:37 -07:00
Committer: Gunnar Beutner
Parent:    59b6169b51
Commit:    308396bca1
5 changed files with 16 additions and 7 deletions
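
The change follows a common kernel pattern: a public function that acquires a lock, plus a no_lock worker that asserts the lock is already held, so callers that already own the lock can skip re-acquiring it. Below is a minimal, self-contained sketch of that pattern in standard C++; the SpinLock and Space types, the validation body, and all names are simplified stand-ins for illustration, not the kernel's actual implementations.

#include <atomic>
#include <cassert>
#include <thread>

// Toy spinlock that remembers its owner, roughly mirroring own_lock()
// on the kernel's spinlock. Simplified: the owner field is only examined
// by the owning thread in this demo.
class SpinLock {
public:
    void lock()
    {
        while (m_flag.test_and_set(std::memory_order_acquire))
            std::this_thread::yield();
        m_owner = std::this_thread::get_id();
    }
    void unlock()
    {
        m_owner = std::thread::id {};
        m_flag.clear(std::memory_order_release);
    }
    bool own_lock() const { return m_owner == std::this_thread::get_id(); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    std::thread::id m_owner {};
};

// Stand-in for the per-process address space object.
struct Space {
    SpinLock& get_lock() { return m_lock; }
    SpinLock m_lock;
};

// no_lock variant: asserts the caller already holds the Space lock.
bool validate_user_stack_no_lock(Space& space)
{
    assert(space.get_lock().own_lock()); // the kernel uses VERIFY() here
    // ... perform the actual region lookup under the already-held lock ...
    return true;
}

// Locking wrapper: acquires the lock once, then delegates to the worker.
bool validate_user_stack(Space& space)
{
    space.get_lock().lock();
    bool ok = validate_user_stack_no_lock(space);
    space.get_lock().unlock();
    return ok;
}

int main()
{
    Space space;

    // Caller does not hold the lock: use the locking wrapper.
    assert(validate_user_stack(space));

    // Caller already holds the lock: call the no_lock variant directly,
    // avoiding the recursive acquisition the commit message mentions.
    space.get_lock().lock();
    assert(validate_user_stack_no_lock(space));
    space.get_lock().unlock();
}

A caller that took the lock and then called the locking wrapper would deadlock on a non-recursive spinlock, or pay an extra acquire/release on a recursive one; the no_lock variant avoids both.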


@@ -311,7 +311,7 @@ void page_fault_handler(TrapFrame* trap)
     };
     VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
-    if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process(), userspace_sp)) {
+    if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process().space(), userspace_sp)) {
         dbgln("Invalid stack pointer: {}", userspace_sp);
         handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
     }


@@ -200,7 +200,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
 #else
     userspace_sp = VirtualAddress { regs.userspace_rsp };
 #endif
-    if (!MM.validate_user_stack(process, userspace_sp)) {
+    if (!MM.validate_user_stack(process.space(), userspace_sp)) {
         dbgln("Invalid stack pointer: {:p}", userspace_sp);
         handle_crash(regs, "Bad stack on syscall entry", SIGSTKFLT);
     }


@@ -33,7 +33,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
     if (user_sp.has_overflow())
         return EOVERFLOW;
-    if (!MM.validate_user_stack(*this, VirtualAddress(user_sp.value() - 4)))
+    if (!MM.validate_user_stack(this->space(), VirtualAddress(user_sp.value() - 4)))
         return EFAULT;
     // FIXME: return EAGAIN if Thread::all_threads().size() is greater than PTHREAD_THREADS_MAX


@@ -1032,15 +1032,23 @@ void MemoryManager::unquickmap_page()
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }
 
-bool MemoryManager::validate_user_stack(Process const& process, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr) const
 {
+    VERIFY(space.get_lock().own_lock());
+
     if (!is_user_address(vaddr))
         return false;
-    ScopedSpinLock lock(s_mm_lock);
-    auto* region = find_user_region_from_vaddr(const_cast<Process&>(process).space(), vaddr);
+
+    auto* region = find_user_region_from_vaddr(space, vaddr);
     return region && region->is_user() && region->is_stack();
 }
 
+bool MemoryManager::validate_user_stack(Space& space, VirtualAddress vaddr) const
+{
+    ScopedSpinLock lock(space.get_lock());
+    return validate_user_stack_no_lock(space, vaddr);
+}
+
 void MemoryManager::register_vmobject(VMObject& vmobject)
 {
     ScopedSpinLock lock(s_mm_lock);
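
With this split, a caller that already holds the Space lock uses the no_lock variant directly instead of re-entering the lock through the public wrapper. A hypothetical call site (the surrounding code here is invented for illustration; only ScopedSpinLock, space.get_lock(), and the new function come from this commit) might look like:

// Hypothetical caller, for illustration only: the Space lock is already
// held, so the no_lock variant avoids a recursive spinlock acquisition.
ScopedSpinLock lock(space.get_lock());
// ... other work that required the lock ...
if (!MM.validate_user_stack_no_lock(space, userspace_sp))
    return false; // invented error handling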


@@ -134,7 +134,8 @@ public:
     static void enter_process_paging_scope(Process&);
     static void enter_space(Space&);
-    bool validate_user_stack(Process const&, VirtualAddress) const;
+    bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
+    bool validate_user_stack(Space&, VirtualAddress) const;
     enum class ShouldZeroFill {
         No,