
Kernel+LibC: Implement sigaltstack()

This is required for compiling Wine for SerenityOS.
Idan Horowitz 2021-10-28 23:33:41 +03:00
parent d5d0eb45bf
commit f415218afe
9 changed files with 229 additions and 2 deletions
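For context, the new syscall is exercised from userspace roughly like the hypothetical test program below (not part of this commit): an anonymous, private mapping is registered with sigaltstack(), and a handler installed with SA_ONSTACK then runs on that stack.

// Hypothetical userspace usage sketch; names and sizes are illustrative only.
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void handler(int)
{
    // With SA_ONSTACK set, this runs on the alternative stack registered below.
    _exit(0);
}

int main()
{
    // remap_range_as_stack() requires an anonymous, non-shared mmap region,
    // so a plain MAP_ANONYMOUS | MAP_PRIVATE mapping is used here.
    void* stack = mmap(nullptr, SIGSTKSZ, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (stack == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    stack_t ss = {};
    ss.ss_sp = stack;
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;
    if (sigaltstack(&ss, nullptr) < 0) {
        perror("sigaltstack");
        return 1;
    }

    struct sigaction sa = {};
    sa.sa_handler = handler;
    sa.sa_flags = SA_ONSTACK;
    sigaction(SIGUSR1, &sa, nullptr);

    raise(SIGUSR1);
    return 1; // not reached if the handler runs
}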


@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
@@ -129,4 +130,181 @@ ErrorOr<FlatPtr> Process::sys$sigreturn([[maybe_unused]] RegisterState& register
#endif
}
ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
{
    // FIXME: This duplicates a lot of logic from sys$mprotect, this should be abstracted out somehow
    auto range_to_remap = TRY(Memory::expand_range_to_page_boundaries(address, size));
    if (!range_to_remap.size())
        return EINVAL;
    if (!is_user_range(range_to_remap))
        return EFAULT;
    if (auto* whole_region = address_space().find_region_from_range(range_to_remap)) {
        if (!whole_region->is_mmap())
            return EPERM;
        if (!whole_region->vmobject().is_anonymous() || whole_region->is_shared())
            return EINVAL;
        whole_region->unsafe_clear_access();
        whole_region->set_readable(true);
        whole_region->set_writable(true);
        whole_region->set_stack(true);
        whole_region->set_syscall_region(false);
        whole_region->clear_to_zero();
        whole_region->remap();
        return {};
    }
    if (auto* old_region = address_space().find_region_containing(range_to_remap)) {
        if (!old_region->is_mmap())
            return EPERM;
        if (!old_region->vmobject().is_anonymous() || old_region->is_shared())
            return EINVAL;
        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but do not deallocate it yet.
        auto region = address_space().take_region(*old_region);
        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);
        // This vector is the region(s) adjacent to our range.
        // We need to allocate a new region for the range we wanted to change permission bits on.
        auto adjacent_regions = TRY(address_space().try_split_region_around_range(*region, range_to_remap));
        size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_remap.base().get() - region->range().base().get());
        auto new_region = TRY(address_space().try_allocate_split_region(*region, range_to_remap, new_range_offset_in_vmobject));
        new_region->unsafe_clear_access();
        new_region->set_readable(true);
        new_region->set_writable(true);
        new_region->set_stack(true);
        new_region->set_syscall_region(false);
        new_region->clear_to_zero();
        // Map the new regions using our page directory (they were just allocated and don't have one).
        for (auto* adjacent_region : adjacent_regions) {
            TRY(adjacent_region->map(address_space().page_directory()));
        }
        TRY(new_region->map(address_space().page_directory()));
        return {};
    }
    if (const auto& regions = address_space().find_regions_intersecting(range_to_remap); regions.size()) {
        size_t full_size_found = 0;
        // Check that all intersecting regions are compatible.
        for (const auto* region : regions) {
            if (!region->is_mmap())
                return EPERM;
            if (!region->vmobject().is_anonymous() || region->is_shared())
                return EINVAL;
            full_size_found += region->range().intersect(range_to_remap).size();
        }
        if (full_size_found != range_to_remap.size())
            return ENOMEM;
        // Finally, iterate over each region, either updating its access flags if the range covers it wholly,
        // or carving out a new subregion with the appropriate access flags set.
        for (auto* old_region : regions) {
            const auto intersection_to_remap = range_to_remap.intersect(old_region->range());
            // If the region is completely covered by range, simply update the access flags
            if (intersection_to_remap == old_region->range()) {
                old_region->unsafe_clear_access();
                old_region->set_readable(true);
                old_region->set_writable(true);
                old_region->set_stack(true);
                old_region->set_syscall_region(false);
                old_region->clear_to_zero();
                old_region->remap();
                continue;
            }
            // Remove the old region from our regions tree, since we're going to add another region
            // with the exact same start address, but don't deallocate it yet.
            auto region = address_space().take_region(*old_region);
            // Unmap the old region here, specifying that we *don't* want the VM deallocated.
            region->unmap(Memory::Region::ShouldDeallocateVirtualRange::No);
            // This vector is the region(s) adjacent to our range.
            // We need to allocate a new region for the range we wanted to change permission bits on.
            auto adjacent_regions = TRY(address_space().try_split_region_around_range(*old_region, intersection_to_remap));
            // Since the range is not contained in a single region, it can only partially cover its starting and ending region,
            // therefore carving out a chunk from the region will always produce a single extra region, and not two.
            VERIFY(adjacent_regions.size() == 1);
            size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_remap.base().get() - old_region->range().base().get());
            auto* new_region = TRY(address_space().try_allocate_split_region(*region, intersection_to_remap, new_range_offset_in_vmobject));
            new_region->unsafe_clear_access();
            new_region->set_readable(true);
            new_region->set_writable(true);
            new_region->set_stack(true);
            new_region->set_syscall_region(false);
            new_region->clear_to_zero();
            // Map the new region using our page directory (they were just allocated and don't have one) if any.
            if (adjacent_regions.size())
                TRY(adjacent_regions[0]->map(address_space().page_directory()));
            TRY(new_region->map(address_space().page_directory()));
        }
        return {};
    }
    return EINVAL;
}
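To illustrate the carve-out path (this snippet is not part of the commit): when userspace points sigaltstack() at a sub-range of a larger anonymous mapping, the containing region is split and only the requested range is rebuilt as a zeroed, read/write stack region, while the neighbouring pages keep their original mapping.

// Hypothetical userspace sketch: register only the middle chunk of a mapping,
// exercising the try_split_region_around_range()/try_allocate_split_region() path.
#include <signal.h>
#include <sys/mman.h>

int main()
{
    char* base = (char*)mmap(nullptr, 3 * SIGSTKSZ, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (base == (char*)MAP_FAILED)
        return 1;

    stack_t ss = {};
    ss.ss_sp = base + SIGSTKSZ; // middle chunk only; the surrounding pages are split off
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;
    return sigaltstack(&ss, nullptr) == 0 ? 0 : 1;
}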
ErrorOr<FlatPtr> Process::sys$sigaltstack(Userspace<const stack_t*> ss, Userspace<stack_t*> old_ss)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    REQUIRE_PROMISE(sigaction);
    if (old_ss) {
        stack_t old_ss_value;
        old_ss_value.ss_sp = (void*)Thread::current()->m_alternative_signal_stack;
        old_ss_value.ss_size = Thread::current()->m_alternative_signal_stack_size;
        old_ss_value.ss_flags = 0;
        if (!Thread::current()->has_alternative_signal_stack())
            old_ss_value.ss_flags = SS_DISABLE;
        else if (Thread::current()->is_in_alternative_signal_stack())
            old_ss_value.ss_flags = SS_ONSTACK;
        TRY(copy_to_user(old_ss, &old_ss_value));
    }
    if (ss) {
        stack_t ss_value;
        TRY(copy_from_user(&ss_value, ss));
        if (Thread::current()->is_in_alternative_signal_stack())
            return EPERM;
        if (ss_value.ss_flags == SS_DISABLE) {
            Thread::current()->m_alternative_signal_stack_size = 0;
            Thread::current()->m_alternative_signal_stack = 0;
        } else if (ss_value.ss_flags == 0) {
            if (ss_value.ss_size <= MINSIGSTKSZ)
                return ENOMEM;
            if (Checked<FlatPtr>::addition_would_overflow((FlatPtr)ss_value.ss_sp, ss_value.ss_size))
                return ENOMEM;
            // In order to preserve compatibility with our MAP_STACK, W^X and syscall region
            // protections, sigaltstack ranges are carved out of their regions, zeroed, and
            // turned into read/writable MAP_STACK-enabled regions.
            // This is inspired by OpenBSD's solution: https://man.openbsd.org/sigaltstack.2
            TRY(remap_range_as_stack((FlatPtr)ss_value.ss_sp, ss_value.ss_size));
            Thread::current()->m_alternative_signal_stack = (FlatPtr)ss_value.ss_sp;
            Thread::current()->m_alternative_signal_stack_size = ss_value.ss_size;
        } else {
            return EINVAL;
        }
    }
    return 0;
}
}
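The LibC half of the change is not shown in this file. Following the usual Serenity wrapper pattern, it would look roughly like the sketch below; the header locations, the SC_sigaltstack syscall number, and the __RETURN_WITH_ERRNO plumbing are assumptions here, not taken from the visible diff.

// Hypothetical LibC wrapper sketch; the commit's actual LibC changes are not visible above.
#include <errno.h>
#include <signal.h>
#include <syscall.h>

extern "C" int sigaltstack(const stack_t* ss, stack_t* old_ss)
{
    // The kernel validates both pointers and copies the structs itself,
    // so the wrapper only forwards them and translates the return value.
    int rc = syscall(SC_sigaltstack, ss, old_ss);
    __RETURN_WITH_ERRNO(rc, rc, -1);
}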