
Kernel: Propagate overflow errors from Memory::page_round_up

Fixes #11402.
Authored by Guilherme Goncalves on 2021-12-24 11:22:11 -03:00; committed by Andreas Kling
parent 11599a3342
commit 33b78915d3
31 changed files with 112 additions and 100 deletions
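The underlying change makes Memory::page_round_up fallible: instead of silently wrapping past the top of the address space, it returns an ErrorOr<size_t> that callers unwrap with TRY(). Below is a minimal, self-contained sketch of that pattern; std::optional stands in for AK::ErrorOr, and the PAGE_SIZE constant and function names are illustrative, not copies of the kernel's code.

// Sketch only: std::optional stands in for AK::ErrorOr<size_t>, and this
// page_round_up is a stand-in for the kernel helper, not the real thing.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>

constexpr size_t PAGE_SIZE = 4096; // assumed page size for illustration

// Round x up to the next page boundary, reporting overflow instead of
// silently wrapping to a small value (the bug class this commit fixes).
std::optional<size_t> page_round_up(size_t x)
{
    size_t rounded = (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    if (rounded < x) // the addition wrapped past SIZE_MAX
        return std::nullopt;
    return rounded;
}

int main()
{
    if (auto size = page_round_up(5000))
        std::printf("rounded: %zu\n", *size); // prints 8192
    if (!page_round_up(SIZE_MAX - 10))
        std::puts("overflow detected instead of wrapping");
}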

Kernel/Syscalls/execve.cpp

@@ -166,8 +166,8 @@ static ErrorOr<RequiredLoadRange> get_required_load_range(OpenFileDescription& p
     auto vmobject = TRY(Memory::SharedInodeVMObject::try_create_with_inode(inode));
     size_t executable_size = inode.size();
-    auto region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF memory range calculation", Memory::Region::Access::Read));
+    size_t rounded_executable_size = TRY(Memory::page_round_up(executable_size));
+    auto region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, rounded_executable_size, "ELF memory range calculation", Memory::Region::Access::Read));
     auto elf_image = ELF::Image(region->vaddr().as_ptr(), executable_size);
     if (!elf_image.is_valid()) {
         return EINVAL;
@@ -261,8 +261,9 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
     }
     size_t executable_size = inode.size();
-    auto executable_region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF loading", Memory::Region::Access::Read));
+    size_t rounded_executable_size = TRY(Memory::page_round_up(executable_size));
+    auto executable_region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, rounded_executable_size, "ELF loading", Memory::Region::Access::Read));
     auto elf_image = ELF::Image(executable_region->vaddr().as_ptr(), executable_size);
     if (!elf_image.is_valid())
@@ -313,7 +314,8 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
     auto region_name = String::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "");
     auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
-    auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
+    size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
+    auto range_end = VirtualAddress { rounded_range_end };
     auto range = TRY(new_space->try_allocate_range(range_base, range_end.get() - range_base.get()));
     auto region = TRY(new_space->allocate_region(range, region_name, prot, AllocationStrategy::Reserve));
@@ -349,7 +351,8 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
         prot |= PROT_EXEC;
     auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
-    auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
+    size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
+    auto range_end = VirtualAddress { rounded_range_end };
     auto range = TRY(new_space->try_allocate_range(range_base, range_end.get() - range_base.get()));
     auto region = TRY(new_space->allocate_region_with_vmobject(range, *vmobject, program_header.offset(), elf_name->view(), prot, true));

Kernel/Syscalls/mmap.cpp

@@ -142,10 +142,8 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> use
     if (alignment & ~PAGE_MASK)
        return EINVAL;
-    if (Memory::page_round_up_would_wrap(size))
-        return EINVAL;
-    if (!Memory::is_user_range(VirtualAddress(addr), Memory::page_round_up(size)))
+    size_t rounded_size = TRY(Memory::page_round_up(size));
+    if (!Memory::is_user_range(VirtualAddress(addr), rounded_size))
         return EFAULT;
     OwnPtr<KString> name;
@@ -188,7 +186,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> use
     auto range = TRY([&]() -> ErrorOr<Memory::VirtualRange> {
         if (map_randomized)
-            return address_space().page_directory().range_allocator().try_allocate_randomized(Memory::page_round_up(size), alignment);
+            return address_space().page_directory().range_allocator().try_allocate_randomized(rounded_size, alignment);
         // If MAP_FIXED is specified, existing mappings that intersect the requested range are removed.
         if (map_fixed)
@@ -208,9 +206,9 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> use
     auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
     RefPtr<Memory::AnonymousVMObject> vmobject;
     if (flags & MAP_PURGEABLE) {
-        vmobject = TRY(Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy));
+        vmobject = TRY(Memory::AnonymousVMObject::try_create_purgeable_with_size(rounded_size, strategy));
     } else {
-        vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy));
+        vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(rounded_size, strategy));
     }
     region = TRY(address_space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), 0, {}, prot, map_shared));
@@ -587,15 +585,11 @@ ErrorOr<FlatPtr> Process::sys$msync(Userspace<void*> address, size_t size, int f
     if (address.ptr() % PAGE_SIZE != 0)
         return EINVAL;
-    if (Memory::page_round_up_would_wrap(size)) {
-        return EINVAL;
-    }
     // Note: This is not specified
-    size = Memory::page_round_up(size);
+    auto rounded_size = TRY(Memory::page_round_up(size));
     // FIXME: We probably want to sync all mappings in the address+size range.
-    auto* region = address_space().find_region_containing(Memory::VirtualRange { address.vaddr(), size });
+    auto* region = address_space().find_region_containing(Memory::VirtualRange { address.vaddr(), rounded_size });
     // All regions from address upto address+size shall be mapped
     if (!region)
         return ENOMEM;
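The sys$mmap and sys$msync hunks above also drop the separate Memory::page_round_up_would_wrap() pre-check, since the fallible helper now detects overflow itself and TRY() propagates it in one line. A hedged sketch of that call-site shape follows; TRY_OPTIONAL is a hypothetical macro mimicking AK's TRY() for std::optional, and its statement-expression form is a GCC/Clang extension.

// Sketch only: msync_like and TRY_OPTIONAL are illustrative names, and the
// page_round_up below repeats the std::optional stand-in from the earlier sketch.
#include <cstdint>
#include <cstdio>
#include <optional>

constexpr size_t PAGE_SIZE = 4096;

std::optional<size_t> page_round_up(size_t x)
{
    size_t rounded = (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    if (rounded < x)
        return std::nullopt;
    return rounded;
}

// Unwrap on success, early-return the empty optional on failure
// (GCC/Clang statement-expression extension, standing in for AK's TRY()).
#define TRY_OPTIONAL(expr)       \
    ({                           \
        auto&& _tmp = (expr);    \
        if (!_tmp)               \
            return std::nullopt; \
        *_tmp;                   \
    })

// Call-site shape after the commit: one fallible call replaces the old
// would-wrap pre-check followed by an infallible (wrapping) round-up.
std::optional<size_t> msync_like(size_t size)
{
    auto rounded_size = TRY_OPTIONAL(page_round_up(size));
    return rounded_size; // ... look up the region using rounded_size ...
}

int main()
{
    if (!msync_like(SIZE_MAX))
        std::puts("overflow propagated to the syscall's caller");
}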