1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-28 22:15:07 +00:00

Kernel: Validate the full range of user memory passed to syscalls

We now validate the full range of userspace memory passed into syscalls
instead of just checking that the first and last byte of the memory are
in process-owned regions.

This fixes an issue where it was possible to avoid rejection of invalid
addresses that sat between two valid ones, simply by passing a valid
address and a size large enough to put the end of the range at another
valid address.

I added a little test utility that tries to provoke EFAULT in various
ways to help verify this. I'm sure we can think of more ways to test
this but it's at least a start. :^)

Thanks to mozjag for pointing out that this code was still lacking!

Incidentally this also makes backtraces work again.

Fixes #989.
This commit is contained in:
Andreas Kling 2020-01-02 02:09:25 +01:00
parent e5ffa960d7
commit 3dcec260ed
4 changed files with 121 additions and 25 deletions

View file

@@ -606,6 +606,35 @@ static inline bool is_user_address(VirtualAddress vaddr)
return vaddr.get() >= (8 * MB) && vaddr.get() < 0xc0000000;
}
// Validates that *every* page in the range [base_vaddr, base_vaddr + size)
// is covered by a region that permits the requested kind of access for the
// given process. This closes the hole where only the first and last byte of
// a user-supplied range were checked, letting unmapped gaps in the middle
// slip through.
//
// Template parameters:
//   space       - AccessSpace::Kernel also consults kernel regions (falling
//                 back to the process's user regions); AccessSpace::User
//                 additionally requires the region to be user-accessible.
//   access_type - AccessType::Read requires the region to be readable;
//                 AccessType::Write requires it to be writable.
//
// Returns true only if the entire range validates; size must be non-zero.
template<MemoryManager::AccessSpace space, MemoryManager::AccessType access_type>
bool MemoryManager::validate_range(const Process& process, VirtualAddress base_vaddr, size_t size) const
{
// Callers must never ask about an empty range.
ASSERT(size);
// Walk at page granularity: from the page containing the first byte
// through the page containing the last byte (size - 1 offset, inclusive).
VirtualAddress vaddr = base_vaddr.page_base();
VirtualAddress end_vaddr = base_vaddr.offset(size - 1).page_base();
// If base + size wrapped around the address space, the range is bogus.
if (end_vaddr < vaddr) {
dbg() << *current << " Shenanigans! Asked to validate " << base_vaddr << " size=" << size;
return false;
}
// Cache the most recently matched region so consecutive pages that fall
// inside the same region don't each pay for a fresh region lookup.
const Region* region = nullptr;
while (vaddr <= end_vaddr) {
if (!region || !region->contains(vaddr)) {
// Kernel-space validation tries kernel regions first, then falls
// through to the process's own regions if no kernel region matched.
if (space == AccessSpace::Kernel)
region = kernel_region_from_vaddr(vaddr);
if (!region || !region->contains(vaddr))
region = user_region_from_vaddr(const_cast<Process&>(process), vaddr);
// Reject if this page is unmapped, or the covering region lacks
// the accessibility/permission required for this access.
if (!region
|| (space == AccessSpace::User && !region->is_user_accessible())
|| (access_type == AccessType::Read && !region->is_readable())
|| (access_type == AccessType::Write && !region->is_writable())) {
return false;
}
}
// Advance to the next page in the range.
vaddr = vaddr.offset(PAGE_SIZE);
}
return true;
}
bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress vaddr) const
{
if (!is_user_address(vaddr))
@@ -614,20 +643,23 @@ bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress v
return region && region->is_user_accessible() && region->is_stack();
}
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const
bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress vaddr, size_t size) const
{
if (!is_user_address(vaddr))
return false;
auto* region = user_region_from_vaddr(const_cast<Process&>(process), vaddr);
return region && region->is_user_accessible() && region->is_readable();
return validate_range<AccessSpace::Kernel, AccessType::Read>(process, vaddr, size);
}
bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr) const
bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr, size_t size) const
{
if (!is_user_address(vaddr))
return false;
auto* region = user_region_from_vaddr(const_cast<Process&>(process), vaddr);
return region && region->is_user_accessible() && region->is_writable();
return validate_range<AccessSpace::User, AccessType::Read>(process, vaddr, size);
}
// Returns true if userspace in `process` may write the entire range
// [vaddr, vaddr + size): the address must be a user address and every page
// of the range must be mapped, user-accessible, and writable.
bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr, size_t size) const
{
    // Short-circuit: a non-user address fails immediately; otherwise defer
    // to the page-by-page range validator with write access requested.
    return is_user_address(vaddr)
        && validate_range<AccessSpace::User, AccessType::Write>(process, vaddr, size);
}
void MemoryManager::register_vmobject(VMObject& vmobject)