1
Mirror of https://github.com/RGBCube/serenity (forks: 0), last synced 2025-05-25 21:55:07 +00:00.

Kernel: Make copy_to/from_user safe and remove unnecessary checks

Since the CPU already does almost all necessary validation steps
for us, we don't really need to attempt to do this. Doing it
ourselves doesn't really work very reliably, because we'd have to
account for other processors modifying virtual memory, and we'd
have to account for e.g. pages not being able to be allocated
due to insufficient resources.

So change the copy_to/from_user (and associated helper functions)
to use the new safe_memcpy, which will return whether it succeeded
or not. The only manual validation step needed (which the CPU
can't perform for us) is making sure the pointers provided by user
mode aren't pointing to kernel mappings.

To make it easier to read/write from/to either kernel or user mode
data add the UserOrKernelBuffer helper class, which will internally
either use copy_from/to_user or directly memcpy, or pass the data
through directly using a temporary buffer on the stack.

Last but not least we need to keep syscall params trivial as we
need to copy them from/to user mode using copy_from/to_user.
Commit c8d9f1b9c9, authored by Tom on 2020-09-11 21:11:07 -06:00 and committed by Andreas Kling.
Parent commit: 7d1b8417bd.
149 changed files with 1585 additions and 1244 deletions.

View file

@ -35,27 +35,68 @@
// Copies a NUL-bounded string of at most user_str_size bytes from user memory
// into a kernel-owned String. Returns a null String on any failure: the
// pointer not lying in a user-mode range, or a fault while measuring or
// copying (e.g. an unmapped user page). Returns String::empty() for a
// zero-length string so callers can distinguish "empty" from "failed".
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    // The one check the CPU cannot do for us: reject kernel-space pointers
    // smuggled in from user mode.
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    // safe_strnlen returns a negative length if it faults before finding the
    // terminator (or reaching user_str_size), with fault_at set to the
    // faulting address.
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        klog() << "copy_string_from_user(" << user_str << ", " << user_str_size << ") failed at " << VirtualAddress(fault_at) << " (strnlen)";
        return {};
    }
    if (length == 0)
        return String::empty();
    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    // The page could have been unmapped between strnlen and here (another
    // thread/CPU), so the copy itself must also be fault-tolerant.
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        klog() << "copy_string_from_user(" << user_str << ", " << user_str_size << ") failed at " << VirtualAddress(fault_at) << " (memcpy)";
        return {};
    }
    return copied_string;
}
// Typed-wrapper overload: unwraps the Userspace<> tag and forwards to the
// raw-pointer implementation, which performs the actual range validation.
String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    auto* raw_ptr = user_str.unsafe_userspace_ptr();
    return copy_string_from_user(raw_ptr, user_str_size);
}
extern "C" {
void copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
ASSERT(Kernel::is_user_range(VirtualAddress(dest_ptr), n));
bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return false;
ASSERT(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
Kernel::SmapDisabler disabler;
memcpy(dest_ptr, src_ptr, n);
void* fault_at;
if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
ASSERT(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
klog() << "copy_to_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
return false;
}
return true;
}
void copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
ASSERT(Kernel::is_user_range(VirtualAddress(src_ptr), n));
bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return false;
ASSERT(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
Kernel::SmapDisabler disabler;
memcpy(dest_ptr, src_ptr, n);
void* fault_at;
if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
ASSERT(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
klog() << "copy_from_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
return false;
}
return true;
}
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
@ -97,11 +138,19 @@ const void* memmem(const void* haystack, size_t haystack_length, const void* nee
return AK::memmem(haystack, haystack_length, needle, needle_length);
}
void memset_user(void* dest_ptr, int c, size_t n)
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
ASSERT(Kernel::is_user_range(VirtualAddress(dest_ptr), n));
bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
ASSERT(is_user); // For now assert to catch bugs, but technically not an error
if (!is_user)
return false;
Kernel::SmapDisabler disabler;
memset(dest_ptr, c, n);
void* fault_at;
if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
klog() << "memset(" << dest_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
return false;
}
return true;
}
void* memset(void* dest_ptr, int c, size_t n)