Kernel: Add safe atomic functions

This allows us to perform atomic operations on potentially unsafe user space pointers.

parent 992f513ad2
commit b17a889320

10 changed files with 573 additions and 166 deletions
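All of the helpers added by the hunk below follow one pattern: check that the user pointer is 4-byte aligned, check that it lies entirely within the user address range, then open a Kernel::SmapDisabler scope (which temporarily lifts the CPU's supervisor-mode access prevention so the kernel may touch user pages) and forward to the matching Kernel::safe_atomic_* primitive, which performs the actual access in a fault-safe manner. The surrounding context lines (copy_string_from_user, copy_to_user) suggest the file shown is Kernel/StdLib.cpp.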
@@ -63,6 +63,115 @@ String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}

Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}
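On any validation failure the helper returns an empty Optional; a present value is the previous contents of the word. The ASSERT is, per its own comment, a temporary aid for catching kernel bugs rather than a hard requirement. The exchange, load, and fetch-and/or/xor helpers below repeat this exact shape, differing only in which safe_atomic_* primitive they forward to.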

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}
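Note that user_atomic_store_relaxed returns a plain bool rather than an Optional<u32>: a store has no previous value to report, so success or failure is all the caller needs.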

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    ASSERT(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}
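The compare-exchange helper adds one extra assertion: `expected` is a reference that gets read, and presumably updated on failure (standard compare-exchange semantics), outside the fault-safe path, so it must point into kernel memory, never into the user range.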

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
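To make the intended use concrete, here is a minimal sketch of a hypothetical caller, not part of this commit: a futex-style wait that checks a userspace word before blocking. The syscall name, the Userspace<u32*> parameter shape, and the negative-errno return convention are all assumptions for illustration.

// Hypothetical caller (not from this commit): a futex-style wait that
// safely inspects a userspace word before deciding to block.
int sys_futex_wait_sketch(Userspace<u32*> user_address, u32 expected)
{
    volatile u32* user_word = user_address.unsafe_userspace_ptr();

    // Returns an empty Optional if user_word is misaligned or does not
    // point into user memory.
    auto current = user_atomic_load_relaxed(user_word);
    if (!current.has_value())
        return -EFAULT; // errno value assumed for illustration
    if (current.value() != expected)
        return -EAGAIN; // word changed before we could block; retry
    // ... block the current thread until a matching wake ...
    return 0;
}

The point of the Optional return is visible here: a fault on the user access surfaces as an empty value the caller can turn into an error code, instead of crashing the kernel.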

extern "C" {

bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)