From ab279c850b4ce5b945c9e8444e95ab5f1c50cf3a Mon Sep 17 00:00:00 2001
From: Daniel Bertalan
Date: Fri, 19 May 2023 11:52:52 +0200
Subject: [PATCH] Kernel/aarch64: Stub out atomic SafeMem functions

These are used in futexes, which are needed if we want to get further
in `run-tests`.

For now, we have no way to return a non-fatal error if an access fault
is raised while executing these, so the kernel will panic. Some would
consider this a DoS vulnerability where a malicious userspace app can
crash the kernel by passing bogus pointers to it, but I prefer to call
it progress :^)
---
 Kernel/Arch/aarch64/SafeMem.cpp | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/Kernel/Arch/aarch64/SafeMem.cpp b/Kernel/Arch/aarch64/SafeMem.cpp
index 86f6d4f1c3..29e58668ee 100644
--- a/Kernel/Arch/aarch64/SafeMem.cpp
+++ b/Kernel/Arch/aarch64/SafeMem.cpp
@@ -29,34 +29,35 @@ bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*&)
     return true;
 }
 
-Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32)
+Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
 {
-    TODO_AARCH64();
-    return {};
+    // FIXME: Handle access faults.
+    return AK::atomic_compare_exchange_strong(var, expected, val, AK::memory_order_relaxed);
 }
 
-Optional<u32> safe_atomic_load_relaxed(u32 volatile*)
+Optional<u32> safe_atomic_load_relaxed(u32 volatile* var)
 {
-    TODO_AARCH64();
-    return {};
+    // FIXME: Handle access faults.
+    return AK::atomic_load(var, AK::memory_order_relaxed);
 }
 
-Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32)
+Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
 {
-    TODO_AARCH64();
-    return {};
+    // FIXME: Handle access faults.
+    return AK::atomic_fetch_add(var, val, AK::memory_order_relaxed);
 }
 
-Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32)
+Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* var, u32 val)
 {
-    TODO_AARCH64();
-    return {};
+    // FIXME: Handle access faults.
+    return AK::atomic_exchange(var, val, AK::memory_order_relaxed);
 }
 
-bool safe_atomic_store_relaxed(u32 volatile*, u32)
+bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
 {
-    TODO_AARCH64();
-    return {};
+    // FIXME: Handle access faults.
+    AK::atomic_store(var, val);
+    return true;
 }
 
 bool handle_safe_access_fault(RegisterState&, FlatPtr)
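
Not part of the patch: a minimal caller-side sketch of how these helpers are meant to be consumed once access faults are handled, so a bad userspace pointer surfaces as EFAULT instead of a panic. The function name read_futex_word_from_userspace and the exact includes below are illustrative assumptions, not code from this commit.

// Hypothetical caller sketch; not part of this patch.
#include <AK/Error.h>
#include <AK/Optional.h>
#include <Kernel/Arch/SafeMem.h>

namespace Kernel {

ErrorOr<u32> read_futex_word_from_userspace(u32 volatile* user_word)
{
    // Once fault handling is wired up, an empty Optional means the access
    // faulted; today on aarch64 a bogus pointer still panics (see FIXME).
    Optional<u32> value = safe_atomic_load_relaxed(user_word);
    if (!value.has_value())
        return Error::from_errno(EFAULT);
    return value.release_value();
}

}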