mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 12:37:44 +00:00
AK: Add a full memory barrier function based on atomic operations
We use atomic_signal_fence and atomic_thread_fence together to prevent reordering of memory accesses by the CPU and the compiler. The usage of these functions was suggested by @tomuta so we can be sure that important memory accesses happen in the expected order :)
This commit is contained in:
parent
b59e45e65c
commit
4a5cf8c789
1 changed file with 12 additions and 0 deletions
12
AK/Atomic.h
12
AK/Atomic.h
|
@ -31,11 +31,22 @@
|
||||||
|
|
||||||
namespace AK {
|
namespace AK {
|
||||||
|
|
||||||
|
static inline void atomic_signal_fence(MemoryOrder order) noexcept
|
||||||
|
{
|
||||||
|
return __atomic_signal_fence(order);
|
||||||
|
}
|
||||||
|
|
||||||
static inline void atomic_thread_fence(MemoryOrder order) noexcept
|
static inline void atomic_thread_fence(MemoryOrder order) noexcept
|
||||||
{
|
{
|
||||||
return __atomic_thread_fence(order);
|
return __atomic_thread_fence(order);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void full_memory_barrier() noexcept
|
||||||
|
{
|
||||||
|
atomic_signal_fence(AK::MemoryOrder::memory_order_acq_rel);
|
||||||
|
atomic_thread_fence(AK::MemoryOrder::memory_order_acq_rel);
|
||||||
|
}
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
static inline T atomic_exchange(volatile T* var, T desired, MemoryOrder order = memory_order_seq_cst) noexcept
|
static inline T atomic_exchange(volatile T* var, T desired, MemoryOrder order = memory_order_seq_cst) noexcept
|
||||||
{
|
{
|
||||||
|
@ -376,3 +387,4 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
using AK::Atomic;
|
using AK::Atomic;
|
||||||
|
using AK::full_memory_barrier;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue