1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 06:57:45 +00:00

AK: Run clang-format on Atomic.h

Also use <AK/Types.h> instead of <stddef.h>
This commit is contained in:
Andreas Kling 2020-01-12 18:44:51 +01:00
parent f3eb06a46f
commit 61e6b1fb7c

View file

@@ -1,240 +1,234 @@
#pragma once

#include <AK/Types.h>
namespace AK {
// Memory-ordering constraints for atomic operations.
// Each enumerator maps directly onto the compiler's __ATOMIC_* constant,
// so values can be passed straight through to the __atomic_* builtins
// used by Atomic<T> below.
enum MemoryOrder {
    memory_order_relaxed = __ATOMIC_RELAXED, // no ordering constraint, atomicity only
    memory_order_consume = __ATOMIC_CONSUME, // ordering limited to data-dependent reads
    memory_order_acquire = __ATOMIC_ACQUIRE, // later reads/writes not reordered before this load
    memory_order_release = __ATOMIC_RELEASE, // earlier reads/writes not reordered after this store
    memory_order_acq_rel = __ATOMIC_ACQ_REL, // acquire + release (for read-modify-write ops)
    memory_order_seq_cst = __ATOMIC_SEQ_CST  // sequentially consistent (the default everywhere below)
};
template <typename T> template<typename T>
class Atomic class Atomic {
{ T m_value { 0 };
T m_value { 0 };
public: public:
Atomic() noexcept = default; Atomic() noexcept = default;
Atomic(const Atomic&) = delete; Atomic(const Atomic&) = delete;
Atomic& operator=(const Atomic&) volatile = delete; Atomic& operator=(const Atomic&) volatile = delete;
Atomic(T val) noexcept: Atomic(T val) noexcept
m_value(val) : m_value(val)
{ {
} }
T exchange(T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept T exchange(T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_exchange_n(&m_value, desired, order); return __atomic_exchange_n(&m_value, desired, order);
} }
bool compare_exchange_strong(T& expected, T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept bool compare_exchange_strong(T& expected, T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
if (order == memory_order_acq_rel || order == memory_order_release) if (order == memory_order_acq_rel || order == memory_order_release)
return __atomic_compare_exchange_n(&m_value, &expected, desired, false, memory_order_release, memory_order_acquire); return __atomic_compare_exchange_n(&m_value, &expected, desired, false, memory_order_release, memory_order_acquire);
else else
return __atomic_compare_exchange_n(&m_value, &expected, desired, false, order, order); return __atomic_compare_exchange_n(&m_value, &expected, desired, false, order, order);
} }
T operator++() volatile noexcept T operator++() volatile noexcept
{ {
return fetch_add(1) + 1; return fetch_add(1) + 1;
} }
T operator++(int) volatile noexcept T operator++(int) volatile noexcept
{ {
return fetch_add(1); return fetch_add(1);
} }
T operator+=(T val) volatile noexcept T operator+=(T val) volatile noexcept
{ {
return fetch_add(val) + val; return fetch_add(val) + val;
} }
T fetch_add(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T fetch_add(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_add(&m_value, val, order); return __atomic_fetch_add(&m_value, val, order);
} }
T operator--() volatile noexcept T operator--() volatile noexcept
{ {
return fetch_sub(1) - 1; return fetch_sub(1) - 1;
} }
T operator--(int) volatile noexcept T operator--(int) volatile noexcept
{ {
return fetch_sub(1); return fetch_sub(1);
} }
T operator-=(T val) volatile noexcept T operator-=(T val) volatile noexcept
{ {
return fetch_sub(val) - val; return fetch_sub(val) - val;
} }
T fetch_sub(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T fetch_sub(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_sub(&m_value, val, order); return __atomic_fetch_sub(&m_value, val, order);
} }
T operator&=(T val) volatile noexcept T operator&=(T val) volatile noexcept
{ {
return fetch_and(val) & val; return fetch_and(val) & val;
} }
T fetch_and(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T fetch_and(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_and(&m_value, val, order); return __atomic_fetch_and(&m_value, val, order);
} }
T operator|=(T val) volatile noexcept T operator|=(T val) volatile noexcept
{ {
return fetch_or(val) | val; return fetch_or(val) | val;
} }
T fetch_or(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T fetch_or(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_or(&m_value, val, order); return __atomic_fetch_or(&m_value, val, order);
} }
T operator^=(T val) volatile noexcept T operator^=(T val) volatile noexcept
{ {
return fetch_xor(val) ^ val; return fetch_xor(val) ^ val;
} }
T fetch_xor(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T fetch_xor(T val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_xor(&m_value, val, order); return __atomic_fetch_xor(&m_value, val, order);
} }
operator T() const volatile noexcept operator T() const volatile noexcept
{ {
return load(); return load();
} }
T load(MemoryOrder order = memory_order_seq_cst) const volatile noexcept T load(MemoryOrder order = memory_order_seq_cst) const volatile noexcept
{ {
return __atomic_load_n(&m_value, order); return __atomic_load_n(&m_value, order);
} }
T operator=(T desired) volatile noexcept T operator=(T desired) volatile noexcept
{ {
store(desired); store(desired);
return desired; return desired;
} }
void store(T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept void store(T desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
__atomic_store_n(&m_value, desired, order); __atomic_store_n(&m_value, desired, order);
} }
bool is_lock_free() const volatile noexcept bool is_lock_free() const volatile noexcept
{ {
return __atomic_is_lock_free(sizeof(m_value), &m_value); return __atomic_is_lock_free(sizeof(m_value), &m_value);
} }
}; };
template<typename T>
template <typename T> class Atomic<T*> {
class Atomic<T*> T* m_value { nullptr };
{
T* m_value { nullptr };
public: public:
Atomic() noexcept = default; Atomic() noexcept = default;
Atomic(const Atomic&) = delete; Atomic(const Atomic&) = delete;
Atomic& operator=(const Atomic&) volatile = delete; Atomic& operator=(const Atomic&) volatile = delete;
Atomic(T* val) noexcept: Atomic(T* val) noexcept
m_value(val) : m_value(val)
{ {
} }
T* exchange(T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept T* exchange(T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_exchange_n(&m_value, desired, order); return __atomic_exchange_n(&m_value, desired, order);
} }
bool compare_exchange_strong(T*& expected, T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept bool compare_exchange_strong(T*& expected, T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
if (order == memory_order_acq_rel || order == memory_order_release) if (order == memory_order_acq_rel || order == memory_order_release)
return __atomic_compare_exchange_n(&m_value, &expected, desired, false, memory_order_release, memory_order_acquire); return __atomic_compare_exchange_n(&m_value, &expected, desired, false, memory_order_release, memory_order_acquire);
else else
return __atomic_compare_exchange_n(&m_value, &expected, desired, false, order, order); return __atomic_compare_exchange_n(&m_value, &expected, desired, false, order, order);
} }
T* operator++() volatile noexcept T* operator++() volatile noexcept
{ {
return fetch_add(1) + 1; return fetch_add(1) + 1;
} }
T* operator++(int) volatile noexcept T* operator++(int) volatile noexcept
{ {
return fetch_add(1); return fetch_add(1);
} }
T* operator+=(ptrdiff_t val) volatile noexcept T* operator+=(ptrdiff_t val) volatile noexcept
{ {
return fetch_add(val) + val; return fetch_add(val) + val;
} }
T* fetch_add(ptrdiff_t val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T* fetch_add(ptrdiff_t val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_add(&m_value, val * sizeof(*m_value), order); return __atomic_fetch_add(&m_value, val * sizeof(*m_value), order);
} }
T* operator--() volatile noexcept T* operator--() volatile noexcept
{ {
return fetch_sub(1) - 1; return fetch_sub(1) - 1;
} }
T* operator--(int) volatile noexcept T* operator--(int) volatile noexcept
{ {
return fetch_sub(1); return fetch_sub(1);
} }
T* operator-=(ptrdiff_t val) volatile noexcept T* operator-=(ptrdiff_t val) volatile noexcept
{ {
return fetch_sub(val) - val; return fetch_sub(val) - val;
} }
T* fetch_sub(ptrdiff_t val, MemoryOrder order = memory_order_seq_cst) volatile noexcept T* fetch_sub(ptrdiff_t val, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
return __atomic_fetch_sub(&m_value, val * sizeof(*m_value), order); return __atomic_fetch_sub(&m_value, val * sizeof(*m_value), order);
} }
operator T*() const volatile noexcept operator T*() const volatile noexcept
{ {
return load(); return load();
} }
T* load(MemoryOrder order = memory_order_seq_cst) const volatile noexcept T* load(MemoryOrder order = memory_order_seq_cst) const volatile noexcept
{ {
return __atomic_load_n(&m_value, order); return __atomic_load_n(&m_value, order);
} }
T* operator=(T* desired) volatile noexcept T* operator=(T* desired) volatile noexcept
{ {
store(desired); store(desired);
return desired; return desired;
} }
void store(T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept void store(T* desired, MemoryOrder order = memory_order_seq_cst) volatile noexcept
{ {
__atomic_store_n(&m_value, desired, order); __atomic_store_n(&m_value, desired, order);
} }
bool is_lock_free() const volatile noexcept bool is_lock_free() const volatile noexcept
{ {
return __atomic_is_lock_free(sizeof(m_value), &m_value); return __atomic_is_lock_free(sizeof(m_value), &m_value);
} }
}; };
}