Mirror of https://github.com/RGBCube/serenity, synced 2025-05-31 08:58:11 +00:00
Kernel: Specify default memory order for some non-synchronizing Atomics
This commit is contained in: parent fb84f0ec9c, commit 901ef3f1c8
12 changed files with 44 additions and 43 deletions
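The change follows one pattern throughout: atomics that do not synchronize with anything else get their memory order specified once, in the member declaration (for example `Atomic<bool, AK::MemoryOrder::memory_order_relaxed>`), and the call sites then drop the explicit load()/store()/fetch_add()/exchange() order arguments in favor of the plain operator overloads. Below is a minimal, self-contained sketch of how a wrapper with a defaulted memory-order template parameter can forward that order to its operators. It is an illustration built on the GCC/Clang __atomic builtins with made-up names (BasicAtomic, DefaultMemoryOrder), not the actual AK::Atomic implementation.

// Illustrative sketch only -- not the AK::Atomic implementation.
// Shows how a defaulted memory-order template parameter lets plain
// operators (++, =, implicit load) use that order at every call site.
#include <cstdio>

enum MemoryOrder : int {
    memory_order_relaxed = __ATOMIC_RELAXED,
    memory_order_seq_cst = __ATOMIC_SEQ_CST,
};

template<typename T, MemoryOrder DefaultMemoryOrder = memory_order_seq_cst>
class BasicAtomic {
public:
    constexpr BasicAtomic(T value = {})
        : m_value(value)
    {
    }

    // Implicit load, e.g. `while (!m_thread_shutdown)`.
    operator T() const { return __atomic_load_n(&m_value, DefaultMemoryOrder); }

    // Plain assignment, e.g. `m_num_allocated = 0;`.
    T operator=(T desired)
    {
        __atomic_store_n(&m_value, desired, DefaultMemoryOrder);
        return desired;
    }

    // Post-increment/decrement, e.g. `m_invoking_count++;`.
    T operator++(int) { return __atomic_fetch_add(&m_value, 1, DefaultMemoryOrder); }
    T operator--(int) { return __atomic_fetch_sub(&m_value, 1, DefaultMemoryOrder); }

    // exchange() falls back to the declared default when no order is given.
    T exchange(T desired, MemoryOrder order = DefaultMemoryOrder)
    {
        return __atomic_exchange_n(&m_value, desired, order);
    }

private:
    T m_value;
};

int main()
{
    // Relaxed by declaration: every plain access below is a relaxed atomic op.
    BasicAtomic<unsigned, memory_order_relaxed> invoking_count { 0 };
    invoking_count++;
    unsigned snapshot = invoking_count;
    std::printf("count = %u\n", snapshot);
}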
@@ -679,7 +679,7 @@ void Plan9FS::thread_main()
             dbg() << "Plan9FS: Thread terminating, error reading";
             return;
         }
-    } while (!m_thread_shutdown.load(AK::MemoryOrder::memory_order_relaxed));
+    } while (!m_thread_shutdown);
     dbg() << "Plan9FS: Thread terminating";
 }
 
@@ -162,7 +162,7 @@ private:
     SpinLock<u8> m_thread_lock;
     RefPtr<Thread> m_thread;
     Atomic<bool> m_thread_running { false };
-    Atomic<bool> m_thread_shutdown { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_thread_shutdown { false };
 };
 
 class Plan9FSInode final : public Inode {
@@ -51,7 +51,7 @@ public:
         }
         slabs[0].next = nullptr;
         m_freelist = &slabs[m_slab_count - 1];
-        m_num_allocated.store(0, AK::MemoryOrder::memory_order_release);
+        m_num_allocated = 0;
     }
 
     constexpr size_t slab_size() const { return templated_slab_size; }
@@ -75,7 +75,7 @@ public:
             next_free = free_slab->next;
         } while (!m_freelist.compare_exchange_strong(free_slab, next_free, AK::memory_order_acq_rel));
 
-        m_num_allocated.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
+        m_num_allocated++;
     }
 
 #ifdef SANITIZE_SLABS
@@ -104,11 +104,11 @@ public:
             free_slab->next = next_free;
         } while (!m_freelist.compare_exchange_strong(next_free, free_slab, AK::memory_order_acq_rel));
 
-        m_num_allocated.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+        m_num_allocated--;
     }
 
-    size_t num_allocated() const { return m_num_allocated.load(AK::MemoryOrder::memory_order_consume); }
-    size_t num_free() const { return m_slab_count - m_num_allocated.load(AK::MemoryOrder::memory_order_consume); }
+    size_t num_allocated() const { return m_num_allocated; }
+    size_t num_free() const { return m_slab_count - m_num_allocated; }
 
 private:
     struct FreeSlab {
@@ -117,7 +117,7 @@ private:
     };
 
     Atomic<FreeSlab*> m_freelist { nullptr };
-    Atomic<ssize_t> m_num_allocated;
+    Atomic<ssize_t, AK::MemoryOrder::memory_order_relaxed> m_num_allocated;
     size_t m_slab_count;
     void* m_base { nullptr };
     void* m_end { nullptr };
@@ -48,7 +48,7 @@ public:
 
     u8 interrupt_number() const { return m_interrupt_number; }
 
-    size_t get_invoking_count() const { return m_invoking_count.load(AK::MemoryOrder::memory_order_relaxed); }
+    size_t get_invoking_count() const { return m_invoking_count; }
 
     virtual size_t sharing_devices_count() const = 0;
     virtual bool is_shared_handler() const = 0;
@@ -61,7 +61,7 @@ public:
     virtual bool eoi() = 0;
     ALWAYS_INLINE void increment_invoking_counter()
     {
-        m_invoking_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
+        m_invoking_count++;
     }
 
 protected:
@@ -71,7 +71,7 @@ protected:
     void disable_remap() { m_disable_remap = true; }
 
 private:
-    Atomic<u32> m_invoking_count { 0 };
+    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_invoking_count { 0 };
     u8 m_interrupt_number { 0 };
     bool m_disable_remap { false };
 };
@@ -55,13 +55,13 @@ void Lock::lock(Mode mode)
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
             do {
                 // FIXME: Do not add new readers if writers are queued.
-                auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+                Mode current_mode = m_mode;
                 switch (current_mode) {
                 case Mode::Unlocked: {
 #ifdef LOCK_TRACE_DEBUG
                     dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently unlocked";
 #endif
-                    m_mode.store(mode, AK::MemoryOrder::memory_order_relaxed);
+                    m_mode = mode;
                     ASSERT(!m_holder);
                     ASSERT(m_shared_holders.is_empty());
                     if (mode == Mode::Exclusive) {
@@ -140,7 +140,7 @@ void Lock::unlock()
     ScopedCritical critical; // in case we're not in a critical section already
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
-            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+            Mode current_mode = m_mode;
 #ifdef LOCK_TRACE_DEBUG
             if (current_mode == Mode::Shared)
                 dbg() << "Lock::unlock @ " << this << ": release " << mode_to_string(current_mode) << ", locks held: " << m_times_locked;
@@ -177,7 +177,7 @@ void Lock::unlock()
 
             if (m_times_locked == 0) {
                 ASSERT(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
-                m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+                m_mode = Mode::Unlocked;
             }
 
 #ifdef LOCK_DEBUG
@@ -218,7 +218,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
         ASSERT(m_times_locked > 0);
         lock_count_to_restore = m_times_locked;
         m_times_locked = 0;
-        m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+        m_mode = Mode::Unlocked;
         m_lock.store(false, AK::memory_order_release);
 #ifdef LOCK_DEBUG
         m_holder->holding_lock(*this, -(int)lock_count_to_restore);
@@ -247,7 +247,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
         ASSERT(m_times_locked >= lock_count_to_restore);
         m_times_locked -= lock_count_to_restore;
         if (m_times_locked == 0)
-            m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+            m_mode = Mode::Unlocked;
         m_lock.store(false, AK::memory_order_release);
         previous_mode = Mode::Shared;
         break;
@@ -290,7 +290,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
     switch (mode) {
     case Mode::Exclusive: {
         auto expected_mode = Mode::Unlocked;
-        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive, AK::MemoryOrder::memory_order_relaxed))
+        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
             break;
 #ifdef LOCK_RESTORE_DEBUG
         dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was unlocked";
@@ -308,7 +308,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
     }
     case Mode::Shared: {
         auto expected_mode = Mode::Unlocked;
-        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared, AK::MemoryOrder::memory_order_relaxed) && expected_mode != Mode::Shared)
+        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
             break;
 #ifdef LOCK_RESTORE_DEBUG
         dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was " << mode_to_string(expected_mode);
@@ -339,7 +339,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
 
 void Lock::clear_waiters()
 {
-    ASSERT(m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Shared);
+    ASSERT(m_mode != Mode::Shared);
     m_queue.wake_all();
 }
 
@@ -58,7 +58,7 @@ public:
     void unlock();
     [[nodiscard]] Mode force_unlock_if_locked(u32&);
     void restore_lock(Mode, u32);
-    bool is_locked() const { return m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Unlocked; }
+    bool is_locked() const { return m_mode != Mode::Unlocked; }
     void clear_waiters();
 
     const char* name() const { return m_name; }
@@ -81,7 +81,7 @@ private:
     Atomic<bool> m_lock { false };
     const char* m_name { nullptr };
     WaitQueue m_queue;
-    Atomic<Mode> m_mode { Mode::Unlocked };
+    Atomic<Mode, AK::MemoryOrder::memory_order_relaxed> m_mode { Mode::Unlocked };
 
     // When locked exclusively, only the thread already holding the lock can
     // lock it again. When locked in shared mode, any thread can do that.
@@ -154,8 +154,8 @@ public:
 
     bool is_dead() const { return m_dead; }
 
-    bool is_stopped() const { return m_is_stopped.load(AK::MemoryOrder::memory_order_relaxed); }
-    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped, AK::MemoryOrder::memory_order_relaxed); }
+    bool is_stopped() const { return m_is_stopped; }
+    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }
 
     bool is_kernel_process() const { return m_is_kernel_process; }
     bool is_user_process() const { return !m_is_kernel_process; }
@@ -595,7 +595,7 @@ private:
     const bool m_is_kernel_process;
     bool m_dead { false };
     bool m_profiling { false };
-    Atomic<bool> m_is_stopped { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
     bool m_should_dump_core { false };
 
     RefPtr<Custody> m_executable;
@@ -750,8 +750,9 @@ public:
         m_in_block = true;
         T t(forward<Args>(args)...);
 
-        Atomic<bool> timeout_unblocked(false);
-        Atomic<bool> did_unblock(false);
+        // Relaxed semantics are fine for timeout_unblocked because we
+        // synchronize on the spin locks already.
+        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
         RefPtr<Timer> timer;
         {
             switch (state()) {
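The comment added above captures the reasoning behind most of these relaxed atomics: timeout_unblocked is only ever exchanged while the scheduler and block spin locks are held, so the locks already provide the acquire/release ordering and the flag itself only needs atomicity. A small illustration of that idiom follows, using std::atomic and std::mutex rather than the kernel's SpinLock; the names (g_lock, g_timeout_unblocked, g_blocked_data) are invented for the example and this is not SerenityOS code.

// Sketch: a relaxed flag is enough when every access happens under a lock
// whose acquire/release already orders the surrounding data.
#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex g_lock;                            // stands in for the scheduler/block spin locks
static std::atomic<bool> g_timeout_unblocked { false };
static int g_blocked_data = 0;                       // protected by g_lock

static void timeout_fired()
{
    std::lock_guard<std::mutex> guard(g_lock);
    // First one to flip the flag performs the unblock; relaxed is fine
    // because g_lock's unlock/lock provides the happens-before edge.
    if (!g_timeout_unblocked.exchange(true, std::memory_order_relaxed))
        g_blocked_data = 42;
}

static void blocker_done()
{
    std::lock_guard<std::mutex> guard(g_lock);
    bool did_timeout = g_timeout_unblocked.exchange(true, std::memory_order_relaxed);
    std::printf("timed out first: %s, data: %d\n", did_timeout ? "yes" : "no", g_blocked_data);
}

int main()
{
    std::thread a(timeout_fired);
    std::thread b(blocker_done);
    a.join();
    b.join();
}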
@@ -785,7 +786,7 @@ public:
                 // NOTE: this may execute on the same or any other processor!
                 ScopedSpinLock scheduler_lock(g_scheduler_lock);
                 ScopedSpinLock block_lock(m_block_lock);
-                if (m_blocker && timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed) == false)
+                if (m_blocker && timeout_unblocked.exchange(true) == false)
                     unblock();
             });
             if (!timer) {
@@ -833,7 +834,7 @@ public:
         }
         // Prevent the timeout from unblocking this thread if it happens to
         // be in the process of firing already
-        did_timeout |= timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed);
+        did_timeout |= timeout_unblocked.exchange(true);
         if (m_blocker) {
             // Remove ourselves...
             ASSERT(m_blocker == &t);
@@ -64,7 +64,7 @@ private:
     Function<void()> m_callback;
     Timer* m_next { nullptr };
     Timer* m_prev { nullptr };
-    Atomic<bool> m_queued { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_queued { false };
 
     bool operator<(const Timer& rhs) const
     {
@@ -78,8 +78,8 @@ private:
     {
         return m_id == rhs.m_id;
     }
-    bool is_queued() const { return m_queued.load(AK::MemoryOrder::memory_order_relaxed); }
-    void set_queued(bool queued) { m_queued.store(queued, AK::MemoryOrder::memory_order_relaxed); }
+    bool is_queued() const { return m_queued; }
+    void set_queued(bool queued) { m_queued = queued; }
     u64 now(bool) const;
 };
 
@@ -205,7 +205,7 @@ void MemoryManager::parse_memory_map()
     ASSERT(m_user_physical_pages > 0);
 
     // We start out with no committed pages
-    m_user_physical_pages_uncommitted = m_user_physical_pages;
+    m_user_physical_pages_uncommitted = m_user_physical_pages.load();
 }
 
 PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
@@ -208,12 +208,12 @@ private:
     RefPtr<PhysicalPage> m_shared_zero_page;
     RefPtr<PhysicalPage> m_lazy_committed_page;
 
-    unsigned m_user_physical_pages { 0 };
-    unsigned m_user_physical_pages_used { 0 };
-    unsigned m_user_physical_pages_committed { 0 };
-    unsigned m_user_physical_pages_uncommitted { 0 };
-    unsigned m_super_physical_pages { 0 };
-    unsigned m_super_physical_pages_used { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_used { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_committed { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_uncommitted { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages_used { 0 };
 
     NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
     NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
@@ -67,9 +67,9 @@ public:
     VMObject* m_next { nullptr };
     VMObject* m_prev { nullptr };
 
-    ALWAYS_INLINE void ref_region() { m_regions_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed); }
-    ALWAYS_INLINE void unref_region() { m_regions_count.fetch_sub(1, AK::MemoryOrder::memory_order_relaxed); }
-    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) > 1; }
+    ALWAYS_INLINE void ref_region() { m_regions_count++; }
+    ALWAYS_INLINE void unref_region() { m_regions_count--; }
+    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }
 
 protected:
     explicit VMObject(size_t);
@@ -88,7 +88,7 @@ private:
     VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
 
-    Atomic<u32> m_regions_count { 0 };
+    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
 };
 
 }