diff --git a/Kernel/FileSystem/Plan9FileSystem.cpp b/Kernel/FileSystem/Plan9FileSystem.cpp
index 6cc0b0d702..ff9b62b550 100644
--- a/Kernel/FileSystem/Plan9FileSystem.cpp
+++ b/Kernel/FileSystem/Plan9FileSystem.cpp
@@ -679,7 +679,7 @@ void Plan9FS::thread_main()
             dbg() << "Plan9FS: Thread terminating, error reading";
             return;
         }
-    } while (!m_thread_shutdown.load(AK::MemoryOrder::memory_order_relaxed));
+    } while (!m_thread_shutdown);
     dbg() << "Plan9FS: Thread terminating";
 }
 
diff --git a/Kernel/FileSystem/Plan9FileSystem.h b/Kernel/FileSystem/Plan9FileSystem.h
index d1de37a5c7..e371e743a3 100644
--- a/Kernel/FileSystem/Plan9FileSystem.h
+++ b/Kernel/FileSystem/Plan9FileSystem.h
@@ -162,7 +162,7 @@ private:
     SpinLock<u8> m_thread_lock;
     RefPtr<Thread> m_thread;
     Atomic<bool> m_thread_running { false };
-    Atomic<bool> m_thread_shutdown { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_thread_shutdown { false };
 };
 
 class Plan9FSInode final : public Inode {
diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
index a306537ca9..47e913978a 100644
--- a/Kernel/Heap/SlabAllocator.cpp
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -51,7 +51,7 @@ public:
         }
         slabs[0].next = nullptr;
         m_freelist = &slabs[m_slab_count - 1];
-        m_num_allocated.store(0, AK::MemoryOrder::memory_order_release);
+        m_num_allocated = 0;
     }
 
     constexpr size_t slab_size() const { return templated_slab_size; }
@@ -75,7 +75,7 @@ public:
                 next_free = free_slab->next;
             } while (!m_freelist.compare_exchange_strong(free_slab, next_free, AK::memory_order_acq_rel));
 
-            m_num_allocated.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
+            m_num_allocated++;
         }
 
 #ifdef SANITIZE_SLABS
@@ -104,11 +104,11 @@ public:
             free_slab->next = next_free;
         } while (!m_freelist.compare_exchange_strong(next_free, free_slab, AK::memory_order_acq_rel));
 
-        m_num_allocated.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+        m_num_allocated--;
     }
 
-    size_t num_allocated() const { return m_num_allocated.load(AK::MemoryOrder::memory_order_consume); }
-    size_t num_free() const { return m_slab_count - m_num_allocated.load(AK::MemoryOrder::memory_order_consume); }
+    size_t num_allocated() const { return m_num_allocated; }
+    size_t num_free() const { return m_slab_count - m_num_allocated; }
 
 private:
     struct FreeSlab {
@@ -117,7 +117,7 @@ private:
     };
 
     Atomic<FreeSlab*> m_freelist { nullptr };
-    Atomic<ssize_t> m_num_allocated;
+    Atomic<ssize_t, AK::MemoryOrder::memory_order_relaxed> m_num_allocated;
     size_t m_slab_count;
     void* m_base { nullptr };
     void* m_end { nullptr };
diff --git a/Kernel/Interrupts/GenericInterruptHandler.h b/Kernel/Interrupts/GenericInterruptHandler.h
index 143bb87137..e6eed7dd6d 100644
--- a/Kernel/Interrupts/GenericInterruptHandler.h
+++ b/Kernel/Interrupts/GenericInterruptHandler.h
@@ -48,7 +48,7 @@ public:
 
     u8 interrupt_number() const { return m_interrupt_number; }
 
-    size_t get_invoking_count() const { return m_invoking_count.load(AK::MemoryOrder::memory_order_relaxed); }
+    size_t get_invoking_count() const { return m_invoking_count; }
 
     virtual size_t sharing_devices_count() const = 0;
     virtual bool is_shared_handler() const = 0;
@@ -61,7 +61,7 @@ public:
     virtual bool eoi() = 0;
     ALWAYS_INLINE void increment_invoking_counter()
     {
-        m_invoking_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed);
+        m_invoking_count++;
     }
 
 protected:
@@ -71,7 +71,7 @@ protected:
     void disable_remap() { m_disable_remap = true; }
 
 private:
-    Atomic<unsigned> m_invoking_count { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_invoking_count { 0 };
     u8 m_interrupt_number { 0 };
     bool m_disable_remap { false };
 };
diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp
index 416edee21f..f24b8d5fb3 100644
--- a/Kernel/Lock.cpp
+++ b/Kernel/Lock.cpp
@@ -55,13 +55,13 @@ void Lock::lock(Mode mode)
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
             do {
                 // FIXME: Do not add new readers if writers are queued.
-                auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+                Mode current_mode = m_mode;
                 switch (current_mode) {
                 case Mode::Unlocked: {
#ifdef LOCK_TRACE_DEBUG
                    dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently unlocked";
#endif
-                    m_mode.store(mode, AK::MemoryOrder::memory_order_relaxed);
+                    m_mode = mode;
                     ASSERT(!m_holder);
                     ASSERT(m_shared_holders.is_empty());
                     if (mode == Mode::Exclusive) {
@@ -140,7 +140,7 @@ void Lock::unlock()
     ScopedCritical critical; // in case we're not in a critical section already
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
-            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+            Mode current_mode = m_mode;
#ifdef LOCK_TRACE_DEBUG
            if (current_mode == Mode::Shared)
                dbg() << "Lock::unlock @ " << this << ": release " << mode_to_string(current_mode) << ", locks held: " << m_times_locked;
@@ -177,7 +177,7 @@ void Lock::unlock()
 
             if (m_times_locked == 0) {
                 ASSERT(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
-                m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+                m_mode = Mode::Unlocked;
             }
 
#ifdef LOCK_DEBUG
@@ -218,7 +218,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
         ASSERT(m_times_locked > 0);
         lock_count_to_restore = m_times_locked;
         m_times_locked = 0;
-        m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+        m_mode = Mode::Unlocked;
         m_lock.store(false, AK::memory_order_release);
#ifdef LOCK_DEBUG
        m_holder->holding_lock(*this, -(int)lock_count_to_restore);
@@ -247,7 +247,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
         ASSERT(m_times_locked >= lock_count_to_restore);
         m_times_locked -= lock_count_to_restore;
         if (m_times_locked == 0)
-            m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+            m_mode = Mode::Unlocked;
         m_lock.store(false, AK::memory_order_release);
         previous_mode = Mode::Shared;
         break;
@@ -290,7 +290,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
     switch (mode) {
     case Mode::Exclusive: {
         auto expected_mode = Mode::Unlocked;
-        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive, AK::MemoryOrder::memory_order_relaxed))
+        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
             break;
#ifdef LOCK_RESTORE_DEBUG
        dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was unlocked";
@@ -308,7 +308,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
     }
     case Mode::Shared: {
         auto expected_mode = Mode::Unlocked;
-        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared, AK::MemoryOrder::memory_order_relaxed) && expected_mode != Mode::Shared)
+        if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
             break;
#ifdef LOCK_RESTORE_DEBUG
        dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was " << mode_to_string(expected_mode);
@@ -339,7 +339,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
 
 void Lock::clear_waiters()
 {
-    ASSERT(m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Shared);
+    ASSERT(m_mode != Mode::Shared);
     m_queue.wake_all();
 }
 
diff --git a/Kernel/Lock.h b/Kernel/Lock.h
index 85a0ad08b4..4d5e8bd6f9 100644
--- a/Kernel/Lock.h
+++ b/Kernel/Lock.h
@@ -58,7 +58,7 @@ public:
     void unlock();
     [[nodiscard]] Mode force_unlock_if_locked(u32&);
     void restore_lock(Mode, u32);
-    bool is_locked() const { return m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Unlocked; }
+    bool is_locked() const { return m_mode != Mode::Unlocked; }
     void clear_waiters();
 
     const char* name() const { return m_name; }
@@ -81,7 +81,7 @@ private:
     Atomic<bool> m_lock { false };
     const char* m_name { nullptr };
     WaitQueue m_queue;
-    Atomic<Mode> m_mode { Mode::Unlocked };
+    Atomic<Mode, AK::MemoryOrder::memory_order_relaxed> m_mode { Mode::Unlocked };
 
     // When locked exclusively, only the thread already holding the lock can
     // lock it again. When locked in shared mode, any thread can do that.
diff --git a/Kernel/Process.h b/Kernel/Process.h
index df4f332899..d6d6cbd000 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -154,8 +154,8 @@ public:
 
     bool is_dead() const { return m_dead; }
 
-    bool is_stopped() const { return m_is_stopped.load(AK::MemoryOrder::memory_order_relaxed); }
-    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped, AK::MemoryOrder::memory_order_relaxed); }
+    bool is_stopped() const { return m_is_stopped; }
+    bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped); }
 
     bool is_kernel_process() const { return m_is_kernel_process; }
     bool is_user_process() const { return !m_is_kernel_process; }
@@ -595,7 +595,7 @@ private:
     const bool m_is_kernel_process;
     bool m_dead { false };
     bool m_profiling { false };
-    Atomic<bool> m_is_stopped { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_stopped { false };
     bool m_should_dump_core { false };
 
     RefPtr<Custody> m_executable;
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 8de108bd98..6c0ef52571 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -750,8 +750,9 @@ public:
         m_in_block = true;
         T t(forward<Args>(args)...);
 
-        Atomic<bool> timeout_unblocked(false);
-        Atomic<bool> did_unblock(false);
+        // Relaxed semantics are fine for timeout_unblocked because we
+        // synchronize on the spin locks already.
+        Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
         RefPtr<Timer> timer;
         {
             switch (state()) {
@@ -785,7 +786,7 @@ public:
                 // NOTE: this may execute on the same or any other processor!
                 ScopedSpinLock scheduler_lock(g_scheduler_lock);
                 ScopedSpinLock block_lock(m_block_lock);
-                if (m_blocker && timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed) == false)
+                if (m_blocker && timeout_unblocked.exchange(true) == false)
                     unblock();
             });
             if (!timer) {
@@ -833,7 +834,7 @@ public:
             }
             // Prevent the timeout from unblocking this thread if it happens to
            // be in the process of firing already
-            did_timeout |= timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed);
+            did_timeout |= timeout_unblocked.exchange(true);
             if (m_blocker) {
                 // Remove ourselves...
                 ASSERT(m_blocker == &t);
diff --git a/Kernel/TimerQueue.h b/Kernel/TimerQueue.h
index a70911e972..87f73af7f8 100644
--- a/Kernel/TimerQueue.h
+++ b/Kernel/TimerQueue.h
@@ -64,7 +64,7 @@ private:
     Function<void()> m_callback;
     Timer* m_next { nullptr };
     Timer* m_prev { nullptr };
-    Atomic<bool> m_queued { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_queued { false };
 
     bool operator<(const Timer& rhs) const
     {
@@ -78,8 +78,8 @@ private:
     {
         return m_id == rhs.m_id;
     }
-    bool is_queued() const { return m_queued.load(AK::MemoryOrder::memory_order_relaxed); }
-    void set_queued(bool queued) { m_queued.store(queued, AK::MemoryOrder::memory_order_relaxed); }
+    bool is_queued() const { return m_queued; }
+    void set_queued(bool queued) { m_queued = queued; }
     u64 now(bool) const;
 };
 
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index f26eacb0a3..40948fc060 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -205,7 +205,7 @@ void MemoryManager::parse_memory_map()
     ASSERT(m_user_physical_pages > 0);
 
     // We start out with no committed pages
-    m_user_physical_pages_uncommitted = m_user_physical_pages;
+    m_user_physical_pages_uncommitted = m_user_physical_pages.load();
 }
 
 PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 9da4efaf3b..c99f30933d 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -208,12 +208,12 @@ private:
     RefPtr<PhysicalPage> m_shared_zero_page;
     RefPtr<PhysicalPage> m_lazy_committed_page;
 
-    unsigned m_user_physical_pages { 0 };
-    unsigned m_user_physical_pages_used { 0 };
-    unsigned m_user_physical_pages_committed { 0 };
-    unsigned m_user_physical_pages_uncommitted { 0 };
-    unsigned m_super_physical_pages { 0 };
-    unsigned m_super_physical_pages_used { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_used { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_committed { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_uncommitted { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages { 0 };
+    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages_used { 0 };
 
     NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
     NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
index 2a48daa66a..bfec830ea9 100644
--- a/Kernel/VM/VMObject.h
+++ b/Kernel/VM/VMObject.h
@@ -67,9 +67,9 @@ public:
     VMObject* m_next { nullptr };
     VMObject* m_prev { nullptr };
 
-    ALWAYS_INLINE void ref_region() { m_regions_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed); }
-    ALWAYS_INLINE void unref_region() { m_regions_count.fetch_sub(1, AK::MemoryOrder::memory_order_relaxed); }
-    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) > 1; }
+    ALWAYS_INLINE void ref_region() { m_regions_count++; }
+    ALWAYS_INLINE void unref_region() { m_regions_count--; }
+    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }
 
 protected:
     explicit VMObject(size_t);
@@ -88,7 +88,7 @@ private:
     VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
 
-    Atomic<u32> m_regions_count { 0 };
+    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
 };
 
 }
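
Note: the hunks above rely on AK::Atomic accepting a default memory order as a second template parameter, so the plain `++`, `--`, assignment and implicit-read forms at the call sites pick up relaxed ordering without spelling it out every time. As a rough, hedged illustration only (this is not the AK implementation; the BasicAtomic name and its members are invented for this sketch), such a wrapper could look like the following portable C++ on top of std::atomic:

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for an atomic with a per-member default memory order:
// the second template parameter fixes the order used by the convenience
// operators, so call sites can write `count++` or `if (!flag)` instead of
// repeating fetch_add(1, memory_order_relaxed) at every access.
template<typename T, std::memory_order DefaultMemoryOrder = std::memory_order_seq_cst>
class BasicAtomic {
public:
    constexpr BasicAtomic() = default;
    constexpr BasicAtomic(T value)
        : m_value(value)
    {
    }

    // Implicit read, e.g. `while (!m_thread_shutdown)`.
    operator T() const { return m_value.load(DefaultMemoryOrder); }

    // Plain assignment, e.g. `m_mode = Mode::Unlocked;`.
    T operator=(T desired)
    {
        m_value.store(desired, DefaultMemoryOrder);
        return desired;
    }

    // Post-increment/decrement, e.g. `m_invoking_count++;`.
    T operator++(int) { return m_value.fetch_add(1, DefaultMemoryOrder); }
    T operator--(int) { return m_value.fetch_sub(1, DefaultMemoryOrder); }

    // Explicit accessors still allow overriding the order per call.
    T load(std::memory_order order = DefaultMemoryOrder) const { return m_value.load(order); }
    void store(T desired, std::memory_order order = DefaultMemoryOrder) { m_value.store(desired, order); }
    T exchange(T desired, std::memory_order order = DefaultMemoryOrder) { return m_value.exchange(desired, order); }

private:
    std::atomic<T> m_value {};
};

// Usage mirroring the patch: a pure statistics counter that never guards
// other data, so relaxed accesses are sufficient.
static BasicAtomic<std::size_t, std::memory_order_relaxed> s_invoking_count { 0 };

int main()
{
    s_invoking_count++;                   // fetch_add(1, memory_order_relaxed)
    std::size_t count = s_invoking_count; // load(memory_order_relaxed)
    return count == 1 ? 0 : 1;
}

The point of the pattern is that the ordering decision is stated once, at the member declaration, instead of being repeated (and possibly mismatched) at every call site.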