
Kernel: Reduce LOCK_DEBUG ifdefs by utilizing Kernel::LockLocation

The LOCK_DEBUG conditional code is pretty ugly for a feature that we
only use rarely. We can remove a significant amount of this code by
utilizing a zero-sized fake type when not building in LOCK_DEBUG mode.

This lets us keep the same API, but just lets the compiler optimize it
away when we don't actually care about the location the caller came from.
Brian Gianforcaro 2021-08-07 04:19:39 -07:00 committed by Andreas Kling
parent 6c18b4e558
commit bea74f4b77
4 changed files with 13 additions and 53 deletions
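
The LockLocation type itself lives in Kernel/Locking/LockLocation.h, which this diff does not touch. A minimal sketch of what such a zero-sized stand-in could look like, assuming the names implied by the includes and signatures below:

#pragma once

#if LOCK_DEBUG
#    include <AK/SourceLocation.h>
#endif

namespace Kernel {

#if LOCK_DEBUG
// Debug builds: capture real caller information.
using LockLocation = SourceLocation;
#else
// Non-debug builds: an empty type with the same static interface.
// It has no members, so passing it around is free and the compiler
// can optimize it away entirely.
struct LockLocation {
    static constexpr LockLocation current() { return {}; }

private:
    constexpr LockLocation() = default;
};
#endif

}

Because both variants expose a static current(), every lock API below can default its location parameter unconditionally instead of wrapping each declaration in #if LOCK_DEBUG.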

Kernel/Locking/Mutex.cpp

@@ -4,22 +4,16 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-#ifdef LOCK_DEBUG
-#    include <AK/SourceLocation.h>
-#endif
 #include <Kernel/Debug.h>
 #include <Kernel/KSyms.h>
+#include <Kernel/Locking/LockLocation.h>
 #include <Kernel/Locking/Mutex.h>
 #include <Kernel/Locking/SpinLock.h>
 #include <Kernel/Thread.h>
 
 namespace Kernel {
 
-#if LOCK_DEBUG
-void Mutex::lock(Mode mode, const SourceLocation& location)
-#else
-void Mutex::lock(Mode mode)
-#endif
+void Mutex::lock(Mode mode, [[maybe_unused]] const LockLocation& location)
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
@@ -318,11 +312,7 @@ auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
     return current_mode;
 }
 
-#if LOCK_DEBUG
-void Mutex::restore_lock(Mode mode, u32 lock_count, const SourceLocation& location)
-#else
-void Mutex::restore_lock(Mode mode, u32 lock_count)
-#endif
+void Mutex::restore_lock(Mode mode, u32 lock_count, [[maybe_unused]] const LockLocation& location)
 {
     VERIFY(mode != Mode::Unlocked);
     VERIFY(lock_count > 0);
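
In non-LOCK_DEBUG builds the location parameter is never read, which is why the new signatures mark it [[maybe_unused]]; a small standalone illustration (do_locked_thing is hypothetical):

// Warning-free in release builds even though `where` is only read
// under LOCK_DEBUG.
void do_locked_thing([[maybe_unused]] const LockLocation& where = LockLocation::current())
{
#if LOCK_DEBUG
    dbgln("called from {}:{}", where.filename(), where.line_number());
#endif
}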

Kernel/Locking/Mutex.h

@@ -11,6 +11,7 @@
 #include <AK/HashMap.h>
 #include <AK/Types.h>
 #include <Kernel/Forward.h>
+#include <Kernel/Locking/LockLocation.h>
 #include <Kernel/Locking/LockMode.h>
 #include <Kernel/WaitQueue.h>
@@ -31,13 +32,8 @@ public:
     }
     ~Mutex() = default;
 
-#if LOCK_DEBUG
-    void lock(Mode mode = Mode::Exclusive, const SourceLocation& location = SourceLocation::current());
-    void restore_lock(Mode, u32, const SourceLocation& location = SourceLocation::current());
-#else
-    void lock(Mode = Mode::Exclusive);
-    void restore_lock(Mode, u32);
-#endif
+    void lock(Mode mode = Mode::Exclusive, const LockLocation& location = LockLocation::current());
+    void restore_lock(Mode, u32, const LockLocation& location = LockLocation::current());
 
     void unlock();
     [[nodiscard]] Mode force_unlock_if_locked(u32&);
@@ -115,18 +111,10 @@ public:
     {
     }
 
-#if LOCK_DEBUG
-    ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
-#else
-    ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive)
-#endif
+    ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive, const LockLocation& location = LockLocation::current())
         : m_lock(&l)
     {
-#if LOCK_DEBUG
         m_lock->lock(mode, location);
-#else
-        m_lock->lock(mode);
-#endif
     }
 
     ALWAYS_INLINE ~MutexLocker()
@@ -143,38 +131,22 @@ public:
         m_lock->unlock();
     }
 
-#if LOCK_DEBUG
-    ALWAYS_INLINE void attach_and_lock(Mutex& lock, Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
-#else
-    ALWAYS_INLINE void attach_and_lock(Mutex& lock, Mutex::Mode mode = Mutex::Mode::Exclusive)
-#endif
+    ALWAYS_INLINE void attach_and_lock(Mutex& lock, Mutex::Mode mode = Mutex::Mode::Exclusive, const LockLocation& location = LockLocation::current())
     {
         VERIFY(!m_locked);
         m_lock = &lock;
         m_locked = true;
-#if LOCK_DEBUG
         m_lock->lock(mode, location);
-#else
-        m_lock->lock(mode);
-#endif
     }
 
-#if LOCK_DEBUG
-    ALWAYS_INLINE void lock(Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
-#else
-    ALWAYS_INLINE void lock(Mutex::Mode mode = Mutex::Mode::Exclusive)
-#endif
+    ALWAYS_INLINE void lock(Mutex::Mode mode = Mutex::Mode::Exclusive, const LockLocation& location = LockLocation::current())
     {
         VERIFY(m_lock);
         VERIFY(!m_locked);
         m_locked = true;
-#if LOCK_DEBUG
         m_lock->lock(mode, location);
-#else
-        m_lock->lock(mode);
-#endif
     }
 
 private:
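
With the location defaulted to LockLocation::current(), call sites are identical in both build modes; a hypothetical caller (s_data_lock and update_shared_state are illustrative, not from this diff):

static Mutex s_data_lock;

void update_shared_state()
{
    // In LOCK_DEBUG builds, LockLocation::current() captures this
    // file/function/line; in release builds it passes the zero-sized
    // dummy and nothing is recorded.
    MutexLocker locker(s_data_lock);
    // ... touch state guarded by s_data_lock ...
}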

Kernel/Thread.cpp

@@ -514,7 +514,7 @@ void Thread::finalize()
         dbgln("Thread {} leaking {} Locks!", *this, lock_count());
         ScopedSpinLock list_lock(m_holding_locks_lock);
         for (auto& info : m_holding_locks_list) {
-            const auto& location = info.source_location;
+            const auto& location = info.lock_location;
             dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count);
         }
         VERIFY_NOT_REACHED();

Kernel/Thread.h

@@ -12,9 +12,6 @@
 #include <AK/IntrusiveList.h>
 #include <AK/Optional.h>
 #include <AK/OwnPtr.h>
-#ifdef LOCK_DEBUG
-#    include <AK/SourceLocation.h>
-#endif
 #include <AK/String.h>
 #include <AK/Time.h>
 #include <AK/Vector.h>
@@ -27,6 +24,7 @@
 #include <Kernel/Forward.h>
 #include <Kernel/KResult.h>
 #include <Kernel/KString.h>
+#include <Kernel/Locking/LockLocation.h>
 #include <Kernel/Locking/LockMode.h>
 #include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Scheduler.h>
@@ -1152,7 +1150,7 @@ public:
     RecursiveSpinLock& get_lock() const { return m_lock; }
 
 #if LOCK_DEBUG
-    void holding_lock(Mutex& lock, int refs_delta, const SourceLocation& location)
+    void holding_lock(Mutex& lock, int refs_delta, const LockLocation& location)
     {
         VERIFY(refs_delta != 0);
         m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
@@ -1317,7 +1315,7 @@ private:
 #if LOCK_DEBUG
     struct HoldingLockInfo {
         Mutex* lock;
-        SourceLocation source_location;
+        LockLocation lock_location;
         unsigned count;
     };
     Atomic<u32> m_holding_locks { 0 };
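
The #if LOCK_DEBUG blocks that remain, like holding_lock() above, are the ones that actually consume the location. A sketch of how Mutex::lock() might feed it through (this part of Mutex.cpp is not shown in the diff):

#if LOCK_DEBUG
    // Record that the current thread now holds this mutex, together
    // with the call site captured by the defaulted location argument.
    Thread::current()->holding_lock(*this, 1, location);
#endif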