1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-24 22:37:34 +00:00

Kernel/Locking: Add lock rank tracking per thread to find deadlocks

This change adds a static lock hierarchy / ranking to the Kernel with
the goal of reducing / finding deadlocks when running with SMP enabled.

We have seen quite a few lock ordering deadlocks (locks taken in a
different order on two different code paths). As we properly annotate
locks in the system, these facilities will find such locking
protocol violations automatically.

The `LockRank` enum documents the various locks in the system and their
rank. The implementation guarantees that a thread holding one or more
locks of a lower rank cannot acquire an additional lock with rank that
is greater or equal to any of the currently held locks.
This commit is contained in:
Brian Gianforcaro 2021-09-07 02:40:31 -07:00 committed by Andreas Kling
parent 0718afa773
commit 066b0590ec
5 changed files with 128 additions and 0 deletions

View file

@ -161,6 +161,7 @@ set(KERNEL_SOURCES
Memory/VirtualRange.cpp Memory/VirtualRange.cpp
Memory/VirtualRangeAllocator.cpp Memory/VirtualRangeAllocator.cpp
MiniStdLib.cpp MiniStdLib.cpp
Locking/LockRank.cpp
Locking/Mutex.cpp Locking/Mutex.cpp
Net/E1000ENetworkAdapter.cpp Net/E1000ENetworkAdapter.cpp
Net/E1000NetworkAdapter.cpp Net/E1000NetworkAdapter.cpp

View file

@ -0,0 +1,29 @@
/*
* Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Thread.h>
// Note: These stubs can't be in LockRank.h as that would create
// a cyclic dependency in the header include graph of the Kernel.
namespace Kernel {
void track_lock_acquire(LockRank rank)
{
    // Forward to the current thread's per-thread tracking. Skip when there
    // is no thread context yet, or when the thread is already crashing (we
    // don't want rank verification to fire while tearing a thread down).
    auto* current_thread = Thread::current();
    if (current_thread == nullptr)
        return;
    if (current_thread->is_crashing())
        return;
    current_thread->track_lock_acquire(rank);
}
void track_lock_release(LockRank rank)
{
    // Mirror of track_lock_acquire(): forward to the current thread unless
    // we have no thread context or the thread is already crashing.
    auto* current_thread = Thread::current();
    if (current_thread == nullptr)
        return;
    if (current_thread->is_crashing())
        return;
    current_thread->track_lock_release(rank);
}
}

43
Kernel/Locking/LockRank.h Normal file
View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/EnumBits.h>
namespace Kernel {
// To catch bugs where locks are taken out of order, we annotate all locks
// in the kernel with a rank. The rank describes the order in which locks
// are allowed to be taken. If a lock is acquired, and it is of an incompatible
// rank with the lock held by the executing thread then the system can detect
// the lock order violation and respond appropriately (crash with error).
//
// A thread holding a lower ranked lock cannot acquire a lock of a greater or equal rank.
// Each rank is a distinct bit so a thread can record the set of held ranks
// in a single bitmask (see Thread::m_lock_rank_mask). The acquire check
// requires every new lock's rank value to be numerically *smaller* than all
// currently held ranks, so locks that are taken first must have the largest
// values, and locks that may be taken last (from anywhere) the smallest.
enum class LockRank : int {
// Special marker for locks which haven't been annotated yet.
// Note: This should be removed once all locks are annotated.
None = 0x000,
// We need to be able to handle page faults from anywhere, so
// memory manager locks are our lowest rank lock.
MemoryManager = 0x001,
Interrupts = 0x002,
FileSystem = 0x004,
Thread = 0x008,
// Process locks are the highest rank, as they normally are taken
// first thing when processing syscalls.
// (NOTE(review): previously commented as "lowest", which contradicted
// MemoryManager above; the enforcement in Thread::track_lock_acquire
// treats larger values as taken-first, so Process is the highest rank.)
Process = 0x010,
};
// Allow ranks to be combined into / tested against a bitmask.
AK_ENUM_BITWISE_OPERATORS(LockRank);
// Implemented in LockRank.cpp to avoid a cyclic header dependency on Thread.h.
void track_lock_acquire(LockRank);
void track_lock_release(LockRank);
}

View file

@ -1226,6 +1226,56 @@ bool Thread::should_be_stopped() const
return process().is_stopped(); return process().is_stopped();
} }
// Record acquisition of a ranked lock, enforcing the lock hierarchy:
// a new lock may only be taken if its rank is strictly below every
// rank currently held by this thread.
void Thread::track_lock_acquire(LockRank rank)
{
    // Nothing to do for locks without a rank.
    if (rank == LockRank::None)
        return;

    if (m_lock_rank_mask != LockRank::None) {
        // Verify we are only attempting to take a lock of a lower rank than
        // *every* lock currently held. Comparing the whole mask against the
        // new rank is insufficient once several locks are held: e.g. with
        // Process | MemoryManager held, the mask (0x011) compares greater
        // than Thread (0x008) even though the held MemoryManager lock
        // (0x001) is lower-ranked than Thread. The lowest-ranked held lock
        // is the least significant set bit of the mask, so compare against
        // that instead.
        using PrimitiveType = UnderlyingType<LockRank>;
        auto const mask = static_cast<PrimitiveType>(m_lock_rank_mask);
        auto const lowest_held_rank = mask & ~(mask - 1); // Isolate the least significant set bit.
        VERIFY(lowest_held_rank > static_cast<PrimitiveType>(rank));
    }

    m_lock_rank_mask |= rank;
}
// Record release of a ranked lock, enforcing that ranked locks are
// released in the reverse of their acquisition order.
void Thread::track_lock_release(LockRank rank)
{
    // Nothing to do for locks without a rank.
    if (rank == LockRank::None)
        return;

    using PrimitiveType = UnderlyingType<LockRank>;
    auto const mask_bits = static_cast<PrimitiveType>(m_lock_rank_mask);
    auto const rank_bits = static_cast<PrimitiveType>(rank);

    // The caller must hand us exactly one rank bit; clearing several ranks
    // at once would corrupt the tracking mask, and we would assert somewhere
    // else. `x & (x - 1)` clears the least significant set bit, so the
    // result is zero only when at most one bit was set.
    bool const releasing_single_rank = (rank_bits & (rank_bits - 1)) == 0;

    // Locks can't be released out of order without violating the ranking.
    // Clearing the least significant set bit of the mask and or-ing the
    // released rank back in reproduces the original mask only when the rank
    // being released *is* that least significant bit, i.e. the most recently
    // acquired (lowest-ranked) lock.
    bool const releasing_in_order = ((mask_bits & (mask_bits - 1)) | rank_bits) == mask_bits;

    VERIFY(has_flag(m_lock_rank_mask, rank));
    VERIFY(releasing_single_rank);
    VERIFY(releasing_in_order);

    m_lock_rank_mask ^= rank;
}
} }
void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value) void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value)

View file

@ -29,6 +29,7 @@
#include <Kernel/Library/ListedRefCounted.h> #include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Locking/LockLocation.h> #include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/LockMode.h> #include <Kernel/Locking/LockMode.h>
#include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/SpinlockProtected.h> #include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h> #include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Scheduler.h> #include <Kernel/Scheduler.h>
@ -1083,6 +1084,9 @@ public:
u32 saved_critical() const { return m_saved_critical; } u32 saved_critical() const { return m_saved_critical; }
void save_critical(u32 critical) { m_saved_critical = critical; } void save_critical(u32 critical) { m_saved_critical = critical; }
void track_lock_acquire(LockRank rank);
void track_lock_release(LockRank rank);
[[nodiscard]] bool is_active() const { return m_is_active; } [[nodiscard]] bool is_active() const { return m_is_active; }
[[nodiscard]] bool is_finalizable() const [[nodiscard]] bool is_finalizable() const
@ -1302,6 +1306,7 @@ private:
Kernel::Mutex* m_blocking_lock { nullptr }; Kernel::Mutex* m_blocking_lock { nullptr };
u32 m_lock_requested_count { 0 }; u32 m_lock_requested_count { 0 };
IntrusiveListNode<Thread> m_blocked_threads_list_node; IntrusiveListNode<Thread> m_blocked_threads_list_node;
LockRank m_lock_rank_mask { LockRank::None };
#if LOCK_DEBUG #if LOCK_DEBUG
struct HoldingLockInfo { struct HoldingLockInfo {