diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt index 45d74f98d1..94de737dac 100644 --- a/Kernel/CMakeLists.txt +++ b/Kernel/CMakeLists.txt @@ -161,6 +161,7 @@ set(KERNEL_SOURCES Memory/VirtualRange.cpp Memory/VirtualRangeAllocator.cpp MiniStdLib.cpp + Locking/LockRank.cpp Locking/Mutex.cpp Net/E1000ENetworkAdapter.cpp Net/E1000NetworkAdapter.cpp diff --git a/Kernel/Locking/LockRank.cpp b/Kernel/Locking/LockRank.cpp new file mode 100644 index 0000000000..62efe7400d --- /dev/null +++ b/Kernel/Locking/LockRank.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org> + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include <Kernel/Locking/LockRank.h> +#include <Kernel/Thread.h> + +// Note: These stubs can't be in LockRank.h as that would create +// a cyclic dependency in the header include graph of the Kernel. + +namespace Kernel { + +void track_lock_acquire(LockRank rank) +{ + auto thread = Thread::current(); + if (thread && !thread->is_crashing()) + thread->track_lock_acquire(rank); +} + +void track_lock_release(LockRank rank) +{ + auto thread = Thread::current(); + if (thread && !thread->is_crashing()) + thread->track_lock_release(rank); +} + +} diff --git a/Kernel/Locking/LockRank.h b/Kernel/Locking/LockRank.h new file mode 100644 index 0000000000..38c76f7237 --- /dev/null +++ b/Kernel/Locking/LockRank.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org> + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#pragma once + +#include <AK/EnumBits.h> + +namespace Kernel { +// To catch bugs where locks are taken out of order, we annotate all locks +// in the kernel with a rank. The rank describes the order in which locks +// are allowed to be taken. If a lock is acquired, and it is of an incompatible +// rank with the lock held by the executing thread then the system can detect +// the lock order violation and respond appropriately (crash with error). +// +// A thread holding a lower ranked lock cannot acquire a lock of a greater or equal rank.
+enum class LockRank : int { + // Special marker for locks which haven't been annotated yet. + // Note: This should be removed once all locks are annotated. + None = 0x000, + + // We need to be able to handle page faults from anywhere, so + // memory manager locks are our lowest rank lock. + MemoryManager = 0x001, + + Interrupts = 0x002, + + FileSystem = 0x004, + + Thread = 0x008, + + // Process locks are the highest rank, as they normally are taken + // first thing when processing syscalls. + Process = 0x010, +}; + +AK_ENUM_BITWISE_OPERATORS(LockRank); + +void track_lock_acquire(LockRank); +void track_lock_release(LockRank); +} diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index dfb2138c88..d7abe0d65f 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -1226,6 +1226,56 @@ bool Thread::should_be_stopped() const return process().is_stopped(); } +void Thread::track_lock_acquire(LockRank rank) +{ + // Nothing to do for locks without a rank. + if (rank == LockRank::None) + return; + + if (m_lock_rank_mask != LockRank::None) { + // Verify we are only attempting to take a lock of a lower rank. + VERIFY(m_lock_rank_mask > rank); + } + + m_lock_rank_mask |= rank; +} + +void Thread::track_lock_release(LockRank rank) +{ + // Nothing to do for locks without a rank. + if (rank == LockRank::None) + return; + + using PrimitiveType = UnderlyingType<LockRank>; + + // The rank value from the caller should only contain a single bit, otherwise + // we are disabling the tracking for multiple locks at once which will corrupt + // the lock tracking mask, and we will assert somewhere else. + auto rank_is_a_single_bit = [](auto rank_enum) -> bool { + auto rank = static_cast<PrimitiveType>(rank_enum); + auto rank_without_least_significant_bit = rank - 1; + return (rank & rank_without_least_significant_bit) == 0; + }; + + // We can't release locks out of order, as that would violate the ranking.
+ // This is validated by clearing the least significant bit of the mask, and + // then bitwise or-ing the rank we are trying to release with the resulting + // mask. If the rank we are releasing is truly the lowest rank we hold then the mask + // we get back will be equal to the current mask stored on the thread. + auto rank_is_in_order = [](auto mask_enum, auto rank_enum) -> bool { + auto mask = static_cast<PrimitiveType>(mask_enum); + auto rank = static_cast<PrimitiveType>(rank_enum); + auto mask_without_least_significant_bit = mask - 1; + return ((mask & mask_without_least_significant_bit) | rank) == mask; + }; + + VERIFY(has_flag(m_lock_rank_mask, rank)); + VERIFY(rank_is_a_single_bit(rank)); + VERIFY(rank_is_in_order(m_lock_rank_mask, rank)); + + m_lock_rank_mask ^= rank; +} + } void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value) diff --git a/Kernel/Thread.h b/Kernel/Thread.h index 6a5cd349e7..eb568a17c7 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -29,6 +29,7 @@ #include #include #include +#include <Kernel/Locking/LockRank.h> #include #include #include @@ -1083,6 +1084,9 @@ public: u32 saved_critical() const { return m_saved_critical; } void save_critical(u32 critical) { m_saved_critical = critical; } + void track_lock_acquire(LockRank rank); + void track_lock_release(LockRank rank); + [[nodiscard]] bool is_active() const { return m_is_active; } [[nodiscard]] bool is_finalizable() const @@ -1302,6 +1306,7 @@ private: Kernel::Mutex* m_blocking_lock { nullptr }; u32 m_lock_requested_count { 0 }; IntrusiveListNode m_blocked_threads_list_node; + LockRank m_lock_rank_mask { LockRank::None }; #if LOCK_DEBUG struct HoldingLockInfo {