diff --git a/Kernel/Locking/Mutex.cpp b/Kernel/Locking/Mutex.cpp index 8227f691e8..19efa2b7da 100644 --- a/Kernel/Locking/Mutex.cpp +++ b/Kernel/Locking/Mutex.cpp @@ -90,6 +90,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location) return; } case Mode::Shared: { + VERIFY(m_behavior == MutexBehavior::Regular); VERIFY(!m_holder); if (mode == Mode::Exclusive) { dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): blocking for exclusive access, currently shared, locks held {}", this, m_name, m_times_locked); @@ -207,9 +208,15 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& l if constexpr (LOCK_IN_CRITICAL_DEBUG) VERIFY_INTERRUPTS_ENABLED(); m_blocked_thread_lists.with([&](auto& lists) { - auto& list = lists.list_for_mode(mode); - VERIFY(!list.contains(current_thread)); - list.append(current_thread); + auto append_to_list = [&]<typename L>(L& list) { + VERIFY(!list.contains(current_thread)); + list.append(current_thread); + }; + + if (m_behavior == MutexBehavior::BigLock) + append_to_list(lists.exclusive_big_lock); + else + append_to_list(lists.list_for_mode(mode)); }); dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waiting...", this, m_name); @@ -217,9 +224,15 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& l dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waited", this, m_name); m_blocked_thread_lists.with([&](auto& lists) { - auto& list = lists.list_for_mode(mode); - VERIFY(list.contains(current_thread)); - list.remove(current_thread); + auto remove_from_list = [&]<typename L>(L& list) { + VERIFY(list.contains(current_thread)); + list.remove(current_thread); + }; + + if (m_behavior == MutexBehavior::BigLock) + remove_from_list(lists.exclusive_big_lock); + else + remove_from_list(lists.list_for_mode(mode)); }); } @@ -235,6 +248,7 @@ void Mutex::unblock_waiters(Mode previous_mode) auto unblock_shared = [&]() { if (lists.shared.is_empty()) return false; + VERIFY(m_behavior == MutexBehavior::Regular); m_mode = 
Mode::Shared; for (auto& thread : lists.shared) { auto requested_locks = thread.unblock_from_mutex(*this); @@ -247,8 +261,8 @@ void Mutex::unblock_waiters(Mode previous_mode) } return true; }; - auto unblock_exclusive = [&]() { - if (auto* next_exclusive_thread = lists.exclusive.first()) { + auto unblock_exclusive = [&]<typename L>(L& list) { + if (auto* next_exclusive_thread = list.first()) { m_mode = Mode::Exclusive; m_times_locked = next_exclusive_thread->unblock_from_mutex(*this); m_holder = next_exclusive_thread; @@ -257,11 +271,13 @@ return false; }; - if (previous_mode == Mode::Exclusive) { + if (m_behavior == MutexBehavior::BigLock) { + unblock_exclusive(lists.exclusive_big_lock); + } else if (previous_mode == Mode::Exclusive) { if (!unblock_shared()) - unblock_exclusive(); + unblock_exclusive(lists.exclusive); } else { - if (!unblock_exclusive()) + if (!unblock_exclusive(lists.exclusive)) unblock_shared(); } }); diff --git a/Kernel/Locking/Mutex.h b/Kernel/Locking/Mutex.h index 2bd0a036a9..37769ce79d 100644 --- a/Kernel/Locking/Mutex.h +++ b/Kernel/Locking/Mutex.h @@ -27,8 +27,15 @@ class Mutex { public: using Mode = LockMode; - Mutex(StringView name = {}) + // FIXME: remove this after annihilating Process::m_big_lock + enum class MutexBehavior { + Regular, + BigLock, + }; + + Mutex(StringView name = {}, MutexBehavior behavior = MutexBehavior::Regular) : m_name(name) + , m_behavior(behavior) { } ~Mutex() = default; @@ -72,12 +79,18 @@ public: private: using BlockedThreadList = IntrusiveList<&Thread::m_blocked_threads_list_node>; + // FIXME: remove this after annihilating Process::m_big_lock + using BigLockBlockedThreadList = IntrusiveList<&Thread::m_big_lock_blocked_threads_list_node>; + void block(Thread&, Mode, SpinlockLocker<Spinlock>&, u32); void unblock_waiters(Mode); StringView m_name; Mode m_mode { Mode::Unlocked }; + // FIXME: remove this after annihilating Process::m_big_lock + MutexBehavior m_behavior; + // When locked 
exclusively, only the thread already holding the lock can // lock it again. When locked in shared mode, any thread can do that. u32 m_times_locked { 0 }; @@ -94,6 +107,9 @@ private: BlockedThreadList exclusive; BlockedThreadList shared; + // FIXME: remove this after annihilating Process::m_big_lock + BigLockBlockedThreadList exclusive_big_lock; + ALWAYS_INLINE BlockedThreadList& list_for_mode(Mode mode) { VERIFY(mode == Mode::Exclusive || mode == Mode::Shared); diff --git a/Kernel/Process.h b/Kernel/Process.h index 2a4b263929..4148440d4c 100644 --- a/Kernel/Process.h +++ b/Kernel/Process.h @@ -809,7 +809,7 @@ private: size_t m_master_tls_size { 0 }; size_t m_master_tls_alignment { 0 }; - Mutex m_big_lock { "Process" }; + Mutex m_big_lock { "Process", Mutex::MutexBehavior::BigLock }; Mutex m_ptrace_lock { "ptrace" }; RefPtr<Timer> m_alarm_timer; diff --git a/Kernel/Thread.h b/Kernel/Thread.h index 5ba85da85b..7865924051 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -1228,6 +1228,9 @@ private: LockRank m_lock_rank_mask { LockRank::None }; bool m_allocation_enabled { true }; + // FIXME: remove this after annihilating Process::m_big_lock + IntrusiveListNode<Thread> m_big_lock_blocked_threads_list_node; + #if LOCK_DEBUG struct HoldingLockInfo { Mutex* lock;