From 05ba4295e9613710d6333ddd213f094708a4a27c Mon Sep 17 00:00:00 2001
From: Sergey Bugaev
Date: Sat, 18 Apr 2020 12:21:00 +0300
Subject: [PATCH] Kernel: Introduce shared locking mode

A Lock can now be held either in shared or exclusive mode. Multiple
threads can hold the same lock in shared mode at one time, but if any
thread holds the lock in exclusive mode, no other thread can hold it
at the same time in either mode.
---
 Kernel/Lock.cpp | 48 +++++++++++++++++++++++++++++++++++++-----------
 Kernel/Lock.h   | 34 +++++++++++++++++++++++++---------
 2 files changed, 62 insertions(+), 20 deletions(-)

diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp
index ae3a1b8736..eaeb383d64 100644
--- a/Kernel/Lock.cpp
+++ b/Kernel/Lock.cpp
@@ -30,8 +30,20 @@
 
 namespace Kernel {
 
-void Lock::lock()
+static bool modes_conflict(Lock::Mode mode1, Lock::Mode mode2)
 {
+    if (mode1 == Lock::Mode::Unlocked || mode2 == Lock::Mode::Unlocked)
+        return false;
+
+    if (mode1 == Lock::Mode::Shared && mode2 == Lock::Mode::Shared)
+        return false;
+
+    return true;
+}
+
+void Lock::lock(Mode mode)
+{
+    ASSERT(mode != Mode::Unlocked);
     ASSERT(!Scheduler::is_active());
     if (!are_interrupts_enabled()) {
         klog() << "Interrupts disabled when trying to take Lock{" << m_name << "}";
@@ -41,9 +53,15 @@ void Lock::lock()
     for (;;) {
         bool expected = false;
         if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
-            if (!m_holder || m_holder == Thread::current) {
+            // FIXME: Do not add new readers if writers are queued.
+            bool modes_dont_conflict = !modes_conflict(m_mode, mode);
+            bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == Thread::current;
+            if (modes_dont_conflict || already_hold_exclusive_lock) {
+                // We got the lock!
+                if (!already_hold_exclusive_lock)
+                    m_mode = mode;
                 m_holder = Thread::current;
-                ++m_level;
+                m_times_locked++;
                 m_lock.store(false, AK::memory_order_release);
                 return;
             }
@@ -57,14 +75,20 @@ void Lock::unlock()
     for (;;) {
         bool expected = false;
         if (m_lock.compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
-            ASSERT(m_holder == Thread::current);
-            ASSERT(m_level);
-            --m_level;
-            if (m_level) {
+            ASSERT(m_times_locked);
+            --m_times_locked;
+
+            ASSERT(m_mode != Mode::Unlocked);
+            if (m_mode == Mode::Exclusive)
+                ASSERT(m_holder == Thread::current);
+            if (m_holder == Thread::current && (m_mode == Mode::Shared || m_times_locked == 0))
+                m_holder = nullptr;
+
+            if (m_times_locked > 0) {
                 m_lock.store(false, AK::memory_order_release);
                 return;
             }
-            m_holder = nullptr;
+            m_mode = Mode::Unlocked;
             m_queue.wake_one(&m_lock);
             return;
         }
@@ -75,19 +99,21 @@
 
 bool Lock::force_unlock_if_locked()
 {
+    ASSERT(m_mode != Mode::Shared);
     InterruptDisabler disabler;
     if (m_holder != Thread::current)
         return false;
-    ASSERT(m_level == 1);
-    ASSERT(m_holder == Thread::current);
+    ASSERT(m_times_locked == 1);
     m_holder = nullptr;
-    --m_level;
+    m_mode = Mode::Unlocked;
+    m_times_locked = 0;
     m_queue.wake_one();
     return true;
 }
 
 void Lock::clear_waiters()
 {
+    ASSERT(m_mode != Mode::Shared);
     InterruptDisabler disabler;
     m_queue.clear();
 }
diff --git a/Kernel/Lock.h b/Kernel/Lock.h
index 28ab1832f5..d3337e1304 100644
--- a/Kernel/Lock.h
+++ b/Kernel/Lock.h
@@ -41,9 +41,15 @@ public:
         : m_name(name)
     {
     }
-    ~Lock() {}
+    ~Lock() { }
 
-    void lock();
+    enum class Mode {
+        Unlocked,
+        Shared,
+        Exclusive
+    };
+
+    void lock(Mode = Mode::Exclusive);
     void unlock();
     bool force_unlock_if_locked();
     bool is_locked() const { return m_holder; }
@@ -53,33 +59,43 @@ public:
 
 private:
     Atomic<bool> m_lock { false };
-    u32 m_level { 0 };
-    Thread* m_holder { nullptr };
     const char* m_name { nullptr };
     WaitQueue m_queue;
+    Mode m_mode { Mode::Unlocked };
+
+    // When locked exclusively, only the thread already holding the lock can
+    // lock it again. When locked in shared mode, any thread can do that.
+    u32 m_times_locked { 0 };
+
+    // One of the threads that hold this lock, or nullptr. When locked in shared
+    // mode, this is stored on best effort basis: nullptr value does *not* mean
+    // the lock is unlocked, it just means we don't know which threads hold it.
+    // When locked exclusively, this is always the one thread that holds the
+    // lock.
+    Thread* m_holder { nullptr };
 };
 
 class Locker {
 public:
-    [[gnu::always_inline]] inline explicit Locker(Lock& l)
+    [[gnu::always_inline]] inline explicit Locker(Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
         : m_lock(l)
     {
-        lock();
+        m_lock.lock(mode);
     }
     [[gnu::always_inline]] inline ~Locker() { unlock(); }
     [[gnu::always_inline]] inline void unlock() { m_lock.unlock(); }
-    [[gnu::always_inline]] inline void lock() { m_lock.lock(); }
+    [[gnu::always_inline]] inline void lock(Lock::Mode mode = Lock::Mode::Exclusive) { m_lock.lock(mode); }
 
 private:
     Lock& m_lock;
 };
 
-#define LOCKER(lock) Locker locker(lock)
+#define LOCKER(...) Locker locker(__VA_ARGS__)
 
 template<typename T>
 class Lockable {
 public:
-    Lockable() {}
+    Lockable() { }
     Lockable(T&& resource)
         : m_resource(move(resource))
     {
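
Usage sketch (not part of the patch): a minimal example of the API added
above. Lock, Locker, Lock::Mode and the variadic LOCKER macro come from
this patch; the SharedCounter class, its members, and the lock name are
hypothetical, for illustration only. Readers take the lock in
Lock::Mode::Shared and may overlap with one another; the writer keeps
the default exclusive mode and excludes every other holder.

    #include <Kernel/Lock.h>

    namespace Kernel {

    // Hypothetical consumer, for illustration only.
    class SharedCounter {
    public:
        int value() const
        {
            // Any number of threads may hold m_lock in shared mode at once.
            LOCKER(m_lock, Lock::Mode::Shared);
            return m_value;
        }

        void increment()
        {
            // Default mode is exclusive: waits until no shared or exclusive
            // holders remain, then keeps all other threads out.
            LOCKER(m_lock);
            ++m_value;
        }

    private:
        mutable Lock m_lock { "SharedCounter" };
        int m_value { 0 };
    };

    }

As the FIXME in Lock::lock() notes, new readers are still admitted while
a would-be exclusive holder is waiting, so writers can starve under a
steady stream of shared acquisitions.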