Kernel: Update terminology around Thread's "blocking mutex"
It's more accurate to say that we're blocking on a mutex, rather than blocking on a lock. The previous terminology made sense when this code was using something called Kernel::Lock, but since it was renamed to Kernel::Mutex, this update brings the language back in sync.
Parent: dca5fe69eb
Commit: b0e5406ae2

3 changed files with 19 additions and 19 deletions
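As a minimal, self-contained sketch of the naming this commit settles on (not the SerenityOS kernel code): a thread records the Mutex it is blocked on in m_blocking_mutex and reports its requested lock count through unblock_from_mutex(). The block_on_mutex() helper and the simplified bookkeeping below are hypothetical, added only to illustrate the renamed surface.

// Minimal sketch only; mirrors the renamed names m_blocking_mutex /
// unblock_from_mutex with simplified bookkeeping. block_on_mutex() is a
// hypothetical helper for illustration, not a kernel API.
#include <cassert>
#include <cstdint>

struct Mutex { }; // stand-in for Kernel::Mutex

struct Thread {
    // The mutex this thread is currently blocked on, if any.
    Mutex* m_blocking_mutex { nullptr };
    uint32_t m_lock_requested_count { 0 };

    void block_on_mutex(Mutex& mutex, uint32_t lock_count)
    {
        assert(!m_blocking_mutex);
        m_blocking_mutex = &mutex;
        m_lock_requested_count = lock_count;
    }

    // Clears the blocking mutex and returns how many lock levels were
    // requested, mirroring the u32 return value of the kernel's method.
    uint32_t unblock_from_mutex(Mutex& mutex)
    {
        assert(m_blocking_mutex == &mutex);
        m_blocking_mutex = nullptr;
        return m_lock_requested_count;
    }
};

int main()
{
    Mutex m;
    Thread t;
    t.block_on_mutex(m, 2);
    assert(t.unblock_from_mutex(m) == 2);
    assert(t.m_blocking_mutex == nullptr);
    return 0;
}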
@@ -231,7 +231,7 @@ void Mutex::unblock_waiters(Mode previous_mode)
             return false;
         m_mode = Mode::Shared;
         for (auto& thread : m_blocked_threads_list_shared) {
-            auto requested_locks = thread.unblock_from_lock(*this);
+            auto requested_locks = thread.unblock_from_mutex(*this);
             m_shared_holders += requested_locks;
 #if LOCK_SHARED_UPGRADE_DEBUG
             auto set_result = m_shared_holders_map.set(&thread, requested_locks);
@@ -244,7 +244,7 @@ void Mutex::unblock_waiters(Mode previous_mode)
     auto unblock_exclusive = [&]() {
         if (auto* next_exclusive_thread = m_blocked_threads_list_exclusive.first()) {
             m_mode = Mode::Exclusive;
-            m_times_locked = next_exclusive_thread->unblock_from_lock(*this);
+            m_times_locked = next_exclusive_thread->unblock_from_mutex(*this);
             m_holder = next_exclusive_thread;
             return true;
         }
@@ -286,13 +286,13 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
     }

     // If we're blocking on the big-lock we may actually be in the process
-    // of unblocking from another lock. If that's the case m_blocking_lock
+    // of unblocking from another lock. If that's the case m_blocking_mutex
     // is already set
     auto& big_lock = process().big_lock();
-    VERIFY((&lock == &big_lock && m_blocking_lock != &big_lock) || !m_blocking_lock);
+    VERIFY((&lock == &big_lock && m_blocking_mutex != &big_lock) || !m_blocking_mutex);

-    auto* previous_blocking_lock = m_blocking_lock;
-    m_blocking_lock = &lock;
+    auto* previous_blocking_mutex = m_blocking_mutex;
+    m_blocking_mutex = &lock;
     m_lock_requested_count = lock_count;

     set_state(Thread::State::Blocked);
@@ -321,31 +321,31 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
         VERIFY(Processor::in_critical());

         SpinlockLocker block_lock2(m_block_lock);
-        VERIFY(!m_blocking_lock);
-        m_blocking_lock = previous_blocking_lock;
+        VERIFY(!m_blocking_mutex);
+        m_blocking_mutex = previous_blocking_mutex;
         break;
     }

     lock_lock.lock();
 }

-u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
+u32 Thread::unblock_from_mutex(Kernel::Mutex& mutex)
 {
     SpinlockLocker block_lock(m_block_lock);
-    VERIFY(m_blocking_lock == &lock);
+    VERIFY(m_blocking_mutex == &mutex);
     auto requested_count = m_lock_requested_count;
     block_lock.unlock();

     auto do_unblock = [&]() {
         SpinlockLocker scheduler_lock(g_scheduler_lock);
         SpinlockLocker block_lock(m_block_lock);
-        VERIFY(m_blocking_lock == &lock);
+        VERIFY(m_blocking_mutex == &mutex);
         VERIFY(!Processor::current_in_irq());
         VERIFY(g_scheduler_lock.is_locked_by_current_processor());
         VERIFY(m_block_lock.is_locked_by_current_processor());
-        VERIFY(m_blocking_lock == &lock);
-        dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
-        m_blocking_lock = nullptr;
+        VERIFY(m_blocking_mutex == &mutex);
+        dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &mutex);
+        m_blocking_mutex = nullptr;
         if (Thread::current() == this) {
             set_state(Thread::State::Running);
             return;
@@ -391,7 +391,7 @@ void Thread::unblock(u8 signal)
     VERIFY(m_block_lock.is_locked_by_current_processor());
     if (m_state != Thread::State::Blocked)
         return;
-    if (m_blocking_lock)
+    if (m_blocking_mutex)
         return;
     VERIFY(m_blocker);
     if (signal != 0) {
@@ -583,7 +583,7 @@ StringView Thread::state_string() const
         return "Stopped"sv;
     case Thread::State::Blocked: {
         SpinlockLocker block_lock(m_block_lock);
-        if (m_blocking_lock)
+        if (m_blocking_mutex)
             return "Mutex"sv;
         if (m_blocker)
             return m_blocker->state_string();
@@ -960,7 +960,7 @@ void Thread::resume_from_stopped()
     VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     if (m_stop_state == Thread::State::Blocked) {
         SpinlockLocker block_lock(m_block_lock);
-        if (m_blocker || m_blocking_lock) {
+        if (m_blocker || m_blocking_mutex) {
             // Hasn't been unblocked yet
             set_state(Thread::State::Blocked, 0);
         } else {
@@ -870,7 +870,7 @@ public:
         return block_impl(timeout, blocker);
     }

-    u32 unblock_from_lock(Kernel::Mutex&);
+    u32 unblock_from_mutex(Kernel::Mutex&);
     void unblock_from_blocker(Blocker&);
     void unblock(u8 signal = 0);
@@ -1227,7 +1227,7 @@ private:
     Optional<Memory::VirtualRange> m_thread_specific_range;
     Array<SignalActionData, NSIG> m_signal_action_data;
     Blocker* m_blocker { nullptr };
-    Kernel::Mutex* m_blocking_lock { nullptr };
+    Kernel::Mutex* m_blocking_mutex { nullptr };
     u32 m_lock_requested_count { 0 };
     IntrusiveListNode<Thread> m_blocked_threads_list_node;
     LockRank m_lock_rank_mask { LockRank::None };