1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 16:17:45 +00:00

Kernel: Track big lock blocked threads in separate list

When we lock a mutex, eventually `Thread::block` is invoked which could
in turn invoke `Process::big_lock().restore_exclusive_lock()`. This
would then try to add the current thread to a different blocked thread
list than the one in use for the original mutex being locked, and
because it's an intrusive list, the thread is removed from its original
list during the `.append()`. When the original mutex eventually
unblocks, we no longer have the thread in the intrusive blocked threads
list and we panic.

Solve this by making the big lock mutex special and giving it its own
blocked thread list. Because the process big lock is temporary and is
being actively removed from e.g. syscalls, it's a matter of time before
we can also remove the fix introduced by this commit.

Fixes issue #9401.
This commit is contained in:
Jelle Raaijmakers 2022-04-06 01:49:30 +02:00 committed by Andreas Kling
parent be26818448
commit 7826729ab2
4 changed files with 48 additions and 13 deletions

View file

@ -90,6 +90,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
return;
}
case Mode::Shared: {
VERIFY(m_behavior == MutexBehavior::Regular);
VERIFY(!m_holder);
if (mode == Mode::Exclusive) {
dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): blocking for exclusive access, currently shared, locks held {}", this, m_name, m_times_locked);
@ -207,9 +208,15 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& l
if constexpr (LOCK_IN_CRITICAL_DEBUG)
VERIFY_INTERRUPTS_ENABLED();
m_blocked_thread_lists.with([&](auto& lists) {
auto& list = lists.list_for_mode(mode);
VERIFY(!list.contains(current_thread));
list.append(current_thread);
auto append_to_list = [&]<typename L>(L& list) {
VERIFY(!list.contains(current_thread));
list.append(current_thread);
};
if (m_behavior == MutexBehavior::BigLock)
append_to_list(lists.exclusive_big_lock);
else
append_to_list(lists.list_for_mode(mode));
});
dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waiting...", this, m_name);
@ -217,9 +224,15 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock>& l
dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waited", this, m_name);
m_blocked_thread_lists.with([&](auto& lists) {
auto& list = lists.list_for_mode(mode);
VERIFY(list.contains(current_thread));
list.remove(current_thread);
auto remove_from_list = [&]<typename L>(L& list) {
VERIFY(list.contains(current_thread));
list.remove(current_thread);
};
if (m_behavior == MutexBehavior::BigLock)
remove_from_list(lists.exclusive_big_lock);
else
remove_from_list(lists.list_for_mode(mode));
});
}
@ -235,6 +248,7 @@ void Mutex::unblock_waiters(Mode previous_mode)
auto unblock_shared = [&]() {
if (lists.shared.is_empty())
return false;
VERIFY(m_behavior == MutexBehavior::Regular);
m_mode = Mode::Shared;
for (auto& thread : lists.shared) {
auto requested_locks = thread.unblock_from_mutex(*this);
@ -247,8 +261,8 @@ void Mutex::unblock_waiters(Mode previous_mode)
}
return true;
};
auto unblock_exclusive = [&]() {
if (auto* next_exclusive_thread = lists.exclusive.first()) {
auto unblock_exclusive = [&]<typename L>(L& list) {
if (auto* next_exclusive_thread = list.first()) {
m_mode = Mode::Exclusive;
m_times_locked = next_exclusive_thread->unblock_from_mutex(*this);
m_holder = next_exclusive_thread;
@ -257,11 +271,13 @@ void Mutex::unblock_waiters(Mode previous_mode)
return false;
};
if (previous_mode == Mode::Exclusive) {
if (m_behavior == MutexBehavior::BigLock) {
unblock_exclusive(lists.exclusive_big_lock);
} else if (previous_mode == Mode::Exclusive) {
if (!unblock_shared())
unblock_exclusive();
unblock_exclusive(lists.exclusive);
} else {
if (!unblock_exclusive())
if (!unblock_exclusive(lists.exclusive))
unblock_shared();
}
});