From 704e1c2e3d4e6d1023c0ffff64328aae49ce9e76 Mon Sep 17 00:00:00 2001
From: Tom
Date: Thu, 15 Jul 2021 19:45:22 -0600
Subject: [PATCH] Kernel: Rename functions to be less confusing

Thread::yield_and_release_relock_big_lock releases the big lock, yields
and then relocks the big lock.

Thread::yield_assuming_not_holding_big_lock yields assuming the big
lock is not being held.
---
 Kernel/Syscalls/sched.cpp |  2 +-
 Kernel/Thread.cpp         | 13 +++++++------
 Kernel/Thread.h           |  8 ++++----
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/Kernel/Syscalls/sched.cpp b/Kernel/Syscalls/sched.cpp
index 1490c3ccf6..2bdb29090f 100644
--- a/Kernel/Syscalls/sched.cpp
+++ b/Kernel/Syscalls/sched.cpp
@@ -11,7 +11,7 @@ namespace Kernel {
 KResultOr<FlatPtr> Process::sys$yield()
 {
     REQUIRE_PROMISE(stdio);
-    Thread::current()->yield_without_holding_big_lock();
+    Thread::current()->yield_and_release_relock_big_lock();
     return 0;
 }
 
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index e4bb3dc8c4..d28f8af155 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -219,9 +219,9 @@ void Thread::block(Kernel::Lock& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
         if (&lock != &big_lock && big_lock.own_lock()) {
             // We're locking another lock and already hold the big lock...
             // We need to release the big lock
-            yield_without_holding_big_lock();
+            yield_and_release_relock_big_lock();
         } else {
-            yield_while_not_holding_big_lock();
+            yield_assuming_not_holding_big_lock();
         }
         VERIFY(Processor::current().in_critical());
 
@@ -414,9 +414,10 @@ void Thread::exit(void* exit_value)
     die_if_needed();
 }
 
-void Thread::yield_while_not_holding_big_lock()
+void Thread::yield_assuming_not_holding_big_lock()
 {
     VERIFY(!g_scheduler_lock.own_lock());
+    VERIFY(!process().big_lock().own_lock());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
@@ -426,7 +427,7 @@ void Thread::yield_while_not_holding_big_lock()
     Processor::current().restore_critical(prev_crit, prev_flags);
 }
 
-void Thread::yield_without_holding_big_lock()
+void Thread::yield_and_release_relock_big_lock()
 {
     VERIFY(!g_scheduler_lock.own_lock());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
@@ -594,7 +595,7 @@ void Thread::check_dispatch_pending_signal()
     switch (result) {
     case DispatchSignalResult::Yield:
-        yield_while_not_holding_big_lock();
+        yield_assuming_not_holding_big_lock();
         break;
     default:
         break;
     }
@@ -701,7 +702,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)
         result = dispatch_signal(signal);
     }
     if (result == DispatchSignalResult::Yield)
-        yield_without_holding_big_lock();
+        yield_and_release_relock_big_lock();
 }
 
 DispatchSignalResult Thread::dispatch_one_pending_signal()
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 64916838d5..7b75b2a2b4 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -819,7 +819,7 @@ public:
         while (state() == Thread::Stopped) {
             lock.unlock();
             // We shouldn't be holding the big lock here
-            yield_while_not_holding_big_lock();
+            yield_assuming_not_holding_big_lock();
             lock.lock();
         }
     }
@@ -905,7 +905,7 @@ public:
             // Yield to the scheduler, and wait for us to resume unblocked.
             VERIFY(!g_scheduler_lock.own_lock());
             VERIFY(Processor::current().in_critical());
-            yield_while_not_holding_big_lock();
+            yield_assuming_not_holding_big_lock();
             VERIFY(Processor::current().in_critical());
 
             ScopedSpinLock block_lock2(m_block_lock);
@@ -1341,8 +1341,8 @@ private:
 
     bool m_is_profiling_suppressed { false };
 
-    void yield_without_holding_big_lock();
-    void yield_while_not_holding_big_lock();
+    void yield_and_release_relock_big_lock();
+    void yield_assuming_not_holding_big_lock();
 
     void drop_thread_count(bool);
 };
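
Note for readers skimming the rename: the distinction the new names encode is
whether the caller may still be holding the (recursive) process big lock at the
moment it yields. Below is a minimal userspace sketch of that
release-yield-relock pattern, not SerenityOS kernel code: BigLock,
release_all(), restore(), and the two free functions are illustrative
stand-ins, with std::this_thread::yield() in place of Scheduler::yield().

    // Sketch only: models the two renamed functions under the assumptions above.
    #include <cassert>
    #include <mutex>
    #include <thread>

    class BigLock {
    public:
        void lock() { m_mutex.lock(); ++m_depth; }
        void unlock() { --m_depth; m_mutex.unlock(); }

        // Fully release a (possibly recursively) held lock, returning the
        // depth so the caller can restore it after yielding.
        unsigned release_all()
        {
            unsigned depth = m_depth;
            for (unsigned i = 0; i < depth; ++i)
                unlock();
            return depth;
        }

        // Re-acquire the lock to a previously remembered depth.
        void restore(unsigned depth)
        {
            for (unsigned i = 0; i < depth; ++i)
                lock();
        }

        // Single-threaded demo shortcut; the kernel checks per-thread ownership.
        bool own_lock() const { return m_depth > 0; }

    private:
        std::recursive_mutex m_mutex;
        unsigned m_depth { 0 };
    };

    // Analogue of yield_assuming_not_holding_big_lock: the caller asserts
    // that the lock is not held, then simply yields.
    void yield_assuming_not_holding(BigLock& lock)
    {
        assert(!lock.own_lock());
        std::this_thread::yield();
    }

    // Analogue of yield_and_release_relock_big_lock: drop the lock to depth
    // zero, yield, then take it again to the same depth.
    void yield_and_release_relock(BigLock& lock)
    {
        unsigned depth = lock.release_all();
        std::this_thread::yield();
        lock.restore(depth);
    }

    int main()
    {
        BigLock big_lock;
        big_lock.lock();
        big_lock.lock(); // recursively held, depth 2

        yield_and_release_relock(big_lock); // safe: fully releases, then relocks
        assert(big_lock.own_lock());

        big_lock.unlock();
        big_lock.unlock();
        yield_assuming_not_holding(big_lock); // safe: lock no longer held
    }

This also motivates the VERIFY added to yield_assuming_not_holding_big_lock in
the patch: yielding while still holding the big lock would be a bug, so the
assumption in the name is now checked at runtime.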