Kernel: {Mutex,Spinlock}::own_lock() => is_locked_by_current_thread()
Rename these APIs to make it clearer what they are checking.
parent d9da513959
commit 0b4671add7

13 changed files with 52 additions and 52 deletions
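The change is mechanical, but the motivation deserves a sentence: `own_lock()` reads as "is this lock held?", when the question every caller actually asks is "is this lock held by the thread making the call?". The sketch below illustrates that distinction in portable C++ rather than SerenityOS kernel code; `SimpleMutex`, `m_holder`, and the spinning `lock()` are invented for illustration only.

// A minimal sketch, not the SerenityOS implementation: a lock that
// records its holder can answer "is the *calling* thread the holder?",
// which is what the name is_locked_by_current_thread() makes explicit.
#include <atomic>
#include <cassert>
#include <thread>

class SimpleMutex {
public:
    void lock()
    {
        std::thread::id no_owner {};
        // Spin until we can install our own thread id as the holder.
        while (!m_holder.compare_exchange_weak(no_owner, std::this_thread::get_id()))
            no_owner = {};
    }

    void unlock()
    {
        assert(is_locked_by_current_thread());
        m_holder.store(std::thread::id {});
    }

    // True only if the calling thread holds the lock; a lock held by
    // some *other* thread still returns false here.
    bool is_locked_by_current_thread() const
    {
        return m_holder.load() == std::this_thread::get_id();
    }

private:
    std::atomic<std::thread::id> m_holder {};
};

int main()
{
    SimpleMutex mutex;
    assert(!mutex.is_locked_by_current_thread()); // not held by anyone
    mutex.lock();
    assert(mutex.is_locked_by_current_thread()); // held by us
    mutex.unlock();
}

Every hunk below performs the same one-line substitution at a VERIFY() site; the assertions check exactly what they checked before, under a name that says so.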
Kernel/Thread.cpp

@@ -160,7 +160,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,
     VERIFY(!Processor::current_in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
-    VERIFY(!Memory::s_mm_lock.own_lock());
+    VERIFY(!Memory::s_mm_lock.is_locked_by_current_thread());

     SpinlockLocker block_lock(m_block_lock);

@@ -198,9 +198,9 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock,

     for (;;) {
         // Yield to the scheduler, and wait for us to resume unblocked.
-        VERIFY(!g_scheduler_lock.own_lock());
+        VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
         VERIFY(Processor::in_critical());
-        if (&lock != &big_lock && big_lock.own_lock()) {
+        if (&lock != &big_lock && big_lock.is_locked_by_current_thread()) {
             // We're locking another lock and already hold the big lock...
             // We need to release the big lock
             yield_and_release_relock_big_lock();
@@ -239,8 +239,8 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
     SpinlockLocker block_lock(m_block_lock);
     VERIFY(m_blocking_lock == &lock);
     VERIFY(!Processor::current_in_irq());
-    VERIFY(g_scheduler_lock.own_lock());
-    VERIFY(m_block_lock.own_lock());
+    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(m_block_lock.is_locked_by_current_thread());
     VERIFY(m_blocking_lock == &lock);
     dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
     m_blocking_lock = nullptr;
@@ -285,8 +285,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
 void Thread::unblock(u8 signal)
 {
     VERIFY(!Processor::current_in_irq());
-    VERIFY(g_scheduler_lock.own_lock());
-    VERIFY(m_block_lock.own_lock());
+    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(m_block_lock.is_locked_by_current_thread());
     if (m_state != Thread::Blocked)
         return;
     if (m_blocking_lock)
@@ -402,8 +402,8 @@ void Thread::exit(void* exit_value)

 void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
 {
-    VERIFY(!g_scheduler_lock.own_lock());
-    VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().own_lock());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
+    VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().is_locked_by_current_thread());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
@@ -414,7 +414,7 @@ void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_

 void Thread::yield_and_release_relock_big_lock()
 {
-    VERIFY(!g_scheduler_lock.own_lock());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
@@ -495,7 +495,7 @@ void Thread::finalize()
     VERIFY(Thread::current() != this);

 #if LOCK_DEBUG
-    VERIFY(!m_lock.own_lock());
+    VERIFY(!m_lock.is_locked_by_current_thread());
     if (lock_count() > 0) {
         dbgln("Thread {} leaking {} Locks!", *this, lock_count());
         SpinlockLocker list_lock(m_holding_locks_lock);
@@ -612,7 +612,7 @@ u32 Thread::pending_signals() const

 u32 Thread::pending_signals_for_state() const
 {
-    VERIFY(g_scheduler_lock.own_lock());
+    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
     constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
     if (is_handling_page_fault())
         return 0;
@@ -709,7 +709,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)

 DispatchSignalResult Thread::dispatch_one_pending_signal()
 {
-    VERIFY(m_lock.own_lock());
+    VERIFY(m_lock.is_locked_by_current_thread());
     u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
     if (signal_candidates == 0)
         return DispatchSignalResult::Continue;
@@ -816,7 +816,7 @@ void Thread::resume_from_stopped()
 {
     VERIFY(is_stopped());
     VERIFY(m_stop_state != State::Invalid);
-    VERIFY(g_scheduler_lock.own_lock());
+    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
     if (m_stop_state == Blocked) {
         SpinlockLocker block_lock(m_block_lock);
         if (m_blocker || m_blocking_lock) {
@@ -834,7 +834,7 @@ void Thread::resume_from_stopped()
 DispatchSignalResult Thread::dispatch_signal(u8 signal)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(g_scheduler_lock.own_lock());
+    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
     VERIFY(signal > 0 && signal <= 32);
     VERIFY(process().is_user_process());
     VERIFY(this == Thread::current());
@@ -1047,7 +1047,7 @@ RefPtr<Thread> Thread::clone(Process& process)
 void Thread::set_state(State new_state, u8 stop_signal)
 {
     State previous_state;
-    VERIFY(g_scheduler_lock.own_lock());
+    VERIFY(g_scheduler_lock.is_locked_by_current_thread());
     if (new_state == m_state)
         return;

@@ -1162,7 +1162,7 @@ String Thread::backtrace()

     auto& process = const_cast<Process&>(this->process());
     auto stack_trace = Processor::capture_stack_trace(*this);
-    VERIFY(!g_scheduler_lock.own_lock());
+    VERIFY(!g_scheduler_lock.is_locked_by_current_thread());
     ProcessPagingScope paging_scope(process);
     for (auto& frame : stack_trace) {
         if (Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
(The diff for the remaining changed files is not shown here.)