diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 3565a2ea64..ab2875aa6c 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -564,7 +564,7 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
     // reflect the status at the last context switch.
     SpinlockLocker lock(g_scheduler_lock);
     if (&thread == Processor::current_thread()) {
-        VERIFY(thread.state() == Thread::Running);
+        VERIFY(thread.state() == Thread::State::Running);
         // Leave the scheduler lock. If we trigger page faults we may
         // need to be preempted. Since this is our own thread it won't
         // cause any problems as the stack won't change below this frame.
@@ -599,13 +599,13 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
         TRY(result);
     } else {
         switch (thread.state()) {
-        case Thread::Running:
+        case Thread::State::Running:
             VERIFY_NOT_REACHED(); // should have been handled above
-        case Thread::Runnable:
-        case Thread::Stopped:
-        case Thread::Blocked:
-        case Thread::Dying:
-        case Thread::Dead: {
+        case Thread::State::Runnable:
+        case Thread::State::Stopped:
+        case Thread::State::Blocked:
+        case Thread::State::Dying:
+        case Thread::State::Dead: {
            // We need to retrieve ebp from what was last pushed to the kernel
            // stack. Before switching out of that thread, switch_context
            // pushed the callee-saved registers, and the last of them happens
@@ -1303,8 +1303,8 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
 
 extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
 {
-    VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
-    VERIFY(to_thread->state() == Thread::Running);
+    VERIFY(from_thread == to_thread || from_thread->state() != Thread::State::Running);
+    VERIFY(to_thread->state() == Thread::State::Running);
 
     bool has_fxsr = Processor::current().has_feature(CPUFeature::FXSR);
     Processor::set_current_thread(*to_thread);
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index a0fab77edb..3eb18cc2ae 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -199,7 +199,7 @@ UNMAP_AFTER_INIT void Scheduler::start()
     idle_thread.did_schedule();
     idle_thread.set_initialized(true);
     processor.init_context(idle_thread, false);
-    idle_thread.set_state(Thread::Running);
+    idle_thread.set_state(Thread::State::Running);
     VERIFY(idle_thread.affinity() == (1u << processor.id()));
     processor.initialize_context_switching(idle_thread);
     VERIFY_NOT_REACHED();
@@ -282,8 +282,8 @@ bool Scheduler::context_switch(Thread* thread)
     if (from_thread) {
         // If the last process hasn't blocked (still marked as running),
         // mark it as runnable for the next round.
-        if (from_thread->state() == Thread::Running)
-            from_thread->set_state(Thread::Runnable);
+        if (from_thread->state() == Thread::State::Running)
+            from_thread->set_state(Thread::State::Runnable);
 
 #ifdef LOG_EVERY_CONTEXT_SWITCH
         const auto msg = "Scheduler[{}]: {} -> {} [prio={}] {:#04x}:{:p}";
@@ -299,7 +299,7 @@ bool Scheduler::context_switch(Thread* thread)
         proc.init_context(*thread, false);
         thread->set_initialized(true);
     }
-    thread->set_state(Thread::Running);
+    thread->set_state(Thread::State::Running);
 
     PerformanceManager::add_context_switch_perf_event(*from_thread, *thread);
 
@@ -332,7 +332,7 @@ void Scheduler::enter_current(Thread& prev_thread)
 
     current_thread->update_time_scheduled(scheduler_time, true, false);
     prev_thread.set_active(false);
-    if (prev_thread.state() == Thread::Dying) {
+    if (prev_thread.state() == Thread::State::Dying) {
         // If the thread we switched from is marked as dying, then notify
         // the finalizer. Note that as soon as we leave the scheduler lock
         // the finalizer may free from_thread!
@@ -472,7 +472,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
     if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
         SpinlockLocker scheduler_lock(g_scheduler_lock);
         dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::current_id(), *current_thread);
-        current_thread->set_state(Thread::Dying);
+        current_thread->set_state(Thread::State::Dying);
         Processor::current().invoke_scheduler_async();
         return;
     }
@@ -568,7 +568,7 @@ void dump_thread_list(bool with_stack_traces)
 
     Thread::for_each([&](Thread& thread) {
         switch (thread.state()) {
-        case Thread::Dying:
+        case Thread::State::Dying:
             dmesgln(" {:14} {:30} @ {:04x}:{:08x} Finalizable: {}, (nsched: {})",
                 thread.state_string(),
                 thread,
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index e3fe40efe0..8010995c5f 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -175,10 +175,10 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
     bool timer_was_added = false;
 
     switch (state()) {
-    case Thread::Stopped:
+    case Thread::State::Stopped:
         // It's possible that we were requested to be stopped!
         break;
-    case Thread::Running:
+    case Thread::State::Running:
         VERIFY(m_blocker == nullptr);
         break;
     default:
@@ -210,7 +210,7 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
 
     blocker.begin_blocking({});
 
-    set_state(Thread::Blocked);
+    set_state(Thread::State::Blocked);
 
     scheduler_lock.unlock();
     block_lock.unlock();
@@ -230,7 +230,7 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
             if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                 block_lock2.unlock();
                 dbgln("Thread should not be unblocking, current state: {}", state_string());
-                set_state(Thread::Blocked);
+                set_state(Thread::State::Blocked);
                 continue;
             }
             // Prevent the timeout from unblocking this thread if it happens to
@@ -274,10 +274,10 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
 
     SpinlockLocker scheduler_lock(g_scheduler_lock);
     switch (state()) {
-    case Thread::Stopped:
+    case Thread::State::Stopped:
         // It's possible that we were requested to be stopped!
         break;
-    case Thread::Running:
+    case Thread::State::Running:
         VERIFY(m_blocker == nullptr);
         break;
     default:
@@ -295,7 +295,7 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
     m_blocking_lock = &lock;
     m_lock_requested_count = lock_count;
 
-    set_state(Thread::Blocked);
+    set_state(Thread::State::Blocked);
 
     scheduler_lock.unlock();
     block_lock.unlock();
@@ -347,11 +347,11 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
         dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &lock);
         m_blocking_lock = nullptr;
         if (Thread::current() == this) {
-            set_state(Thread::Running);
+            set_state(Thread::State::Running);
             return;
         }
-        VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
-        set_state(Thread::Runnable);
+        VERIFY(m_state != Thread::State::Runnable && m_state != Thread::State::Running);
+        set_state(Thread::State::Runnable);
     };
     if (Processor::current_in_irq() != 0) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
@@ -389,7 +389,7 @@ void Thread::unblock(u8 signal)
     VERIFY(!Processor::current_in_irq());
     VERIFY(g_scheduler_lock.is_locked_by_current_processor());
     VERIFY(m_block_lock.is_locked_by_current_processor());
-    if (m_state != Thread::Blocked)
+    if (m_state != Thread::State::Blocked)
         return;
     if (m_blocking_lock)
         return;
@@ -407,11 +407,11 @@ void Thread::unblock(u8 signal)
     }
     m_blocker = nullptr;
     if (Thread::current() == this) {
-        set_state(Thread::Running);
+        set_state(Thread::State::Running);
         return;
     }
-    VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
-    set_state(Thread::Runnable);
+    VERIFY(m_state != Thread::State::Runnable && m_state != Thread::State::Running);
+    set_state(Thread::State::Runnable);
 }
 
 void Thread::set_should_die()
@@ -465,7 +465,7 @@ void Thread::die_if_needed()
         // It's possible that we don't reach the code after this block if the
         // scheduler is invoked and FinalizerTask cleans up this thread, however
         // that doesn't matter because we're trying to invoke the scheduler anyway
-        set_state(Thread::Dying);
+        set_state(Thread::State::Dying);
     }
 
     ScopedCritical critical;
@@ -555,33 +555,33 @@ void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
 // NOLINTNEXTLINE(readability-make-member-function-const) False positive; We call block<T> which is not const
 auto Thread::sleep(clockid_t clock_id, const Time& duration, Time* remaining_time) -> BlockResult
 {
-    VERIFY(state() == Thread::Running);
+    VERIFY(state() == Thread::State::Running);
     return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration, nullptr, clock_id), remaining_time);
 }
 
 // NOLINTNEXTLINE(readability-make-member-function-const) False positive; We call block<T> which is not const
 auto Thread::sleep_until(clockid_t clock_id, const Time& deadline) -> BlockResult
 {
-    VERIFY(state() == Thread::Running);
+    VERIFY(state() == Thread::State::Running);
     return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(true, &deadline, nullptr, clock_id));
 }
 
 StringView Thread::state_string() const
 {
     switch (state()) {
-    case Thread::Invalid:
+    case Thread::State::Invalid:
         return "Invalid"sv;
-    case Thread::Runnable:
+    case Thread::State::Runnable:
         return "Runnable"sv;
-    case Thread::Running:
+    case Thread::State::Running:
         return "Running"sv;
-    case Thread::Dying:
+    case Thread::State::Dying:
         return "Dying"sv;
-    case Thread::Dead:
+    case Thread::State::Dead:
         return "Dead"sv;
-    case Thread::Stopped:
+    case Thread::State::Stopped:
         return "Stopped"sv;
-    case Thread::Blocked: {
+    case Thread::State::Blocked: {
         SpinlockLocker block_lock(m_block_lock);
         if (m_blocking_lock)
             return "Mutex"sv;
@@ -731,7 +731,7 @@ u32 Thread::pending_signals_for_state() const
     constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
     if (is_handling_page_fault())
         return 0;
-    return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
+    return m_state != State::Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
 }
 
 void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
@@ -759,7 +759,7 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
     if (!has_unmasked_pending_signals())
         return;
 
-    if (m_state == Stopped) {
+    if (m_state == Thread::State::Stopped) {
         SpinlockLocker lock(m_lock);
         if (pending_signals_for_state() != 0) {
             dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
@@ -958,14 +958,14 @@ void Thread::resume_from_stopped()
     VERIFY(is_stopped());
     VERIFY(m_stop_state != State::Invalid);
     VERIFY(g_scheduler_lock.is_locked_by_current_processor());
-    if (m_stop_state == Blocked) {
+    if (m_stop_state == Thread::State::Blocked) {
         SpinlockLocker block_lock(m_block_lock);
         if (m_blocker || m_blocking_lock) {
             // Hasn't been unblocked yet
-            set_state(Blocked, 0);
+            set_state(Thread::State::Blocked, 0);
         } else {
             // Was unblocked while stopped
-            set_state(Runnable);
+            set_state(Thread::State::Runnable);
         }
     } else {
         set_state(m_stop_state, 0);
@@ -982,7 +982,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
 
     dbgln_if(SIGNAL_DEBUG, "Dispatch signal {} to {}, state: {}", signal, *this, state_string());
 
-    if (m_state == Invalid || !is_initialized()) {
+    if (m_state == Thread::State::Invalid || !is_initialized()) {
         // Thread has barely been created, we need to wait until it is
         // at least in Runnable state and is_initialized() returns true,
         // which indicates that it is fully set up and we actually have
@@ -1004,7 +1004,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
     auto* tracer = process.tracer();
     if (signal == SIGSTOP || (tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
         dbgln_if(SIGNAL_DEBUG, "Signal {} stopping this thread", signal);
-        set_state(State::Stopped, signal);
+        set_state(Thread::State::Stopped, signal);
         return DispatchSignalResult::Yield;
     }
 
@@ -1017,7 +1017,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
         // only "pending signals" from the tracer are sent to the tracee
         if (!tracer->has_pending_signal(signal)) {
             dbgln("signal: {} stopping {} for tracer", signal, *this);
-            set_state(Stopped, signal);
+            set_state(Thread::State::Stopped, signal);
             return DispatchSignalResult::Yield;
         }
         tracer->unset_signal(signal);
@@ -1028,7 +1028,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
     if (handler_vaddr.is_null()) {
         switch (default_signal_action(signal)) {
         case DefaultSignalAction::Stop:
-            set_state(Stopped, signal);
+            set_state(Thread::State::Stopped, signal);
             return DispatchSignalResult::Yield;
         case DefaultSignalAction::DumpCore:
             process.set_should_generate_coredump(true);
@@ -1213,7 +1213,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
     {
         SpinlockLocker thread_lock(m_lock);
         previous_state = m_state;
-        if (previous_state == Invalid) {
+        if (previous_state == Thread::State::Invalid) {
            // If we were *just* created, we may already have pending signals
            if (has_unmasked_pending_signals()) {
                dbgln_if(THREAD_DEBUG, "Dispatch pending signals to new thread {}", *this);
@@ -1225,9 +1225,9 @@ void Thread::set_state(State new_state, u8 stop_signal)
         dbgln_if(THREAD_DEBUG, "Set thread {} state to {}", *this, state_string());
     }
 
-    if (previous_state == Runnable) {
+    if (previous_state == Thread::State::Runnable) {
         Scheduler::dequeue_runnable_thread(*this);
-    } else if (previous_state == Stopped) {
+    } else if (previous_state == Thread::State::Stopped) {
         m_stop_state = State::Invalid;
         auto& process = this->process();
         if (process.set_stopped(false)) {
@@ -1247,12 +1247,12 @@ void Thread::set_state(State new_state, u8 stop_signal)
         }
     }
 
-    if (m_state == Runnable) {
+    if (m_state == Thread::State::Runnable) {
         Scheduler::enqueue_runnable_thread(*this);
         Processor::smp_wake_n_idle_processors(1);
-    } else if (m_state == Stopped) {
+    } else if (m_state == Thread::State::Stopped) {
         // We don't want to restore to Running state, only Runnable!
-        m_stop_state = previous_state != Running ? previous_state : Runnable;
+        m_stop_state = previous_state != Thread::State::Running ? previous_state : Thread::State::Runnable;
         auto& process = this->process();
         if (!process.set_stopped(true)) {
             process.for_each_thread([&](auto& thread) {
@@ -1261,7 +1261,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
                 if (thread.is_stopped())
                     return;
                 dbgln_if(THREAD_DEBUG, "Stopping peer thread {}", thread);
-                thread.set_state(Stopped, stop_signal);
+                thread.set_state(Thread::State::Stopped, stop_signal);
             });
             process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
             // Tell the parent process (if any) about this change.
@@ -1269,8 +1269,8 @@ void Thread::set_state(State new_state, u8 stop_signal)
                 [[maybe_unused]] auto result = parent->send_signal(SIGCHLD, &process);
             }
         }
-    } else if (m_state == Dying) {
-        VERIFY(previous_state != Blocked);
+    } else if (m_state == Thread::State::Dying) {
+        VERIFY(previous_state != Thread::State::Blocked);
         if (this != Thread::current() && is_finalizable()) {
             // Some other thread set this thread to Dying, notify the
             // finalizer right away as it can be cleaned up now
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 1362074778..598db880f6 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -201,14 +201,14 @@ public:
 
     void finalize();
 
-    enum State : u8 {
+    enum class State : u8 {
         Invalid = 0,
         Runnable,
         Running,
         Dying,
         Dead,
         Stopped,
-        Blocked
+        Blocked,
     };
 
     class [[nodiscard]] BlockResult {
@@ -802,7 +802,7 @@ public:
             return EDEADLK;
 
         SpinlockLocker lock(m_lock);
-        if (!m_is_joinable || state() == Dead)
+        if (!m_is_joinable || state() == Thread::State::Dead)
             return EINVAL;
 
         add_blocker();
@@ -820,8 +820,8 @@ public:
     void resume_from_stopped();
     [[nodiscard]] bool should_be_stopped() const;
 
-    [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
-    [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
+    [[nodiscard]] bool is_stopped() const { return m_state == Thread::State::Stopped; }
+    [[nodiscard]] bool is_blocked() const { return m_state == Thread::State::Blocked; }
 
     u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
     void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
@@ -853,7 +853,7 @@ public:
         // mode then we will intercept prior to returning back to user
         // mode.
         SpinlockLocker lock(m_lock);
-        while (state() == Thread::Stopped) {
+        while (state() == Thread::State::Stopped) {
             lock.unlock();
             // We shouldn't be holding the big lock here
             yield_without_releasing_big_lock();
@@ -1265,11 +1265,11 @@ private:
     unsigned m_ipv4_socket_write_bytes { 0 };
 
     FPUState m_fpu_state {};
-    State m_state { Invalid };
+    State m_state { Thread::State::Invalid };
     NonnullOwnPtr<KString> m_name;
     u32 m_priority { THREAD_PRIORITY_NORMAL };
 
-    State m_stop_state { Invalid };
+    State m_stop_state { Thread::State::Invalid };
 
     bool m_dump_backtrace_on_finalization { false };
     bool m_should_die { false };
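
The mechanical change above is exactly what the C++ scoped-enum conversion demands: once State becomes an enum class, its enumerators no longer leak into Thread's scope and no longer convert implicitly to integers, so every use site has to spell out the qualified Thread::State::... name. A minimal standalone sketch of the before/after behavior (plain C++, not SerenityOS code; the names mirror the diff but the program itself is hypothetical):

    #include <cstdio>

    struct Thread {
        // After this patch: a scoped enum. With the old plain `enum State : u8`,
        // the enumerators were visible as Thread::Running etc. and converted
        // implicitly to integers.
        enum class State : unsigned char {
            Invalid = 0,
            Runnable,
            Running,
            Dying,
            Dead,
            Stopped,
            Blocked,
        };

        State state() const { return m_state; }
        State m_state { State::Invalid };
    };

    int main()
    {
        Thread thread;

        // Enumerators now require the State:: qualifier, as every hunk above adds.
        if (thread.state() == Thread::State::Invalid)
            std::puts("freshly created");

        // These compiled with the plain enum but are errors with enum class:
        //     int n = thread.state();        // no implicit conversion to int
        //     if (thread.state() == 0) { }   // no comparison against raw integers

        // Any intentional conversion must be spelled out, keeping it visible in review:
        std::printf("state as int: %d\n", static_cast<int>(thread.state()));
        return 0;
    }

The payoff shows in hunks like pending_signals_for_state(): an unqualified Stopped can no longer be confused with an enumerator of some other enumeration or a stray integer in scope, and the compiler rejects any use site the conversion missed.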