diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 96393d0fb0..717b608779 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -322,6 +322,7 @@ NonnullRefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thre
         process->ref();
     }
 
+    ScopedSpinLock lock(g_scheduler_lock);
     first_thread->set_affinity(affinity);
     first_thread->set_state(Thread::State::Runnable);
     return process;
@@ -781,6 +782,7 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(), u32 priority, cons
     auto& tss = thread->tss();
     tss.eip = (FlatPtr)entry;
 
+    ScopedSpinLock lock(g_scheduler_lock);
     thread->set_state(Thread::State::Runnable);
     return thread;
 }
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 4f08ed5c25..3c888f3705 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -308,7 +308,10 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     if (was_profiling)
         Profiling::did_exec(path);
 
-    new_main_thread->set_state(Thread::State::Runnable);
+    {
+        ScopedSpinLock lock(g_scheduler_lock);
+        new_main_thread->set_state(Thread::State::Runnable);
+    }
     big_lock().force_unlock_if_locked();
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(Processor::current().in_critical());
diff --git a/Kernel/Syscalls/fork.cpp b/Kernel/Syscalls/fork.cpp
index c488391179..161817a373 100644
--- a/Kernel/Syscalls/fork.cpp
+++ b/Kernel/Syscalls/fork.cpp
@@ -79,24 +79,25 @@ pid_t Process::sys$fork(RegisterState& regs)
     dbg() << "fork: child will begin executing at " << String::format("%w", child_tss.cs) << ":" << String::format("%x", child_tss.eip) << " with stack " << String::format("%w", child_tss.ss) << ":" << String::format("%x", child_tss.esp) << ", kstack " << String::format("%w", child_tss.ss0) << ":" << String::format("%x", child_tss.esp0);
 #endif
 
-    ScopedSpinLock lock(m_lock);
-    for (auto& region : m_regions) {
-#ifdef FORK_DEBUG
-        dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
-#endif
-        auto& child_region = child->add_region(region.clone());
-        child_region.map(child->page_directory());
-
-        if (&region == m_master_tls_region)
-            child->m_master_tls_region = child_region.make_weak_ptr();
-    }
-
     {
-        ScopedSpinLock lock(g_processes_lock);
+        ScopedSpinLock lock(m_lock);
+        for (auto& region : m_regions) {
+#ifdef FORK_DEBUG
+            dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
+#endif
+            auto& child_region = child->add_region(region.clone());
+            child_region.map(child->page_directory());
+
+            if (&region == m_master_tls_region)
+                child->m_master_tls_region = child_region.make_weak_ptr();
+        }
+
+        ScopedSpinLock processes_lock(g_processes_lock);
         g_processes->prepend(child);
         child->ref(); // This reference will be dropped by Process::reap
     }
 
+    ScopedSpinLock lock(g_scheduler_lock);
     child_first_thread->set_affinity(Thread::current()->affinity());
     child_first_thread->set_state(Thread::State::Runnable);
     return child->pid().value();
diff --git a/Kernel/Syscalls/thread.cpp b/Kernel/Syscalls/thread.cpp
index b4059a8878..d2250d1cae 100644
--- a/Kernel/Syscalls/thread.cpp
+++ b/Kernel/Syscalls/thread.cpp
@@ -70,7 +70,6 @@ int Process::sys$create_thread(void* (*entry)(void*), Userspace<const Syscall::
     builder.appendf("[%d]", thread->tid().value());
     thread->set_name(builder.to_string());
 
-    thread->set_priority(requested_thread_priority);
     if (!is_thread_joinable)
         thread->detach();
 
@@ -83,6 +82,9 @@ int Process::sys$create_thread(void* (*entry)(void*), Userspace<const Syscall::
     auto tsr_result = thread->make_thread_specific_region({});
     if (tsr_result.is_error())
         return tsr_result.error();
+
+    ScopedSpinLock lock(g_scheduler_lock);
+    thread->set_priority(requested_thread_priority);
     thread->set_state(Thread::State::Runnable);
     return thread->tid().value();
 }
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 6347a7a9ae..9fd13bde70 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -119,6 +119,7 @@ Thread::~Thread()
 
 void Thread::unblock()
 {
+    ASSERT(g_scheduler_lock.own_lock());
     ASSERT(m_lock.own_lock());
     m_blocker = nullptr;
     if (Thread::current() == this) {
@@ -288,14 +289,19 @@ void Thread::finalize()
     ASSERT(Thread::current() == g_finalizer);
     ASSERT(Thread::current() != this);
 
-#ifdef THREAD_DEBUG
-    dbg() << "Finalizing thread " << *this;
-#endif
-    set_state(Thread::State::Dead);
+    ASSERT(!m_lock.own_lock());
 
-    if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
-        // Notify joiner that we exited
-        static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
+    {
+        ScopedSpinLock lock(g_scheduler_lock);
+#ifdef THREAD_DEBUG
+        dbg() << "Finalizing thread " << *this;
+#endif
+        set_state(Thread::State::Dead);
+
+        if (auto* joiner = m_joiner.exchange(nullptr, AK::memory_order_acq_rel)) {
+            // Notify joiner that we exited
+            static_cast<JoinBlocker*>(joiner->m_blocker)->joinee_exited(m_exit_value);
+        }
     }
 
     if (m_dump_backtrace_on_finalization)
@@ -522,6 +528,7 @@ void Thread::resume_from_stopped()
 {
     ASSERT(is_stopped());
     ASSERT(m_stop_state != State::Invalid);
+    ASSERT(g_scheduler_lock.own_lock());
     set_state(m_stop_state);
     m_stop_state = State::Invalid;
     // make sure SemiPermanentBlocker is unblocked
@@ -788,7 +795,7 @@ RefPtr<Thread> Thread::clone(Process& process)
 
 void Thread::set_state(State new_state)
 {
-    ScopedSpinLock lock(g_scheduler_lock);
+    ASSERT(g_scheduler_lock.own_lock());
     if (new_state == m_state)
         return;
 
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 57dc4cceb4..2c482e1f74 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -422,28 +422,31 @@ public:
     {
         T t(forward<Args>(args)...);
 
-        ScopedSpinLock lock(m_lock);
-        // We should never be blocking a blocked (or otherwise non-active) thread.
-        ASSERT(state() == Thread::Running);
-        ASSERT(m_blocker == nullptr);
+        {
+            ScopedSpinLock lock(m_lock);
+            // We should never be blocking a blocked (or otherwise non-active) thread.
+            ASSERT(state() == Thread::Running);
+            ASSERT(m_blocker == nullptr);
 
-        if (t.should_unblock(*this)) {
-            // Don't block if the wake condition is already met
-            return BlockResult::NotBlocked;
+            if (t.should_unblock(*this)) {
+                // Don't block if the wake condition is already met
+                return BlockResult::NotBlocked;
+            }
+
+            m_blocker = &t;
+            m_blocker_timeout = t.override_timeout(timeout);
         }
 
-        m_blocker = &t;
-        m_blocker_timeout = t.override_timeout(timeout);
-        set_state(Thread::Blocked);
-
-        // Release our lock
-        lock.unlock();
+        {
+            ScopedSpinLock scheduler_lock(g_scheduler_lock);
+            set_state(Thread::Blocked);
+        }
 
         // Yield to the scheduler, and wait for us to resume unblocked.
         yield_without_holding_big_lock();
 
         // Acquire our lock again
-        lock.lock();
+        ScopedSpinLock lock(m_lock);
 
         // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);
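
For illustration only, here is a minimal, self-contained sketch of the locking discipline this patch establishes: `Thread::set_state()` now asserts `g_scheduler_lock.own_lock()` instead of taking the lock itself, so every call site must hold the lock (via a scoped guard) across the state transition and any related updates. The `SpinLock`, `ScopedSpinLock`, and `own_lock()` shapes below are simplified stand-ins, not the kernel's actual implementations (the real `own_lock()` is per-processor aware).

```cpp
// Sketch (not part of the patch): callers take g_scheduler_lock, and
// set_state() asserts it is held rather than acquiring it internally.
#include <atomic>
#include <cassert>

class SpinLock {
public:
    void lock()
    {
        while (m_flag.test_and_set(std::memory_order_acquire)) {
            // spin until the holder releases
        }
        m_held = true;
    }
    void unlock()
    {
        m_held = false;
        m_flag.clear(std::memory_order_release);
    }
    // Simplified: the kernel's own_lock() also checks which CPU holds it.
    bool own_lock() const { return m_held; }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    bool m_held { false };
};

class ScopedSpinLock {
public:
    explicit ScopedSpinLock(SpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedSpinLock() { m_lock.unlock(); }

private:
    SpinLock& m_lock;
};

static SpinLock g_scheduler_lock;

enum class State { Invalid, Runnable, Running, Blocked, Dead };

class Thread {
public:
    // As in the patch: assert the lock is held instead of taking it, so a
    // caller can combine several scheduler-related updates atomically
    // (e.g. set_affinity + set_state, or set_priority + set_state).
    void set_state(State new_state)
    {
        assert(g_scheduler_lock.own_lock());
        m_state = new_state;
    }

private:
    State m_state { State::Invalid };
};

int main()
{
    Thread thread;
    {
        // The call-site pattern used throughout the patch.
        ScopedSpinLock lock(g_scheduler_lock);
        thread.set_state(State::Runnable);
    }
    return 0;
}
```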