diff --git a/Kernel/API/Syscall.h b/Kernel/API/Syscall.h
index 616b47f9f4..b28f7f0804 100644
--- a/Kernel/API/Syscall.h
+++ b/Kernel/API/Syscall.h
@@ -113,7 +113,6 @@ namespace Kernel {
     S(setsockopt) \
     S(create_thread) \
     S(gettid) \
-    S(donate) \
     S(rename) \
     S(ftruncate) \
     S(exit_thread) \
diff --git a/Kernel/Process.h b/Kernel/Process.h
index b8aaa1e6fe..7bf21c630e 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -276,7 +276,6 @@ public:
     KResultOr<ssize_t> sys$dbgputstr(Userspace<const u8>, size_t);
     KResultOr<int> sys$dump_backtrace();
     KResultOr<pid_t> sys$gettid();
-    KResultOr<int> sys$donate(pid_t tid);
     KResultOr<pid_t> sys$setsid();
     KResultOr<pid_t> sys$getsid(pid_t);
     KResultOr<int> sys$setpgid(pid_t pid, pid_t pgid);
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 850553b157..7d2e072588 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -31,8 +31,6 @@ class SchedulerPerProcessorData {
 public:
     SchedulerPerProcessorData() = default;
 
-    WeakPtr<Thread> m_pending_beneficiary;
-    const char* m_pending_donate_reason { nullptr };
     bool m_in_scheduler { true };
 };
 
@@ -206,26 +204,6 @@ bool Scheduler::pick_next()
         dump_thread_list();
     }
 
-    auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
-    if (pending_beneficiary && dequeue_runnable_thread(*pending_beneficiary, true)) {
-        // The thread we're supposed to donate to still exists and we can
-        const char* reason = scheduler_data.m_pending_donate_reason;
-        scheduler_data.m_pending_beneficiary = nullptr;
-        scheduler_data.m_pending_donate_reason = nullptr;
-
-        // We need to leave our first critical section before switching context,
-        // but since we're still holding the scheduler lock we're still in a critical section
-        critical.leave();
-
-        dbgln_if(SCHEDULER_DEBUG, "Processing pending donate to {} reason={}", *pending_beneficiary, reason);
-        return donate_to_and_switch(pending_beneficiary.ptr(), reason);
-    }
-
-    // Either we're not donating or the beneficiary disappeared.
-    // Either way clear any pending information
-    scheduler_data.m_pending_beneficiary = nullptr;
-    scheduler_data.m_pending_donate_reason = nullptr;
-
     auto& thread_to_schedule = pull_next_runnable_thread();
     if constexpr (SCHEDULER_DEBUG) {
 #if ARCH(I386)
@@ -250,11 +228,6 @@ bool Scheduler::yield()
 {
     InterruptDisabler disabler;
     auto& proc = Processor::current();
-    auto& scheduler_data = proc.get_scheduler_data();
-
-    // Clear any pending beneficiary
-    scheduler_data.m_pending_beneficiary = nullptr;
-    scheduler_data.m_pending_donate_reason = nullptr;
 
     auto current_thread = Thread::current();
     dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
@@ -275,66 +248,6 @@ bool Scheduler::yield()
     return true;
 }
 
-bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const char* reason)
-{
-    VERIFY(g_scheduler_lock.own_lock());
-
-    auto& proc = Processor::current();
-    VERIFY(proc.in_critical() == 1);
-
-    unsigned ticks_left = Thread::current()->ticks_left();
-    if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
-        return Scheduler::yield();
-
-    unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
-    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Donating {} ticks to {}, reason={}", proc.get_id(), ticks_to_donate, *beneficiary, reason);
-    beneficiary->set_ticks_left(ticks_to_donate);
-
-    return Scheduler::context_switch(beneficiary);
-}
-
-bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
-{
-    VERIFY(beneficiary);
-
-    if (beneficiary == Thread::current())
-        return Scheduler::yield();
-
-    // Set the m_in_scheduler flag before acquiring the spinlock. This
-    // prevents a recursive call into Scheduler::invoke_async upon
-    // leaving the scheduler lock.
-    ScopedCritical critical;
-    auto& proc = Processor::current();
-    auto& scheduler_data = proc.get_scheduler_data();
-    scheduler_data.m_in_scheduler = true;
-    ScopeGuard guard(
-        []() {
-            // We may be on a different processor after we got switched
-            // back to this thread!
-            auto& scheduler_data = Processor::current().get_scheduler_data();
-            VERIFY(scheduler_data.m_in_scheduler);
-            scheduler_data.m_in_scheduler = false;
-        });
-
-    VERIFY(!proc.in_irq());
-
-    if (proc.in_critical() > 1) {
-        scheduler_data.m_pending_beneficiary = beneficiary; // Save the beneficiary
-        scheduler_data.m_pending_donate_reason = reason;
-        proc.invoke_scheduler_async();
-        return false;
-    }
-
-    ScopedSpinLock lock(g_scheduler_lock);
-
-    // "Leave" the critical section before switching context. Since we
-    // still hold the scheduler lock, we're not actually leaving it.
-    // Processor::switch_context expects Processor::in_critical() to be 1
-    critical.leave();
-    donate_to_and_switch(beneficiary, reason);
-    return false;
-}
-
 bool Scheduler::context_switch(Thread* thread)
 {
     if (s_mm_lock.own_lock()) {
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index 6291609640..18dcd35cac 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -36,8 +36,6 @@ public:
     static bool pick_next();
     static bool yield();
    static void yield_from_critical();
-    static bool donate_to_and_switch(Thread*, const char* reason);
-    static bool donate_to(RefPtr<Thread>&, const char* reason);
     static bool context_switch(Thread*);
     static void enter_current(Thread& prev_thread, bool is_first);
     static void leave_on_first_switch(u32 flags);
diff --git a/Kernel/Syscalls/sched.cpp b/Kernel/Syscalls/sched.cpp
index 7bbc04ca35..1490c3ccf6 100644
--- a/Kernel/Syscalls/sched.cpp
+++ b/Kernel/Syscalls/sched.cpp
@@ -15,20 +15,6 @@ KResultOr<int> Process::sys$yield()
     return 0;
 }
 
-KResultOr<int> Process::sys$donate(pid_t tid)
-{
-    REQUIRE_PROMISE(stdio);
-    if (tid < 0)
-        return EINVAL;
-
-    ScopedCritical critical;
-    auto thread = Thread::from_tid(tid);
-    if (!thread || thread->pid() != pid())
-        return ESRCH;
-    Thread::current()->donate_without_holding_big_lock(thread, "sys$donate");
-    return 0;
-}
-
 KResultOr<int> Process::sys$sched_setparam(int pid, Userspace<const struct sched_param*> user_param)
 {
     REQUIRE_PROMISE(proc);
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 44f9af007a..6ed5d3e108 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -334,18 +334,6 @@ void Thread::yield_without_holding_big_lock()
     relock_process(previous_locked, lock_count_to_restore);
 }
 
-void Thread::donate_without_holding_big_lock(RefPtr<Thread>& thread, const char* reason)
-{
-    VERIFY(!g_scheduler_lock.own_lock());
-    u32 lock_count_to_restore = 0;
-    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
-    // NOTE: Even though we call Scheduler::yield here, unless we happen
-    // to be outside of a critical section, the yield will be postponed
-    // until leaving it in relock_process.
-    Scheduler::donate_to(thread, reason);
-    relock_process(previous_locked, lock_count_to_restore);
-}
-
 LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 {
     return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
@@ -354,8 +342,8 @@ LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
 {
     // Clearing the critical section may trigger the context switch
-    // flagged by calling Scheduler::donate_to or Scheduler::yield
-    // above. We have to do it this way because we intentionally
+    // flagged by calling Scheduler::yield above.
+    // We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
     u32 prev_flags;
     u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index c90bec766f..2980ea9812 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1337,7 +1337,6 @@ private:
     bool m_is_profiling_suppressed { false };
 
     void yield_without_holding_big_lock();
-    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();
     void drop_thread_count(bool);
 };
diff --git a/Userland/Libraries/LibC/unistd.cpp b/Userland/Libraries/LibC/unistd.cpp
index 5236cf73b5..83c1d465e6 100644
--- a/Userland/Libraries/LibC/unistd.cpp
+++ b/Userland/Libraries/LibC/unistd.cpp
@@ -695,12 +695,6 @@ int gettid()
     return cached_tid;
 }
 
-int donate(int tid)
-{
-    int rc = syscall(SC_donate, tid);
-    __RETURN_WITH_ERRNO(rc, rc, -1);
-}
-
 void sysbeep()
 {
     syscall(SC_beep);
diff --git a/Userland/Libraries/LibC/unistd.h b/Userland/Libraries/LibC/unistd.h
index dc0d26ee9e..6960351227 100644
--- a/Userland/Libraries/LibC/unistd.h
+++ b/Userland/Libraries/LibC/unistd.h
@@ -40,7 +40,6 @@ void dump_backtrace();
 int fsync(int fd);
 void sysbeep();
 int gettid();
-int donate(int tid);
 int getpagesize();
 pid_t fork();
 pid_t vfork();