
Kernel+LibC: Remove sys$donate()

This was an old SerenityOS-specific syscall for donating the remainder
of the calling thread's time-slice to another thread within the same
process.

Now that Threading::Lock uses a pthread_mutex_t internally, we no
longer need this syscall, which allows us to get rid of a surprising
amount of unnecessary scheduler logic. :^)
Andreas Kling 2021-07-05 23:07:18 +02:00
parent c40780d404
commit 565796ae4e
9 changed files with 2 additions and 127 deletions
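
For context, the following is a hypothetical sketch (not code from this commit, and not the actual Threading::Lock implementation) contrasting the two approaches: a userspace lock that spins and uses the now-removed donate() wrapper to hand the rest of its time-slice to the lock holder, versus a lock backed by pthread_mutex_t, where the kernel simply blocks the waiter. The OldDonatingLock and MutexLock types and the holder_tid bookkeeping are made up for illustration; donate(), gettid(), and the pthread calls are real, and the includes assume the declarations removed in the final hunk below live in <unistd.h>.

// Hypothetical sketch: how a userspace lock could have used the removed
// donate() wrapper, versus a pthread_mutex_t-backed lock. Illustrative only.
#include <pthread.h>
#include <unistd.h> // gettid(); donate() existed here before this commit

struct OldDonatingLock {
    volatile int locked { 0 };
    int holder_tid { -1 };

    void lock()
    {
        // Spin; while contended, donate the rest of our time-slice to the
        // thread that currently holds the lock so it can finish sooner.
        while (__sync_lock_test_and_set(&locked, 1)) {
            if (holder_tid >= 0)
                donate(holder_tid);
        }
        holder_tid = gettid();
    }

    void unlock()
    {
        holder_tid = -1;
        __sync_lock_release(&locked);
    }
};

struct MutexLock {
    pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

    // On contention the kernel blocks the calling thread until the mutex is
    // released, so no time-slice donation hint is needed.
    void lock() { pthread_mutex_lock(&mutex); }
    void unlock() { pthread_mutex_unlock(&mutex); }
};

Once userspace locks block in the kernel via the mutex path, nothing calls donate() anymore, which is why the scheduler's pending-beneficiary bookkeeping removed below has no remaining users.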

@@ -113,7 +113,6 @@ namespace Kernel {
    S(setsockopt) \
    S(create_thread) \
    S(gettid) \
    S(donate) \
    S(rename) \
    S(ftruncate) \
    S(exit_thread) \

@@ -276,7 +276,6 @@ public:
    KResultOr<FlatPtr> sys$dbgputstr(Userspace<const u8*>, size_t);
    KResultOr<FlatPtr> sys$dump_backtrace();
    KResultOr<FlatPtr> sys$gettid();
    KResultOr<FlatPtr> sys$donate(pid_t tid);
    KResultOr<FlatPtr> sys$setsid();
    KResultOr<FlatPtr> sys$getsid(pid_t);
    KResultOr<FlatPtr> sys$setpgid(pid_t pid, pid_t pgid);

@@ -31,8 +31,6 @@ class SchedulerPerProcessorData {
public:
    SchedulerPerProcessorData() = default;
    WeakPtr<Thread> m_pending_beneficiary;
    const char* m_pending_donate_reason { nullptr };
    bool m_in_scheduler { true };
};
@@ -206,26 +204,6 @@ bool Scheduler::pick_next()
        dump_thread_list();
    }
    auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
    if (pending_beneficiary && dequeue_runnable_thread(*pending_beneficiary, true)) {
        // The thread we're supposed to donate to still exists and we can
        const char* reason = scheduler_data.m_pending_donate_reason;
        scheduler_data.m_pending_beneficiary = nullptr;
        scheduler_data.m_pending_donate_reason = nullptr;
        // We need to leave our first critical section before switching context,
        // but since we're still holding the scheduler lock we're still in a critical section
        critical.leave();
        dbgln_if(SCHEDULER_DEBUG, "Processing pending donate to {} reason={}", *pending_beneficiary, reason);
        return donate_to_and_switch(pending_beneficiary.ptr(), reason);
    }
    // Either we're not donating or the beneficiary disappeared.
    // Either way clear any pending information
    scheduler_data.m_pending_beneficiary = nullptr;
    scheduler_data.m_pending_donate_reason = nullptr;
    auto& thread_to_schedule = pull_next_runnable_thread();
    if constexpr (SCHEDULER_DEBUG) {
#if ARCH(I386)
@@ -250,11 +228,6 @@ bool Scheduler::yield()
{
    InterruptDisabler disabler;
    auto& proc = Processor::current();
    auto& scheduler_data = proc.get_scheduler_data();
    // Clear any pending beneficiary
    scheduler_data.m_pending_beneficiary = nullptr;
    scheduler_data.m_pending_donate_reason = nullptr;
    auto current_thread = Thread::current();
    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
@@ -275,66 +248,6 @@ bool Scheduler::yield()
    return true;
}
bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const char* reason)
{
    VERIFY(g_scheduler_lock.own_lock());
    auto& proc = Processor::current();
    VERIFY(proc.in_critical() == 1);
    unsigned ticks_left = Thread::current()->ticks_left();
    if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
        return Scheduler::yield();
    unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Donating {} ticks to {}, reason={}", proc.get_id(), ticks_to_donate, *beneficiary, reason);
    beneficiary->set_ticks_left(ticks_to_donate);
    return Scheduler::context_switch(beneficiary);
}
bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
{
    VERIFY(beneficiary);
    if (beneficiary == Thread::current())
        return Scheduler::yield();
    // Set the m_in_scheduler flag before acquiring the spinlock. This
    // prevents a recursive call into Scheduler::invoke_async upon
    // leaving the scheduler lock.
    ScopedCritical critical;
    auto& proc = Processor::current();
    auto& scheduler_data = proc.get_scheduler_data();
    scheduler_data.m_in_scheduler = true;
    ScopeGuard guard(
        []() {
            // We may be on a different processor after we got switched
            // back to this thread!
            auto& scheduler_data = Processor::current().get_scheduler_data();
            VERIFY(scheduler_data.m_in_scheduler);
            scheduler_data.m_in_scheduler = false;
        });
    VERIFY(!proc.in_irq());
    if (proc.in_critical() > 1) {
        scheduler_data.m_pending_beneficiary = beneficiary; // Save the beneficiary
        scheduler_data.m_pending_donate_reason = reason;
        proc.invoke_scheduler_async();
        return false;
    }
    ScopedSpinLock lock(g_scheduler_lock);
    // "Leave" the critical section before switching context. Since we
    // still hold the scheduler lock, we're not actually leaving it.
    // Processor::switch_context expects Processor::in_critical() to be 1
    critical.leave();
    donate_to_and_switch(beneficiary, reason);
    return false;
}
bool Scheduler::context_switch(Thread* thread)
{
    if (s_mm_lock.own_lock()) {

@@ -36,8 +36,6 @@ public:
    static bool pick_next();
    static bool yield();
    static void yield_from_critical();
    static bool donate_to_and_switch(Thread*, const char* reason);
    static bool donate_to(RefPtr<Thread>&, const char* reason);
    static bool context_switch(Thread*);
    static void enter_current(Thread& prev_thread, bool is_first);
    static void leave_on_first_switch(u32 flags);

@@ -15,20 +15,6 @@ KResultOr<FlatPtr> Process::sys$yield()
    return 0;
}
KResultOr<FlatPtr> Process::sys$donate(pid_t tid)
{
    REQUIRE_PROMISE(stdio);
    if (tid < 0)
        return EINVAL;
    ScopedCritical critical;
    auto thread = Thread::from_tid(tid);
    if (!thread || thread->pid() != pid())
        return ESRCH;
    Thread::current()->donate_without_holding_big_lock(thread, "sys$donate");
    return 0;
}
KResultOr<FlatPtr> Process::sys$sched_setparam(int pid, Userspace<const struct sched_param*> user_param)
{
    REQUIRE_PROMISE(proc);

@@ -334,18 +334,6 @@ void Thread::yield_without_holding_big_lock()
    relock_process(previous_locked, lock_count_to_restore);
}
void Thread::donate_without_holding_big_lock(RefPtr<Thread>& thread, const char* reason)
{
    VERIFY(!g_scheduler_lock.own_lock());
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    Scheduler::donate_to(thread, reason);
    relock_process(previous_locked, lock_count_to_restore);
}
LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
{
    return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
@@ -354,8 +342,8 @@ LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::donate_to or Scheduler::yield
    // above. We have to do it this way because we intentionally
    // flagged by calling Scheduler::yield above.
    // We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

@@ -1337,7 +1337,6 @@ private:
    bool m_is_profiling_suppressed { false };
    void yield_without_holding_big_lock();
    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
    void yield_while_not_holding_big_lock();
    void drop_thread_count(bool);
};

@@ -695,12 +695,6 @@ int gettid()
    return cached_tid;
}
int donate(int tid)
{
    int rc = syscall(SC_donate, tid);
    __RETURN_WITH_ERRNO(rc, rc, -1);
}
void sysbeep()
{
    syscall(SC_beep);

@@ -40,7 +40,6 @@ void dump_backtrace();
int fsync(int fd);
void sysbeep();
int gettid();
int donate(int tid);
int getpagesize();
pid_t fork();
pid_t vfork();