
Kernel+LibC: Remove sys$donate()

This was an old SerenityOS-specific syscall for donating the remainder
of the calling thread's time-slice to another thread within the same
process.
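
To make the removed mechanism concrete, here is a minimal sketch (not the actual SerenityOS code) of the kind of userspace locking pattern sys$donate() existed for: on contention, the waiter hands the rest of its time-slice to the lock holder so the holder can finish its critical section sooner. The donate(tid) wrapper and the lock class below are purely illustrative.

    #include <atomic>
    #include <unistd.h>

    extern "C" int donate(int tid); // hypothetical LibC wrapper around the old sys$donate()

    class DonatingLock {
    public:
        void lock()
        {
            pid_t expected = 0;
            while (!m_holder.compare_exchange_strong(expected, gettid())) {
                // The lock is taken: donate our remaining time-slice to the
                // holder instead of spinning or yielding to an arbitrary thread.
                donate(expected);
                expected = 0;
            }
        }

        void unlock() { m_holder.store(0); }

    private:
        std::atomic<pid_t> m_holder { 0 };
    };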

Now that Threading::Lock uses a pthread_mutex_t internally, we no
longer need this syscall, which allows us to get rid of a surprising
amount of unnecessary scheduler logic. :^)
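
For contrast, a pthread_mutex_t-backed lock needs no such scheduler hint: a contended locker simply blocks in the kernel and is woken when the holder unlocks, so there is no remaining time-slice to donate. The class below only illustrates that idea; it is not the real Threading::Lock implementation.

    #include <pthread.h>

    class MutexBackedLock {
    public:
        MutexBackedLock() { pthread_mutex_init(&m_mutex, nullptr); }
        ~MutexBackedLock() { pthread_mutex_destroy(&m_mutex); }

        // pthread_mutex_lock() puts the caller to sleep until the mutex is free;
        // the kernel's ordinary wake-up path replaces the old donate-to-holder trick.
        void lock() { pthread_mutex_lock(&m_mutex); }
        void unlock() { pthread_mutex_unlock(&m_mutex); }

    private:
        pthread_mutex_t m_mutex;
    };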
Author: Andreas Kling
Date:   2021-07-05 23:07:18 +02:00
Parent: c40780d404
Commit: 565796ae4e
9 changed files with 2 additions and 127 deletions


@@ -334,18 +334,6 @@ void Thread::yield_without_holding_big_lock()
     relock_process(previous_locked, lock_count_to_restore);
 }
 
-void Thread::donate_without_holding_big_lock(RefPtr<Thread>& thread, const char* reason)
-{
-    VERIFY(!g_scheduler_lock.own_lock());
-    u32 lock_count_to_restore = 0;
-    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
-    // NOTE: Even though we call Scheduler::yield here, unless we happen
-    // to be outside of a critical section, the yield will be postponed
-    // until leaving it in relock_process.
-    Scheduler::donate_to(thread, reason);
-    relock_process(previous_locked, lock_count_to_restore);
-}
-
 LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 {
     return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
@@ -354,8 +342,8 @@ LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
 {
     // Clearing the critical section may trigger the context switch
-    // flagged by calling Scheduler::donate_to or Scheduler::yield
-    // above. We have to do it this way because we intentionally
+    // flagged by calling Scheduler::yield above.
+    // We have to do it this way because we intentionally
     // leave the critical section here to be able to switch contexts.
     u32 prev_flags;
     u32 prev_crit = Processor::current().clear_critical(prev_flags, true);