From 7557f2db905eaf768b54be854d9bf1ce32f0973d Mon Sep 17 00:00:00 2001
From: Gunnar Beutner
Date: Thu, 20 May 2021 00:41:51 +0200
Subject: [PATCH] Kernel: Remove an allocation when blocking a thread

When blocking a thread with a timeout we would previously allocate
a Timer object. This removes the allocation for that Timer object.
---
 Kernel/Syscalls/alarm.cpp | 15 +++++++++++----
 Kernel/Thread.cpp         |  9 +++++++--
 Kernel/Thread.h           | 14 ++++++++------
 Kernel/TimerQueue.cpp     | 15 +++++++++------
 Kernel/TimerQueue.h       | 12 +++++++-----
 5 files changed, 42 insertions(+), 23 deletions(-)

diff --git a/Kernel/Syscalls/alarm.cpp b/Kernel/Syscalls/alarm.cpp
index 221df0d233..da30599508 100644
--- a/Kernel/Syscalls/alarm.cpp
+++ b/Kernel/Syscalls/alarm.cpp
@@ -13,10 +13,10 @@ KResultOr<unsigned> Process::sys$alarm(unsigned seconds)
 {
     REQUIRE_PROMISE(stdio);
     unsigned previous_alarm_remaining = 0;
-    if (auto alarm_timer = move(m_alarm_timer)) {
-        if (TimerQueue::the().cancel_timer(*alarm_timer)) {
+    if (m_alarm_timer) {
+        if (TimerQueue::the().cancel_timer(*m_alarm_timer)) {
             // The timer hasn't fired. Round up the remaining time (if any)
-            Time remaining = alarm_timer->remaining() + Time::from_nanoseconds(999'999'999);
+            Time remaining = m_alarm_timer->remaining() + Time::from_nanoseconds(999'999'999);
             previous_alarm_remaining = remaining.to_truncated_seconds();
         }
         // We had an existing alarm, must return a non-zero value here!
@@ -27,9 +27,16 @@ KResultOr<unsigned> Process::sys$alarm(unsigned seconds)
     if (seconds > 0) {
         auto deadline = TimeManagement::the().current_time(CLOCK_REALTIME_COARSE);
         deadline = deadline + Time::from_seconds(seconds);
-        m_alarm_timer = TimerQueue::the().add_timer_without_id(CLOCK_REALTIME_COARSE, deadline, [this]() {
+        if (!m_alarm_timer) {
+            m_alarm_timer = adopt_ref_if_nonnull(new Timer());
+            if (!m_alarm_timer)
+                return ENOMEM;
+        }
+        auto timer_was_added = TimerQueue::the().add_timer_without_id(*m_alarm_timer, CLOCK_REALTIME_COARSE, deadline, [this]() {
             [[maybe_unused]] auto rc = send_signal(SIGALRM, nullptr);
         });
+        if (!timer_was_added)
+            return ENOMEM;
     }
     return previous_alarm_remaining;
 }
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index beeee29472..5cf5718530 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -42,17 +42,22 @@ KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> proce
         return ENOMEM;
     kernel_stack_region->set_stack(true);

-    auto thread = adopt_ref_if_nonnull(new Thread(move(process), kernel_stack_region.release_nonnull()));
+    auto block_timer = adopt_ref_if_nonnull(new Timer());
+    if (!block_timer)
+        return ENOMEM;
+
+    auto thread = adopt_ref_if_nonnull(new Thread(move(process), kernel_stack_region.release_nonnull(), block_timer.release_nonnull()));
     if (!thread)
         return ENOMEM;
     return thread.release_nonnull();
 }

-Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stack_region)
+Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer)
     : m_process(move(process))
     , m_kernel_stack_region(move(kernel_stack_region))
     , m_name(m_process->name())
+    , m_block_timer(block_timer)
 {
     bool is_first_thread = m_process->add_thread(*this);
     if (is_first_thread) {
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index ee77fd3bd2..df3ee93efa 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -791,7 +791,7 @@ public:
         // Relaxed semantics are fine for timeout_unblocked because we
         // synchronize on the spin locks already.
         Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
-        RefPtr<Timer> timer;
+        bool timer_was_added = false;
         {
             switch (state()) {
             case Thread::Stopped:
@@ -817,7 +817,7 @@ public:
             if (!block_timeout.is_infinite()) {
                 // Process::kill_all_threads may be called at any time, which will mark all
                 // threads to die. In that case
-                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
+                timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                     VERIFY(!Processor::current().in_irq());
                     VERIFY(!g_scheduler_lock.own_lock());
                     VERIFY(!m_block_lock.own_lock());
@@ -827,7 +827,7 @@ public:
                     if (m_blocker && timeout_unblocked.exchange(true) == false)
                         unblock();
                 });
-                if (!timer) {
+                if (!timer_was_added) {
                     // Timeout is already in the past
                     blocker.not_blocking(true);
                     m_blocker = nullptr;
@@ -890,11 +890,11 @@ public:
         // to clean up now while we're still holding m_lock
         auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally

-        if (timer && !did_timeout) {
+        if (timer_was_added && !did_timeout) {
             // Cancel the timer while not holding any locks. This allows
             // the timer function to complete before we remove it
             // (e.g. if it's on another processor)
-            TimerQueue::the().cancel_timer(timer.release_nonnull());
+            TimerQueue::the().cancel_timer(*m_block_timer);
         }
         if (previous_locked != LockMode::Unlocked) {
             // NOTE: this may trigger another call to Thread::block(), so
@@ -1131,7 +1131,7 @@ public:
     }

 private:
-    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
+    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region>, NonnullRefPtr<Timer>);

     IntrusiveListNode m_process_thread_list_node;
     int m_runnable_priority { -1 };
@@ -1269,6 +1269,8 @@ private:
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
     Atomic<u32> m_nested_profiler_calls { 0 };

+    RefPtr<Timer> m_block_timer;
+
     void yield_without_holding_big_lock();
     void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();
diff --git a/Kernel/TimerQueue.cpp b/Kernel/TimerQueue.cpp
index 727499c81e..2ccf22e554 100644
--- a/Kernel/TimerQueue.cpp
+++ b/Kernel/TimerQueue.cpp
@@ -57,22 +57,22 @@ UNMAP_AFTER_INIT TimerQueue::TimerQueue()
     m_ticks_per_second = TimeManagement::the().ticks_per_second();
 }

-RefPtr<Timer> TimerQueue::add_timer_without_id(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
+bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
 {
     if (deadline <= TimeManagement::the().current_time(clock_id))
-        return {};
+        return false;

     // Because timer handlers can execute on any processor and there is
     // a race between executing a timer handler and cancel_timer() this
     // *must* be a RefPtr. Otherwise calling cancel_timer() could
     // inadvertently cancel another timer that has been created between
     // returning from the timer handler and a call to cancel_timer().
-    auto timer = adopt_ref(*new Timer(clock_id, deadline, move(callback)));
+    timer->setup(clock_id, deadline, move(callback));

     ScopedSpinLock lock(g_timerqueue_lock);
     timer->m_id = 0; // Don't generate a timer id
-    add_timer_locked(timer);
-    return timer;
+    add_timer_locked(move(timer));
+    return true;
 }

 TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
@@ -119,7 +119,10 @@ TimerId TimerQueue::add_timer(clockid_t clock_id, const Time& deadline, Function
 {
     auto expires = TimeManagement::the().current_time(clock_id);
     expires = expires + deadline;
-    return add_timer(adopt_ref(*new Timer(clock_id, expires, move(callback))));
+    auto timer = new Timer();
+    VERIFY(timer);
+    timer->setup(clock_id, expires, move(callback));
+    return add_timer(adopt_ref(*timer));
 }

 bool TimerQueue::cancel_timer(TimerId id)
diff --git a/Kernel/TimerQueue.h b/Kernel/TimerQueue.h
index c84f981786..ca0cfaea58 100644
--- a/Kernel/TimerQueue.h
+++ b/Kernel/TimerQueue.h
@@ -24,12 +24,14 @@ class Timer : public RefCounted<Timer>
     friend class InlineLinkedListNode<Timer>;

 public:
-    Timer(clockid_t clock_id, Time expires, Function<void()>&& callback)
-        : m_clock_id(clock_id)
-        , m_expires(expires)
-        , m_callback(move(callback))
+    void setup(clockid_t clock_id, Time expires, Function<void()>&& callback)
     {
+        VERIFY(!is_queued());
+        m_clock_id = clock_id;
+        m_expires = expires;
+        m_callback = move(callback);
     }
+
     ~Timer()
     {
         VERIFY(!is_queued());
@@ -72,7 +74,7 @@ public:
     static TimerQueue& the();

     TimerId add_timer(NonnullRefPtr<Timer>&&);
-    RefPtr<Timer> add_timer_without_id(clockid_t, const Time&, Function<void()>&&);
+    bool add_timer_without_id(NonnullRefPtr<Timer>, clockid_t, const Time&, Function<void()>&&);
     TimerId add_timer(clockid_t, const Time& timeout, Function<void()>&& callback);
     bool cancel_timer(TimerId id);
     bool cancel_timer(Timer&);
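The pattern the patch applies is: allocate one Timer per Thread when the thread object is created, then re-arm that same object in place with Timer::setup() every time the thread blocks with a timeout, so the blocking path itself never allocates. The sketch below is a minimal userspace C++ analogue of that idea, not SerenityOS kernel code; ReusableTimer, SimpleThread, block_with_timeout() and fire_if_expired() are invented names used only for illustration.

// Minimal userspace analogue of the pattern in this patch. All names below
// (ReusableTimer, SimpleThread, block_with_timeout) are illustrative only and
// are not the SerenityOS kernel APIs.
#include <chrono>
#include <functional>
#include <iostream>
#include <memory>

using Clock = std::chrono::steady_clock;

class ReusableTimer {
public:
    // Re-arm the timer in place, analogous to Timer::setup() in the patch.
    // No allocation happens here.
    void setup(Clock::time_point deadline, std::function<void()> callback)
    {
        m_deadline = deadline;
        m_callback = std::move(callback);
        m_armed = true;
    }

    // Fire the callback if the deadline has passed; returns true if it fired.
    bool fire_if_expired(Clock::time_point now)
    {
        if (!m_armed || now < m_deadline)
            return false;
        m_armed = false;
        m_callback();
        return true;
    }

private:
    Clock::time_point m_deadline {};
    std::function<void()> m_callback;
    bool m_armed { false };
};

class SimpleThread {
public:
    // Analogous to Thread::try_create(): the block timer is allocated exactly
    // once, when the thread object is created. (The kernel checks the result of
    // adopt_ref_if_nonnull(); std::make_unique throws on failure instead.)
    static std::unique_ptr<SimpleThread> try_create()
    {
        auto block_timer = std::make_unique<ReusableTimer>();
        return std::unique_ptr<SimpleThread>(new SimpleThread(std::move(block_timer)));
    }

    // Analogous to the Thread::block() change: blocking with a timeout only
    // re-arms the pre-allocated timer instead of allocating a fresh one.
    void block_with_timeout(Clock::duration timeout)
    {
        m_block_timer->setup(Clock::now() + timeout, [] {
            std::cout << "timeout fired, unblocking\n";
        });
    }

    ReusableTimer& block_timer() { return *m_block_timer; }

private:
    explicit SimpleThread(std::unique_ptr<ReusableTimer> block_timer)
        : m_block_timer(std::move(block_timer))
    {
    }

    std::unique_ptr<ReusableTimer> m_block_timer;
};

int main()
{
    auto thread = SimpleThread::try_create();
    // With a zero timeout the deadline is already due, so polling once fires
    // the callback immediately.
    thread->block_with_timeout(std::chrono::milliseconds(0));
    thread->block_timer().fire_if_expired(Clock::now());
}

In the kernel patch the same reuse is made safe by Timer::setup() asserting VERIFY(!is_queued()), so a timer still sitting on the TimerQueue cannot be re-armed, and by cancel_timer(*m_block_timer) taking a reference to the per-thread timer instead of consuming a RefPtr.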