Kernel: Remove an allocation when blocking a thread
When blocking a thread with a timeout, we would previously allocate a Timer object for every blocking operation. This removes that allocation by pre-allocating a per-thread block timer and reusing it.
commit 7557f2db90
parent 96b75af5d1
5 changed files with 42 additions and 23 deletions
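The idea behind the change, sketched outside the kernel with standard-library types: allocate the timer once when the thread is created, then reuse it for every blocking timeout by reinitializing it (the Timer::setup() pattern in the diff below) instead of allocating a fresh Timer per block. This is a minimal sketch; the SketchTimer/SketchThread names and the simplified block_with_timeout() flow are invented for illustration and are not part of the commit, only the allocate-once, reuse-many pattern is.

#include <cassert>
#include <functional>
#include <iostream>
#include <memory>
#include <utility>

// Stand-in for the kernel's Timer: a reusable timer object that is
// reconfigured via setup() instead of being re-allocated each time.
struct SketchTimer {
    bool queued { false };
    std::function<void()> callback;

    void setup(std::function<void()> cb)
    {
        assert(!queued); // mirrors the VERIFY(!is_queued()) in Timer::setup()
        callback = std::move(cb);
    }
};

// Stand-in for the kernel's Thread: owns one pre-allocated block timer.
class SketchThread {
public:
    SketchThread()
        : m_block_timer(std::make_shared<SketchTimer>()) // the only allocation, at creation time
    {
    }

    // Called for every blocking operation with a timeout. No allocation
    // happens here; the pre-allocated timer is reused.
    void block_with_timeout()
    {
        m_block_timer->setup([] { std::cout << "timeout fired\n"; });
        m_block_timer->queued = true;
        // A real implementation would hand the timer to a timer queue and
        // block until the callback fires or the thread is unblocked early.
        m_block_timer->callback();
        m_block_timer->queued = false;
    }

private:
    std::shared_ptr<SketchTimer> m_block_timer;
};

int main()
{
    SketchThread thread;
    thread.block_with_timeout(); // reuses the same SketchTimer
    thread.block_with_timeout(); // still no new allocation
}

The same reuse shows up twice in the diff: Thread::try_create() pre-allocates m_block_timer so Thread::block() never allocates, and sys$alarm() lazily allocates m_alarm_timer once and reuses it for subsequent alarms.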
@@ -13,10 +13,10 @@ KResultOr<unsigned> Process::sys$alarm(unsigned seconds)
 {
     REQUIRE_PROMISE(stdio);
     unsigned previous_alarm_remaining = 0;
-    if (auto alarm_timer = move(m_alarm_timer)) {
-        if (TimerQueue::the().cancel_timer(*alarm_timer)) {
+    if (m_alarm_timer) {
+        if (TimerQueue::the().cancel_timer(*m_alarm_timer)) {
             // The timer hasn't fired. Round up the remaining time (if any)
-            Time remaining = alarm_timer->remaining() + Time::from_nanoseconds(999'999'999);
+            Time remaining = m_alarm_timer->remaining() + Time::from_nanoseconds(999'999'999);
             previous_alarm_remaining = remaining.to_truncated_seconds();
         }
         // We had an existing alarm, must return a non-zero value here!
@@ -27,9 +27,16 @@ KResultOr<unsigned> Process::sys$alarm(unsigned seconds)
     if (seconds > 0) {
         auto deadline = TimeManagement::the().current_time(CLOCK_REALTIME_COARSE);
         deadline = deadline + Time::from_seconds(seconds);
-        m_alarm_timer = TimerQueue::the().add_timer_without_id(CLOCK_REALTIME_COARSE, deadline, [this]() {
+        if (!m_alarm_timer) {
+            m_alarm_timer = adopt_ref_if_nonnull(new Timer());
+            if (!m_alarm_timer)
+                return ENOMEM;
+        }
+        auto timer_was_added = TimerQueue::the().add_timer_without_id(*m_alarm_timer, CLOCK_REALTIME_COARSE, deadline, [this]() {
             [[maybe_unused]] auto rc = send_signal(SIGALRM, nullptr);
         });
+        if (!timer_was_added)
+            return ENOMEM;
     }
     return previous_alarm_remaining;
 }

@@ -42,17 +42,22 @@ KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
         return ENOMEM;
     kernel_stack_region->set_stack(true);
 
-    auto thread = adopt_ref_if_nonnull(new Thread(move(process), kernel_stack_region.release_nonnull()));
+    auto block_timer = adopt_ref_if_nonnull(new Timer());
+    if (!block_timer)
+        return ENOMEM;
+
+    auto thread = adopt_ref_if_nonnull(new Thread(move(process), kernel_stack_region.release_nonnull(), block_timer.release_nonnull()));
     if (!thread)
         return ENOMEM;
 
     return thread.release_nonnull();
 }
 
-Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stack_region)
+Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer)
     : m_process(move(process))
     , m_kernel_stack_region(move(kernel_stack_region))
     , m_name(m_process->name())
+    , m_block_timer(block_timer)
 {
     bool is_first_thread = m_process->add_thread(*this);
     if (is_first_thread) {

@@ -791,7 +791,7 @@ public:
         // Relaxed semantics are fine for timeout_unblocked because we
         // synchronize on the spin locks already.
         Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
-        RefPtr<Timer> timer;
+        bool timer_was_added = false;
         {
             switch (state()) {
             case Thread::Stopped:
@@ -817,7 +817,7 @@ public:
             if (!block_timeout.is_infinite()) {
                 // Process::kill_all_threads may be called at any time, which will mark all
                 // threads to die. In that case
-                timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
+                timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
                     VERIFY(!Processor::current().in_irq());
                     VERIFY(!g_scheduler_lock.own_lock());
                     VERIFY(!m_block_lock.own_lock());
@@ -827,7 +827,7 @@ public:
                     if (m_blocker && timeout_unblocked.exchange(true) == false)
                         unblock();
                 });
-                if (!timer) {
+                if (!timer_was_added) {
                     // Timeout is already in the past
                     blocker.not_blocking(true);
                     m_blocker = nullptr;
@@ -890,11 +890,11 @@ public:
         // to clean up now while we're still holding m_lock
         auto result = blocker.end_blocking({}, did_timeout); // calls was_unblocked internally
 
-        if (timer && !did_timeout) {
+        if (timer_was_added && !did_timeout) {
             // Cancel the timer while not holding any locks. This allows
             // the timer function to complete before we remove it
             // (e.g. if it's on another processor)
-            TimerQueue::the().cancel_timer(timer.release_nonnull());
+            TimerQueue::the().cancel_timer(*m_block_timer);
         }
         if (previous_locked != LockMode::Unlocked) {
             // NOTE: this may trigger another call to Thread::block(), so
@@ -1131,7 +1131,7 @@ public:
     }
 
 private:
-    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
+    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region>, NonnullRefPtr<Timer>);
 
     IntrusiveListNode<Thread> m_process_thread_list_node;
     int m_runnable_priority { -1 };
@@ -1269,6 +1269,8 @@ private:
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
     Atomic<u32> m_nested_profiler_calls { 0 };
 
+    RefPtr<Timer> m_block_timer;
+
     void yield_without_holding_big_lock();
     void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();

@@ -57,22 +57,22 @@ UNMAP_AFTER_INIT TimerQueue::TimerQueue()
     m_ticks_per_second = TimeManagement::the().ticks_per_second();
 }
 
-RefPtr<Timer> TimerQueue::add_timer_without_id(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
+bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
 {
     if (deadline <= TimeManagement::the().current_time(clock_id))
-        return {};
+        return false;
 
     // Because timer handlers can execute on any processor and there is
     // a race between executing a timer handler and cancel_timer() this
     // *must* be a RefPtr<Timer>. Otherwise calling cancel_timer() could
     // inadvertently cancel another timer that has been created between
     // returning from the timer handler and a call to cancel_timer().
-    auto timer = adopt_ref(*new Timer(clock_id, deadline, move(callback)));
+    timer->setup(clock_id, deadline, move(callback));
 
     ScopedSpinLock lock(g_timerqueue_lock);
     timer->m_id = 0; // Don't generate a timer id
-    add_timer_locked(timer);
-    return timer;
+    add_timer_locked(move(timer));
+    return true;
 }
 
 TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
@@ -119,7 +119,10 @@ TimerId TimerQueue::add_timer(clockid_t clock_id, const Time& deadline, Function<void()>&& callback)
 {
     auto expires = TimeManagement::the().current_time(clock_id);
     expires = expires + deadline;
-    return add_timer(adopt_ref(*new Timer(clock_id, expires, move(callback))));
+    auto timer = new Timer();
+    VERIFY(timer);
+    timer->setup(clock_id, expires, move(callback));
+    return add_timer(adopt_ref(*timer));
 }
 
 bool TimerQueue::cancel_timer(TimerId id)

@@ -24,12 +24,14 @@ class Timer : public RefCounted<Timer>
     friend class InlineLinkedListNode<Timer>;
 
 public:
-    Timer(clockid_t clock_id, Time expires, Function<void()>&& callback)
-        : m_clock_id(clock_id)
-        , m_expires(expires)
-        , m_callback(move(callback))
+    void setup(clockid_t clock_id, Time expires, Function<void()>&& callback)
     {
+        VERIFY(!is_queued());
+        m_clock_id = clock_id;
+        m_expires = expires;
+        m_callback = move(callback);
     }
 
     ~Timer()
     {
         VERIFY(!is_queued());
@@ -72,7 +74,7 @@ public:
     static TimerQueue& the();
 
     TimerId add_timer(NonnullRefPtr<Timer>&&);
-    RefPtr<Timer> add_timer_without_id(clockid_t, const Time&, Function<void()>&&);
+    bool add_timer_without_id(NonnullRefPtr<Timer>, clockid_t, const Time&, Function<void()>&&);
     TimerId add_timer(clockid_t, const Time& timeout, Function<void()>&& callback);
     bool cancel_timer(TimerId id);
     bool cancel_timer(Timer&);