Kernel: Stop using *LockRefPtr for Kernel::Timer

Andreas Kling 2023-04-03 13:09:38 +02:00
parent 83b409083b
commit 496d918e92
9 changed files with 42 additions and 39 deletions
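
In broad strokes: Kernel::Timer is now reference-counted with plain AK RefPtr/NonnullRefPtr instead of the LockRefPtr family, and the one pointer that is still mutated concurrently, Process::m_alarm_timer, becomes a SpinlockProtected<RefPtr<Timer>> that is only touched inside a with() callback. A minimal sketch of that access pattern, assuming the usual kernel headers; the AlarmOwner class and its disarm() helper are hypothetical and not code from this commit:

#include <AK/RefPtr.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/TimerQueue.h>

namespace Kernel {

// Hypothetical owner type, for illustration only.
class AlarmOwner {
public:
    void disarm()
    {
        // Every access to the RefPtr goes through with(), so the pointer itself
        // is guarded by the spinlock, while the Timer object's lifetime is
        // handled by ordinary reference counting.
        m_timer.with([&](auto& timer) {
            if (timer)
                TimerQueue::the().cancel_timer(timer.release_nonnull());
        });
    }

private:
    // Previously a LockRefPtr<Timer>; now a plain RefPtr guarded by a spinlock.
    SpinlockProtected<RefPtr<Timer>, LockRank::None> m_timer;
};

}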

@@ -39,7 +39,7 @@ void VMWareFramebufferConsole::flush(size_t, size_t, size_t, size_t)
 void VMWareFramebufferConsole::enqueue_refresh_timer()
 {
-    NonnullLockRefPtr<Timer> refresh_timer = adopt_lock_ref(*new (nothrow) Timer());
+    auto refresh_timer = adopt_nonnull_ref_or_enomem(new (nothrow) Timer()).release_value_but_fixme_should_propagate_errors();
     refresh_timer->setup(CLOCK_MONOTONIC, refresh_interval, [this]() {
         if (m_enabled.load() && m_dirty) {
             MUST(g_io_work->try_queue([this]() {

@@ -67,7 +67,7 @@ void Console::flush(size_t, size_t, size_t, size_t)
 void Console::enqueue_refresh_timer()
 {
-    NonnullLockRefPtr<Timer> refresh_timer = adopt_lock_ref(*new Timer());
+    auto refresh_timer = adopt_nonnull_ref_or_enomem(new (nothrow) Timer()).release_value_but_fixme_should_propagate_errors();
     refresh_timer->setup(CLOCK_MONOTONIC, refresh_interval, [this]() {
         if (m_enabled.load() && m_dirty) {
             MUST(g_io_work->try_queue([this]() {

@@ -364,7 +364,6 @@ Process::~Process()
     unprotect_data();
 
     VERIFY(thread_count() == 0); // all threads should have been finalized
-    VERIFY(!m_alarm_timer);
 
     PerformanceManager::add_process_exit_event(*this);
 }
@@ -754,8 +753,10 @@ void Process::finalize()
     m_threads_for_coredump.clear();
 
-    if (m_alarm_timer)
-        TimerQueue::the().cancel_timer(m_alarm_timer.release_nonnull());
+    m_alarm_timer.with([&](auto& timer) {
+        if (timer)
+            TimerQueue::the().cancel_timer(timer.release_nonnull());
+    });
 
     m_fds.with_exclusive([](auto& fds) { fds.clear(); });
     m_tty = nullptr;
     m_executable.with([](auto& executable) { executable = nullptr; });

@@ -875,7 +875,7 @@ private:
     Mutex m_big_lock { "Process"sv, Mutex::MutexBehavior::BigLock };
     Mutex m_ptrace_lock { "ptrace"sv };
 
-    LockRefPtr<Timer> m_alarm_timer;
+    SpinlockProtected<RefPtr<Timer>, LockRank::None> m_alarm_timer;
 
     SpinlockProtected<UnveilData, LockRank::None> m_unveil_data;
     SpinlockProtected<UnveilData, LockRank::None> m_exec_unveil_data;

@@ -15,11 +15,12 @@ ErrorOr<FlatPtr> Process::sys$alarm(unsigned seconds)
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
     TRY(require_promise(Pledge::stdio));
     unsigned previous_alarm_remaining = 0;
-    if (m_alarm_timer) {
+    return m_alarm_timer.with([&](auto& timer) -> ErrorOr<FlatPtr> {
+        if (timer) {
         bool was_in_use = false;
-        if (TimerQueue::the().cancel_timer(*m_alarm_timer, &was_in_use)) {
+        if (TimerQueue::the().cancel_timer(*timer, &was_in_use)) {
             // The timer hasn't fired. Round up the remaining time (if any)
-            Time remaining = m_alarm_timer->remaining() + Time::from_nanoseconds(999'999'999);
+            Time remaining = timer->remaining() + Time::from_nanoseconds(999'999'999);
             previous_alarm_remaining = remaining.to_truncated_seconds();
         }
         // We had an existing alarm, must return a non-zero value here!
@@ -30,16 +31,17 @@ ErrorOr<FlatPtr> Process::sys$alarm(unsigned seconds)
     if (seconds > 0) {
         auto deadline = TimeManagement::the().current_time(CLOCK_REALTIME_COARSE);
         deadline = deadline + Time::from_seconds(seconds);
-        if (!m_alarm_timer) {
-            m_alarm_timer = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) Timer));
+        if (!timer) {
+            timer = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Timer));
         }
-        auto timer_was_added = TimerQueue::the().add_timer_without_id(*m_alarm_timer, CLOCK_REALTIME_COARSE, deadline, [this]() {
+        auto timer_was_added = TimerQueue::the().add_timer_without_id(*timer, CLOCK_REALTIME_COARSE, deadline, [this]() {
             MUST(send_signal(SIGALRM, nullptr));
         });
         if (!timer_was_added)
             return ENOMEM;
     }
 
     return previous_alarm_remaining;
+    });
 }
 }

@@ -44,13 +44,13 @@ ErrorOr<NonnullRefPtr<Thread>> Thread::create(NonnullRefPtr<Process> process)
     auto kernel_stack_region = TRY(MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
     kernel_stack_region->set_stack(true);
 
-    auto block_timer = TRY(try_make_lock_ref_counted<Timer>());
+    auto block_timer = TRY(try_make_ref_counted<Timer>());
 
     auto name = TRY(process->name().with([](auto& name) { return name->try_clone(); }));
     return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
 }
 
-Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullLockRefPtr<Timer> block_timer, NonnullOwnPtr<KString> name)
+Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer, NonnullOwnPtr<KString> name)
     : m_process(move(process))
     , m_kernel_stack_region(move(kernel_stack_region))
     , m_name(move(name))

@@ -1083,7 +1083,7 @@ public:
 #endif
 
 private:
-    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullLockRefPtr<Timer>, NonnullOwnPtr<KString>);
+    Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullRefPtr<Timer>, NonnullOwnPtr<KString>);
 
     BlockResult block_impl(BlockTimeout const&, Blocker&);
@@ -1235,7 +1235,7 @@ private:
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
     Atomic<u32> m_nested_profiler_calls { 0 };
 
-    NonnullLockRefPtr<Timer> m_block_timer;
+    NonnullRefPtr<Timer> const m_block_timer;
 
     bool m_is_profiling_suppressed { false };

@@ -55,14 +55,14 @@ UNMAP_AFTER_INIT TimerQueue::TimerQueue()
     m_ticks_per_second = TimeManagement::the().ticks_per_second();
 }
 
-bool TimerQueue::add_timer_without_id(NonnullLockRefPtr<Timer> timer, clockid_t clock_id, Time const& deadline, Function<void()>&& callback)
+bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t clock_id, Time const& deadline, Function<void()>&& callback)
 {
     if (deadline <= TimeManagement::the().current_time(clock_id))
         return false;
 
     // Because timer handlers can execute on any processor and there is
     // a race between executing a timer handler and cancel_timer() this
-    // *must* be a LockRefPtr<Timer>. Otherwise, calling cancel_timer() could
+    // *must* be a RefPtr<Timer>. Otherwise, calling cancel_timer() could
     // inadvertently cancel another timer that has been created between
     // returning from the timer handler and a call to cancel_timer().
     timer->setup(clock_id, deadline, move(callback));
@@ -73,7 +73,7 @@ bool TimerQueue::add_timer_without_id(NonnullLockRefPtr<Timer> timer, clockid_t
     return true;
 }
 
-TimerId TimerQueue::add_timer(NonnullLockRefPtr<Timer>&& timer)
+TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
 {
     SpinlockLocker lock(g_timerqueue_lock);
@@ -84,7 +84,7 @@ TimerId TimerQueue::add_timer(NonnullLockRefPtr<Timer>&& timer)
     return id;
 }
 
-void TimerQueue::add_timer_locked(NonnullLockRefPtr<Timer> timer)
+void TimerQueue::add_timer_locked(NonnullRefPtr<Timer> timer)
 {
     Time timer_expiration = timer->m_expires;
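
The comment preserved in add_timer_without_id() above carries the reasoning behind this API: timer callbacks can run on any processor, so both the TimerQueue and the caller keep a strong reference to the same Timer object, and a racing cancel_timer() can only ever act on that exact object, never on a newer Timer created after the callback returned. A small caller-side sketch of that contract; the helper names, clock, one-second deadline, and include paths are illustrative assumptions, not code from this commit:

#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>

namespace Kernel {

// Hypothetical helpers, for illustration only.
static ErrorOr<NonnullRefPtr<Timer>> arm_example_timer()
{
    // The caller keeps its own strong reference to the Timer it queues.
    auto timer = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Timer));
    auto deadline = TimeManagement::the().current_time(CLOCK_MONOTONIC) + Time::from_seconds(1);
    if (!TimerQueue::the().add_timer_without_id(*timer, CLOCK_MONOTONIC, deadline, [] {
            // Timer fired; this may run on any processor.
        }))
        return ENOMEM;
    return timer;
}

static void disarm_example_timer(NonnullRefPtr<Timer> timer)
{
    // Racing with the callback is fine: we can only cancel the Timer we hold,
    // not whatever Timer happens to be queued now.
    bool was_in_use = false;
    TimerQueue::the().cancel_timer(*timer, &was_in_use);
}

}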

@@ -87,8 +87,8 @@ public:
     TimerQueue();
     static TimerQueue& the();
 
-    TimerId add_timer(NonnullLockRefPtr<Timer>&&);
-    bool add_timer_without_id(NonnullLockRefPtr<Timer>, clockid_t, Time const&, Function<void()>&&);
+    TimerId add_timer(NonnullRefPtr<Timer>&&);
+    bool add_timer_without_id(NonnullRefPtr<Timer>, clockid_t, Time const&, Function<void()>&&);
     bool cancel_timer(Timer& timer, bool* was_in_use = nullptr);
     void fire();
@@ -99,7 +99,7 @@ private:
     };
 
     void remove_timer_locked(Queue&, Timer&);
     void update_next_timer_due(Queue&);
-    void add_timer_locked(NonnullLockRefPtr<Timer>);
+    void add_timer_locked(NonnullRefPtr<Timer>);
 
     Queue& queue_for_timer(Timer& timer)
     {