Kernel: Move all tasks-related code to the Tasks subdirectory
parent 788022d5d1
commit 1b04726c85
184 changed files with 245 additions and 243 deletions
Kernel/Tasks/AtomicEdgeAction.h (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Atomic.h>
#include <Kernel/Arch/Processor.h>

namespace Kernel {

template<typename AtomicRefCountType>
class AtomicEdgeAction {
public:
    template<typename FirstRefAction>
    bool ref(FirstRefAction first_ref_action)
    {
        AtomicRefCountType expected = 0;
        AtomicRefCountType desired = (1 << 1) | 1;
        // Least significant bit indicates we're busy protecting/unprotecting
        for (;;) {
            if (m_atomic_ref_count.compare_exchange_strong(expected, desired, AK::memory_order_relaxed))
                break;

            Processor::wait_check();

            expected &= ~1;
            desired = expected + (1 << 1);
            VERIFY(desired > expected);
            if (expected == 0)
                desired |= 1;
        }

        atomic_thread_fence(AK::memory_order_acquire);

        if (expected == 0) {
            first_ref_action();

            // drop the busy flag
            m_atomic_ref_count.store(desired & ~1, AK::memory_order_release);
            return true;
        }
        return false;
    }

    template<typename LastRefAction>
    bool unref(LastRefAction last_ref_action)
    {
        AtomicRefCountType expected = 1 << 1;
        AtomicRefCountType desired = (1 << 1) | 1;
        // Least significant bit indicates we're busy protecting/unprotecting
        for (;;) {
            if (m_atomic_ref_count.compare_exchange_strong(expected, desired, AK::memory_order_relaxed))
                break;

            Processor::wait_check();

            expected &= ~1;
            VERIFY(expected != 0); // Someone should always have at least one reference

            if (expected == 1 << 1) {
                desired = (1 << 1) | 1;
            } else {
                desired = expected - (1 << 1);
                VERIFY(desired < expected);
            }
        }

        AK::atomic_thread_fence(AK::memory_order_release);

        if (expected == 1 << 1) {
            last_ref_action();

            // drop the busy flag and release reference
            m_atomic_ref_count.store(0, AK::memory_order_release);
            return true;
        }
        return false;
    }

private:
    Atomic<AtomicRefCountType> m_atomic_ref_count { 0 };
};

}
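AtomicEdgeAction is an edge-triggered reference counter: bit 0 is a busy flag that serializes the first-ref and last-ref callbacks, and the count itself lives in the upper bits, which is why increments are `1 << 1`. A minimal usage sketch (not part of the commit; the profiling helpers are hypothetical):

#include <Kernel/Tasks/AtomicEdgeAction.h>

static Kernel::AtomicEdgeAction<u32> s_profiling_ref;

void enable_profiling()
{
    // ref() runs the action and returns true only on the 0 -> 1 edge.
    s_profiling_ref.ref([] {
        start_profiling_hardware(); // hypothetical helper
    });
}

void disable_profiling()
{
    // unref() runs the action and returns true only on the 1 -> 0 edge.
    s_profiling_ref.unref([] {
        stop_profiling_hardware(); // hypothetical helper
    });
}
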
Kernel/Tasks/FinalizerTask.cpp
@@ -4,10 +4,10 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-#include <Kernel/Process.h>
-#include <Kernel/Scheduler.h>
 #include <Kernel/Sections.h>
 #include <Kernel/Tasks/FinalizerTask.h>
+#include <Kernel/Tasks/Process.h>
+#include <Kernel/Tasks/Scheduler.h>
 
 namespace Kernel {
 
Kernel/Tasks/FutexQueue.cpp (new file, 160 lines)
@@ -0,0 +1,160 @@
/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Debug.h>
#include <Kernel/Tasks/FutexQueue.h>
#include <Kernel/Tasks/Thread.h>

namespace Kernel {

FutexQueue::FutexQueue() = default;
FutexQueue::~FutexQueue() = default;

bool FutexQueue::should_add_blocker(Thread::Blocker& b, void*)
{
    VERIFY(m_lock.is_locked());
    VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);

    VERIFY(m_imminent_waits > 0);
    m_imminent_waits--;

    if (m_was_removed) {
        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: should not block thread {}: was removed", this, b.thread());
        return false;
    }
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: should block thread {}", this, b.thread());

    return true;
}

ErrorOr<u32> FutexQueue::wake_n_requeue(u32 wake_count, Function<ErrorOr<FutexQueue*>()> const& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target)
{
    is_empty_target = false;
    SpinlockLocker lock(m_lock);

    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue({}, {})", this, wake_count, requeue_count);

    u32 did_wake = 0, did_requeue = 0;
    unblock_all_blockers_whose_conditions_are_met_locked([&](Thread::Blocker& b, void*, bool& stop_iterating) {
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);

        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue unblocking {}", this, blocker.thread());
        VERIFY(did_wake < wake_count);
        if (blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    is_empty = is_empty_and_no_imminent_waits_locked();
    if (requeue_count > 0) {
        auto blockers_to_requeue = do_take_blockers(requeue_count);
        if (!blockers_to_requeue.is_empty()) {
            if (auto* target_futex_queue = TRY(get_target_queue())) {
                dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue requeueing {} blockers to {}", this, blockers_to_requeue.size(), target_futex_queue);

                // While still holding m_lock, notify each blocker
                for (auto& info : blockers_to_requeue) {
                    VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
                    auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
                    blocker.begin_requeue();
                }

                lock.unlock();
                did_requeue = blockers_to_requeue.size();

                SpinlockLocker target_lock(target_futex_queue->m_lock);
                // Now that we have the lock of the target, append the blockers
                // and notify them that they completed the move
                for (auto& info : blockers_to_requeue) {
                    VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
                    auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
                    blocker.finish_requeue(*target_futex_queue);
                }
                target_futex_queue->do_append_blockers(move(blockers_to_requeue));
                is_empty_target = target_futex_queue->is_empty_and_no_imminent_waits_locked();
            } else {
                dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue could not get target queue to requeue {} blockers", this, blockers_to_requeue.size());
                do_append_blockers(move(blockers_to_requeue));
            }
        }
    }
    return did_wake + did_requeue;
}

u32 FutexQueue::wake_n(u32 wake_count, Optional<u32> const& bitset, bool& is_empty)
{
    if (wake_count == 0) {
        is_empty = false;
        return 0; // should we assert instead?
    }
    SpinlockLocker lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count);
    u32 did_wake = 0;
    unblock_all_blockers_whose_conditions_are_met_locked([&](Thread::Blocker& b, void*, bool& stop_iterating) {
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);

        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n unblocking {}", this, blocker.thread());
        VERIFY(did_wake < wake_count);
        if (bitset.has_value() ? blocker.unblock_bitset(bitset.value()) : blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    is_empty = is_empty_and_no_imminent_waits_locked();
    return did_wake;
}

u32 FutexQueue::wake_all(bool& is_empty)
{
    SpinlockLocker lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this);
    u32 did_wake = 0;
    unblock_all_blockers_whose_conditions_are_met_locked([&](Thread::Blocker& b, void*, bool&) {
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);
        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all unblocking {}", this, blocker.thread());
        if (blocker.unblock(true)) {
            did_wake++;
            return true;
        }
        return false;
    });
    is_empty = is_empty_and_no_imminent_waits_locked();
    return did_wake;
}

bool FutexQueue::is_empty_and_no_imminent_waits_locked()
{
    return m_imminent_waits == 0 && is_empty_locked();
}

bool FutexQueue::queue_imminent_wait()
{
    SpinlockLocker lock(m_lock);
    if (m_was_removed)
        return false;
    m_imminent_waits++;
    return true;
}

bool FutexQueue::try_remove()
{
    SpinlockLocker lock(m_lock);
    if (m_was_removed)
        return false;
    if (!is_empty_and_no_imminent_waits_locked())
        return false;
    m_was_removed = true;
    return true;
}

}
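wake_n_requeue() implements a FUTEX_REQUEUE-style operation: wake up to wake_count waiters, then migrate up to requeue_count of the remaining blockers to another queue while handing over the two spinlocks in order. A hedged caller sketch, assuming both queues are already resolved (the real sys$futex path looks the target queue up lazily, which is why the parameter is a callback):

ErrorOr<u32> requeue_sketch(Kernel::FutexQueue& queue, Kernel::FutexQueue& target)
{
    bool source_empty = false;
    bool target_empty = false;
    // Wake at most one waiter and move at most 32 more onto `target`;
    // the result is how many threads were woken plus how many were requeued.
    return queue.wake_n_requeue(
        1, [&]() -> ErrorOr<Kernel::FutexQueue*> { return &target; },
        32, source_empty, target_empty);
}
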
Kernel/Tasks/FutexQueue.h (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/AtomicRefCounted.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Tasks/Thread.h>

namespace Kernel {

class FutexQueue final
    : public AtomicRefCounted<FutexQueue>
    , public Thread::BlockerSet {
public:
    FutexQueue();
    virtual ~FutexQueue();

    ErrorOr<u32> wake_n_requeue(u32, Function<ErrorOr<FutexQueue*>()> const&, u32, bool&, bool&);
    u32 wake_n(u32, Optional<u32> const&, bool&);
    u32 wake_all(bool&);

    template<class... Args>
    Thread::BlockResult wait_on(Thread::BlockTimeout const& timeout, Args&&... args)
    {
        return Thread::current()->block<Thread::FutexBlocker>(timeout, *this, forward<Args>(args)...);
    }

    bool queue_imminent_wait();
    bool try_remove();

    bool is_empty_and_no_imminent_waits()
    {
        SpinlockLocker lock(m_lock);
        return is_empty_and_no_imminent_waits_locked();
    }
    bool is_empty_and_no_imminent_waits_locked();

protected:
    virtual bool should_add_blocker(Thread::Blocker& b, void*) override;

private:
    size_t m_imminent_waits { 1 }; // We only create this object if we're going to be waiting, so start out with 1
    bool m_was_removed { false };
};

}
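Together with queue_imminent_wait() and try_remove(), the queue behaves like a kernel-side condition variable keyed by a userspace address. A sketch of the waiter and waker sides, with the queue lookup omitted; BlockResult::NotBlocked is assumed to exist in Thread.h (whose diff is suppressed below):

Kernel::Thread::BlockResult wait_sketch(Kernel::FutexQueue& queue, Kernel::Thread::BlockTimeout const& timeout, u32 bitset)
{
    // Reserve a wait slot first; this fails if the queue has already been removed.
    if (!queue.queue_imminent_wait())
        return Kernel::Thread::BlockResult::NotBlocked;
    return queue.wait_on(timeout, bitset);
}

u32 wake_sketch(Kernel::FutexQueue& queue, u32 how_many)
{
    bool is_now_empty = false;
    u32 woken = queue.wake_n(how_many, {}, is_now_empty);
    if (is_now_empty)
        queue.try_remove(); // tear the queue down once nobody is waiting
    return woken;
}
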
Kernel/Tasks/Process.cpp (new file, 1146 lines)
File diff suppressed because it is too large.
Kernel/Tasks/Process.h (new file, 1040 lines)
File diff suppressed because it is too large.
Kernel/Tasks/ProcessGroup.cpp (new file, 59 lines)
@@ -0,0 +1,59 @@
/*
 * Copyright (c) 2020, the SerenityOS developers.
 * Copyright (c) 2021-2023, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Singleton.h>
#include <Kernel/Tasks/ProcessGroup.h>

namespace Kernel {

static Singleton<SpinlockProtected<ProcessGroup::AllInstancesList, LockRank::None>> s_all_instances;

SpinlockProtected<ProcessGroup::AllInstancesList, LockRank::None>& ProcessGroup::all_instances()
{
    return s_all_instances;
}

ProcessGroup::~ProcessGroup() = default;

ErrorOr<NonnullRefPtr<ProcessGroup>> ProcessGroup::create_if_unused_pgid(ProcessGroupID pgid)
{
    return all_instances().with([&](auto& all_instances) -> ErrorOr<NonnullRefPtr<ProcessGroup>> {
        for (auto& process_group : all_instances) {
            if (process_group.pgid() == pgid)
                return EPERM;
        }
        auto process_group = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) ProcessGroup(pgid)));
        all_instances.prepend(*process_group);
        return process_group;
    });
}

ErrorOr<NonnullRefPtr<ProcessGroup>> ProcessGroup::find_or_create(ProcessGroupID pgid)
{
    return all_instances().with([&](auto& all_instances) -> ErrorOr<NonnullRefPtr<ProcessGroup>> {
        for (auto& group : all_instances) {
            if (group.pgid() == pgid)
                return group;
        }
        auto process_group = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) ProcessGroup(pgid)));
        all_instances.prepend(*process_group);
        return process_group;
    });
}

RefPtr<ProcessGroup> ProcessGroup::from_pgid(ProcessGroupID pgid)
{
    return all_instances().with([&](auto& groups) -> RefPtr<ProcessGroup> {
        for (auto& group : groups) {
            if (group.pgid() == pgid)
                return &group;
        }
        return nullptr;
    });
}

}
Kernel/Tasks/ProcessGroup.h (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
 * Copyright (c) 2020-2023, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/IntrusiveList.h>
#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {

class ProcessGroup
    : public ListedRefCounted<ProcessGroup, LockType::Spinlock>
    , public LockWeakable<ProcessGroup> {

    AK_MAKE_NONMOVABLE(ProcessGroup);
    AK_MAKE_NONCOPYABLE(ProcessGroup);

public:
    ~ProcessGroup();

    static ErrorOr<NonnullRefPtr<ProcessGroup>> create_if_unused_pgid(ProcessGroupID);
    static ErrorOr<NonnullRefPtr<ProcessGroup>> find_or_create(ProcessGroupID);
    static RefPtr<ProcessGroup> from_pgid(ProcessGroupID);

    ProcessGroupID const& pgid() const { return m_pgid; }

private:
    ProcessGroup(ProcessGroupID pgid)
        : m_pgid(pgid)
    {
    }

    ProcessGroupID m_pgid;

    mutable IntrusiveListNode<ProcessGroup> m_list_node;

public:
    using AllInstancesList = IntrusiveList<&ProcessGroup::m_list_node>;
    static SpinlockProtected<AllInstancesList, LockRank::None>& all_instances();
};

}
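The two factories differ in one way: find_or_create() joins an existing group with the given pgid, while create_if_unused_pgid() fails with EPERM when the pgid is already taken, which matches "become a new group leader" semantics. A sketch; the set_process_group() setter on Process is hypothetical:

ErrorOr<void> join_group_sketch(Kernel::Process& process, Kernel::ProcessGroupID pgid)
{
    auto group = TRY(Kernel::ProcessGroup::find_or_create(pgid));
    process.set_process_group(move(group)); // hypothetical setter
    return {};
}
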
Kernel/Tasks/ProcessList.cpp (new file, 16 lines)
@@ -0,0 +1,16 @@
/*
 * Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Tasks/Process.h>

namespace Kernel {

ErrorOr<NonnullRefPtr<ProcessList>> ProcessList::create()
{
    return adopt_nonnull_ref_or_enomem(new (nothrow) ProcessList());
}

}
Kernel/Tasks/Scheduler.cpp (new file, 561 lines)
@@ -0,0 +1,561 @@
/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BuiltinWrappers.h>
#include <AK/ScopeGuard.h>
#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Debug.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Sections.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/Tasks/Scheduler.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/kstdio.h>

namespace Kernel {

RecursiveSpinlock<LockRank::None> g_scheduler_lock {};

static u32 time_slice_for(Thread const& thread)
{
    // One time slice unit == 4ms (assuming 250 ticks/second)
    if (thread.is_idle_thread())
        return 1;
    return 2;
}

READONLY_AFTER_INIT Thread* g_finalizer;
READONLY_AFTER_INIT WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
READONLY_AFTER_INIT static Process* s_colonel_process;

struct ThreadReadyQueue {
    IntrusiveList<&Thread::m_ready_queue_node> thread_list;
};

struct ThreadReadyQueues {
    u32 mask {};
    static constexpr size_t count = sizeof(mask) * 8;
    Array<ThreadReadyQueue, count> queues;
};

static Singleton<SpinlockProtected<ThreadReadyQueues, LockRank::None>> g_ready_queues;

static SpinlockProtected<TotalTimeScheduled, LockRank::None> g_total_time_scheduled {};

static void dump_thread_list(bool = false);

static inline u32 thread_priority_to_priority_index(u32 thread_priority)
{
    // Converts the priority in the range of THREAD_PRIORITY_MIN...THREAD_PRIORITY_MAX
    // to an index into g_ready_queues where 0 is the highest priority bucket
    VERIFY(thread_priority >= THREAD_PRIORITY_MIN && thread_priority <= THREAD_PRIORITY_MAX);
    constexpr u32 thread_priority_count = THREAD_PRIORITY_MAX - THREAD_PRIORITY_MIN + 1;
    static_assert(thread_priority_count > 0);
    auto priority_bucket = ((thread_priority_count - (thread_priority - THREAD_PRIORITY_MIN)) / thread_priority_count) * (ThreadReadyQueues::count - 1);
    VERIFY(priority_bucket < ThreadReadyQueues::count);
    return priority_bucket;
}
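// Worked example (not part of the file): assuming the usual SerenityOS bounds
// THREAD_PRIORITY_MIN == 1 and THREAD_PRIORITY_MAX == 99 (an assumption here),
// thread_priority_count is 99 and ThreadReadyQueues::count is 32, so:
//
//   priority 99 (highest): ((99 - 98) / 99) * 31 == 0 * 31 -> bucket 0
//   priority 50 (middle):  ((99 - 49) / 99) * 31 == 0 * 31 -> bucket 0
//   priority 1  (lowest):  ((99 -  0) / 99) * 31 == 1 * 31 -> bucket 31
//
// Because of the integer division, only the minimum priority lands in the
// lowest bucket; every other priority maps to bucket 0.
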
Thread& Scheduler::pull_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current_id();

    return g_ready_queues->with([&](auto& ready_queues) -> Thread& {
        auto priority_mask = ready_queues.mask;
        while (priority_mask != 0) {
            auto priority = bit_scan_forward(priority_mask);
            VERIFY(priority > 0);
            auto& ready_queue = ready_queues.queues[--priority];
            for (auto& thread : ready_queue.thread_list) {
                VERIFY(thread.m_runnable_priority == (int)priority);
                if (thread.is_active())
                    continue;
                if (!(thread.affinity() & affinity_mask))
                    continue;
                thread.m_runnable_priority = -1;
                ready_queue.thread_list.remove(thread);
                if (ready_queue.thread_list.is_empty())
                    ready_queues.mask &= ~(1u << priority);
                // Mark it as active because we are using this thread. This is similar
                // to comparing it with Processor::current_thread, but when there are
                // multiple processors there's no easy way to check whether the thread
                // is actually still needed. This prevents accidental finalization when
                // a thread is no longer in Running state, but running on another core.

                // We need to mark it active here so that this thread won't be
                // scheduled on another core if it were to be queued before actually
                // switching to it.
                // FIXME: Figure out a better way maybe?
                thread.set_active(true);
                return thread;
            }
            priority_mask &= ~(1u << priority);
        }

        auto* idle_thread = Processor::idle_thread();
        idle_thread->set_active(true);
        return *idle_thread;
    });
}

Thread* Scheduler::peek_next_runnable_thread()
{
    auto affinity_mask = 1u << Processor::current_id();

    return g_ready_queues->with([&](auto& ready_queues) -> Thread* {
        auto priority_mask = ready_queues.mask;
        while (priority_mask != 0) {
            auto priority = bit_scan_forward(priority_mask);
            VERIFY(priority > 0);
            auto& ready_queue = ready_queues.queues[--priority];
            for (auto& thread : ready_queue.thread_list) {
                VERIFY(thread.m_runnable_priority == (int)priority);
                if (thread.is_active())
                    continue;
                if (!(thread.affinity() & affinity_mask))
                    continue;
                return &thread;
            }
            priority_mask &= ~(1u << priority);
        }

        // Unlike in pull_next_runnable_thread() we don't want to fall back to
        // the idle thread. We just want to see if we have any other thread ready
        // to be scheduled.
        return nullptr;
    });
}

bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
{
    if (thread.is_idle_thread())
        return true;

    return g_ready_queues->with([&](auto& ready_queues) {
        auto priority = thread.m_runnable_priority;
        if (priority < 0) {
            VERIFY(!thread.m_ready_queue_node.is_in_list());
            return false;
        }

        if (check_affinity && !(thread.affinity() & (1 << Processor::current_id())))
            return false;

        VERIFY(ready_queues.mask & (1u << priority));
        auto& ready_queue = ready_queues.queues[priority];
        thread.m_runnable_priority = -1;
        ready_queue.thread_list.remove(thread);
        if (ready_queue.thread_list.is_empty())
            ready_queues.mask &= ~(1u << priority);
        return true;
    });
}

void Scheduler::enqueue_runnable_thread(Thread& thread)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (thread.is_idle_thread())
        return;
    auto priority = thread_priority_to_priority_index(thread.priority());

    g_ready_queues->with([&](auto& ready_queues) {
        VERIFY(thread.m_runnable_priority < 0);
        thread.m_runnable_priority = (int)priority;
        VERIFY(!thread.m_ready_queue_node.is_in_list());
        auto& ready_queue = ready_queues.queues[priority];
        bool was_empty = ready_queue.thread_list.is_empty();
        ready_queue.thread_list.append(thread);
        if (was_empty)
            ready_queues.mask |= (1u << priority);
    });
}

UNMAP_AFTER_INIT void Scheduler::start()
{
    VERIFY_INTERRUPTS_DISABLED();

    // We need to acquire our scheduler lock, which will be released
    // by the idle thread once control is transferred there
    g_scheduler_lock.lock();

    auto& processor = Processor::current();
    VERIFY(processor.is_initialized());
    auto& idle_thread = *Processor::idle_thread();
    VERIFY(processor.current_thread() == &idle_thread);
    idle_thread.set_ticks_left(time_slice_for(idle_thread));
    idle_thread.did_schedule();
    idle_thread.set_initialized(true);
    processor.init_context(idle_thread, false);
    idle_thread.set_state(Thread::State::Running);
    VERIFY(idle_thread.affinity() == (1u << processor.id()));
    processor.initialize_context_switching(idle_thread);
    VERIFY_NOT_REACHED();
}

void Scheduler::pick_next()
{
    VERIFY_INTERRUPTS_DISABLED();

    // Set the in_scheduler flag before acquiring the spinlock. This
    // prevents a recursive call into Scheduler::invoke_async upon
    // leaving the scheduler lock.
    ScopedCritical critical;
    Processor::set_current_in_scheduler(true);
    ScopeGuard guard(
        []() {
            // We may be on a different processor after we got switched
            // back to this thread!
            VERIFY(Processor::current_in_scheduler());
            Processor::set_current_in_scheduler(false);
        });

    SpinlockLocker lock(g_scheduler_lock);

    if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
        dump_thread_list();
    }

    auto& thread_to_schedule = pull_next_runnable_thread();
    if constexpr (SCHEDULER_DEBUG) {
        dbgln("Scheduler[{}]: Switch to {} @ {:p}",
            Processor::current_id(),
            thread_to_schedule,
            thread_to_schedule.regs().ip());
    }

    // We need to leave our first critical section before switching context,
    // but since we're still holding the scheduler lock we're still in a critical section
    critical.leave();

    thread_to_schedule.set_ticks_left(time_slice_for(thread_to_schedule));
    context_switch(&thread_to_schedule);
}

void Scheduler::yield()
{
    InterruptDisabler disabler;

    auto const* current_thread = Thread::current();
    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::current_id(), *current_thread, Processor::current_in_irq());
    VERIFY(current_thread != nullptr);
    if (Processor::current_in_irq() || Processor::in_critical()) {
        // If we're handling an IRQ we can't switch context, or we're in
        // a critical section where we don't want to switch contexts, then
        // delay until exiting the trap or critical section
        Processor::current().invoke_scheduler_async();
        return;
    }

    Scheduler::pick_next();
}

void Scheduler::context_switch(Thread* thread)
{
    thread->did_schedule();

    auto* from_thread = Thread::current();
    VERIFY(from_thread);

    if (from_thread == thread)
        return;

    // If the last process hasn't blocked (still marked as running),
    // mark it as runnable for the next round.
    if (from_thread->state() == Thread::State::Running)
        from_thread->set_state(Thread::State::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
    auto const msg = "Scheduler[{}]: {} -> {} [prio={}] {:p}";

    dbgln(msg,
        Processor::current_id(), from_thread->tid().value(),
        thread->tid().value(), thread->priority(), thread->regs().ip());
#endif

    auto& proc = Processor::current();
    if (!thread->is_initialized()) {
        proc.init_context(*thread, false);
        thread->set_initialized(true);
    }
    thread->set_state(Thread::State::Running);

    PerformanceManager::add_context_switch_perf_event(*from_thread, *thread);

    proc.switch_context(from_thread, thread);

    // NOTE: from_thread at this point reflects the thread we were
    // switched from, and thread reflects Thread::current()
    enter_current(*from_thread);
    VERIFY(thread == Thread::current());

    {
        SpinlockLocker lock(thread->get_lock());
        thread->dispatch_one_pending_signal();
    }
}

void Scheduler::enter_current(Thread& prev_thread)
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());

    // We already recorded the scheduled time when entering the trap, so this merely accounts for the kernel time since then
    auto scheduler_time = TimeManagement::scheduler_current_time();
    prev_thread.update_time_scheduled(scheduler_time, true, true);
    auto* current_thread = Thread::current();
    current_thread->update_time_scheduled(scheduler_time, true, false);

    // NOTE: When doing an exec(), we will context switch from and to the same thread!
    // In that case, we must not mark the previous thread as inactive.
    if (&prev_thread != current_thread)
        prev_thread.set_active(false);

    if (prev_thread.state() == Thread::State::Dying) {
        // If the thread we switched from is marked as dying, then notify
        // the finalizer. Note that as soon as we leave the scheduler lock
        // the finalizer may free from_thread!
        notify_finalizer();
    }
}

void Scheduler::leave_on_first_switch(InterruptsState previous_interrupts_state)
{
    // This is called when a thread is switched into for the first time.
    // At this point, enter_current has already been called, but because
    // Scheduler::context_switch is not in the call stack we need to
    // clean up and release locks manually here
    g_scheduler_lock.unlock(previous_interrupts_state);

    VERIFY(Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(false);
}

void Scheduler::prepare_after_exec()
{
    // This is called after exec() when doing a context "switch" into
    // the new process. This is called from Processor::assume_context
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());

    VERIFY(!Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(true);
}

void Scheduler::prepare_for_idle_loop()
{
    // This is called when the CPU finished setting up the idle loop
    // and is about to run it. We need to acquire the scheduler lock
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    g_scheduler_lock.lock();

    VERIFY(!Processor::current_in_scheduler());
    Processor::set_current_in_scheduler(true);
}

Process* Scheduler::colonel()
{
    VERIFY(s_colonel_process);
    return s_colonel_process;
}

UNMAP_AFTER_INIT void Scheduler::initialize()
{
    VERIFY(Processor::is_initialized()); // sanity check
    VERIFY(TimeManagement::is_initialized());

    g_finalizer_wait_queue = new WaitQueue;

    g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
    auto [colonel_process, idle_thread] = MUST(Process::create_kernel_process(KString::must_create("colonel"sv), idle_loop, nullptr, 1, Process::RegisterProcess::No));
    s_colonel_process = &colonel_process.leak_ref();
    idle_thread->set_priority(THREAD_PRIORITY_MIN);
    idle_thread->set_name(KString::must_create("Idle Task #0"sv));

    set_idle_thread(idle_thread);
}

UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
{
    idle_thread->set_idle_thread();
    Processor::current().set_idle_thread(*idle_thread);
    Processor::set_current_thread(*idle_thread);
}

UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
    VERIFY(cpu != 0);
    // This function is called on the bsp, but creates an idle thread for another AP
    VERIFY(Processor::is_bootstrap_processor());

    VERIFY(s_colonel_process);
    Thread* idle_thread = MUST(s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1 << cpu, false));
    VERIFY(idle_thread);
    return idle_thread;
}

void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
{
    g_total_time_scheduled.with([&](auto& total_time_scheduled) {
        total_time_scheduled.total += time_to_add;
        if (is_kernel)
            total_time_scheduled.total_kernel += time_to_add;
    });
}

void Scheduler::timer_tick(RegisterState const& regs)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current_in_irq());

    auto* current_thread = Processor::current_thread();
    if (!current_thread)
        return;

    // Sanity checks
    VERIFY(current_thread->current_trap());
    VERIFY(current_thread->current_trap()->regs == &regs);

    if (current_thread->process().is_kernel_process()) {
        // Because the previous mode when entering/exiting kernel threads never changes
        // we never update the time scheduled. So we need to update it manually on the
        // timer interrupt
        current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), true, false);
    }

    if (current_thread->previous_mode() == ExecutionMode::User && current_thread->should_die() && !current_thread->is_blocked()) {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::current_id(), *current_thread);
        current_thread->set_state(Thread::State::Dying);
        Processor::current().invoke_scheduler_async();
        return;
    }

    if (current_thread->tick())
        return;

    if (!current_thread->is_idle_thread() && !peek_next_runnable_thread()) {
        // If no other thread is ready to be scheduled we don't need to
        // switch to the idle thread. Just give the current thread another
        // time slice and let it run!
        current_thread->set_ticks_left(time_slice_for(*current_thread));
        current_thread->did_schedule();
        dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: No other threads ready, give {} another timeslice", Processor::current_id(), *current_thread);
        return;
    }

    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::current_in_irq());
    Processor::current().invoke_scheduler_async();
}

void Scheduler::invoke_async()
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(!Processor::current_in_irq());

    // Since this function is called when leaving critical sections (such
    // as a Spinlock), we need to check if we're not already doing this
    // to prevent recursion
    if (!Processor::current_in_scheduler())
        pick_next();
}

void Scheduler::notify_finalizer()
{
    if (!g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel))
        g_finalizer_wait_queue->wake_all();
}

void Scheduler::idle_loop(void*)
{
    auto& proc = Processor::current();
    dbgln("Scheduler[{}]: idle loop running", proc.id());
    VERIFY(Processor::are_interrupts_enabled());

    for (;;) {
        proc.idle_begin();
        proc.wait_for_interrupt();
        proc.idle_end();
        VERIFY_INTERRUPTS_ENABLED();
        yield();
    }
}

void Scheduler::dump_scheduler_state(bool with_stack_traces)
{
    dump_thread_list(with_stack_traces);
}

bool Scheduler::is_initialized()
{
    // The scheduler is initialized iff the idle thread exists
    return Processor::idle_thread() != nullptr;
}

TotalTimeScheduled Scheduler::get_total_time_scheduled()
{
    return g_total_time_scheduled.with([&](auto& total_time_scheduled) { return total_time_scheduled; });
}

void dump_thread_list(bool with_stack_traces)
{
    dbgln("Scheduler thread list for processor {}:", Processor::current_id());

    auto get_eip = [](Thread& thread) -> u32 {
        if (!thread.current_trap())
            return thread.regs().ip();
        return thread.get_register_dump_from_stack().ip();
    };

    Thread::for_each([&](Thread& thread) {
        auto color = thread.process().is_kernel_process() ? "\x1b[34;1m"sv : "\x1b[33;1m"sv;
        switch (thread.state()) {
        case Thread::State::Dying:
            dmesgln("  {}{:30}\x1b[0m @ {:08x} is {:14} (Finalizable: {}, nsched: {})",
                color,
                thread,
                get_eip(thread),
                thread.state_string(),
                thread.is_finalizable(),
                thread.times_scheduled());
            break;
        default:
            dmesgln("  {}{:30}\x1b[0m @ {:08x} is {:14} (Pr:{:2}, nsched: {})",
                color,
                thread,
                get_eip(thread),
                thread.state_string(),
                thread.priority(),
                thread.times_scheduled());
            break;
        }
        if (thread.state() == Thread::State::Blocked && thread.blocking_mutex()) {
            dmesgln("    Blocking on Mutex {:#x} ({})", thread.blocking_mutex(), thread.blocking_mutex()->name());
        }
        if (thread.state() == Thread::State::Blocked && thread.blocker()) {
            dmesgln("    Blocking on Blocker {:#x}", thread.blocker());
        }
#if LOCK_DEBUG
        thread.for_each_held_lock([](auto const& entry) {
            dmesgln("    Holding lock {:#x} ({}) at {}", entry.lock, entry.lock->name(), entry.lock_location);
        });
#endif
        if (with_stack_traces) {
            auto trace_or_error = thread.backtrace();
            if (!trace_or_error.is_error()) {
                auto trace = trace_or_error.release_value();
                dbgln("Backtrace:");
                kernelputstr(trace->characters(), trace->length());
            }
        }
        return IterationDecision::Continue;
    });
}

}
Kernel/Tasks/Scheduler.h (new file, 60 lines)
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Assertions.h>
#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Types.h>
#include <Kernel/Forward.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {

struct RegisterState;

extern Thread* g_finalizer;
extern WaitQueue* g_finalizer_wait_queue;
extern Atomic<bool> g_finalizer_has_work;
extern RecursiveSpinlock<LockRank::None> g_scheduler_lock;

struct TotalTimeScheduled {
    u64 total { 0 };
    u64 total_kernel { 0 };
};

class Scheduler {
public:
    static void initialize();
    static Thread* create_ap_idle_thread(u32 cpu);
    static void set_idle_thread(Thread* idle_thread);
    static void timer_tick(RegisterState const&);
    [[noreturn]] static void start();
    static void pick_next();
    static void yield();
    static void context_switch(Thread*);
    static void enter_current(Thread& prev_thread);
    static void leave_on_first_switch(InterruptsState);
    static void prepare_after_exec();
    static void prepare_for_idle_loop();
    static Process* colonel();
    static void idle_loop(void*);
    static void invoke_async();
    static void notify_finalizer();
    static Thread& pull_next_runnable_thread();
    static Thread* peek_next_runnable_thread();
    static bool dequeue_runnable_thread(Thread&, bool = false);
    static void enqueue_runnable_thread(Thread&);
    static void dump_scheduler_state(bool = false);
    static bool is_initialized();
    static TotalTimeScheduled get_total_time_scheduled();
    static void add_time_scheduled(u64, bool);
};

}
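Scheduler::yield() is safe to call from any kernel thread: if the caller is inside an IRQ handler or a critical section, it merely requests an async reschedule instead of switching immediately. A sketch of a cooperative kernel worker in the style of idle_loop() above; do_one_unit_of_work() is hypothetical:

void worker_loop_sketch()
{
    for (;;) {
        do_one_unit_of_work();      // hypothetical payload
        Kernel::Scheduler::yield(); // give other runnable threads a turn
    }
}
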
Kernel/Tasks/SyncTask.cpp
@@ -5,8 +5,8 @@
  */
 
 #include <Kernel/FileSystem/VirtualFileSystem.h>
-#include <Kernel/Process.h>
 #include <Kernel/Sections.h>
+#include <Kernel/Tasks/Process.h>
 #include <Kernel/Tasks/SyncTask.h>
 #include <Kernel/Time/TimeManagement.h>
 
Kernel/Tasks/Thread.cpp (new file, 1482 lines)
File diff suppressed because it is too large.
Kernel/Tasks/Thread.h (new file, 1339 lines)
File diff suppressed because it is too large.
Kernel/Tasks/ThreadBlockers.cpp (new file, 858 lines)
@@ -0,0 +1,858 @@
/*
 * Copyright (c) 2020, the SerenityOS developers.
 * Copyright (c) 2022, Idan Horowitz <idan.horowitz@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BuiltinWrappers.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/Net/Socket.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/Tasks/Scheduler.h>
#include <Kernel/Tasks/Thread.h>

namespace Kernel {

Thread::BlockTimeout::BlockTimeout(bool is_absolute, Duration const* time, Duration const* start_time, clockid_t clock_id)
    : m_clock_id(clock_id)
    , m_infinite(!time)
{
    if (m_infinite)
        return;
    if (*time > Duration::zero())
        m_time = *time;
    m_start_time = start_time ? *start_time : TimeManagement::the().current_time(clock_id);
    if (!is_absolute)
        m_time += m_start_time;
}
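// Worked example of the constructor above (comment only, not part of the file),
// assuming the declaration in Thread.h defaults start_time to nullptr and
// clock_id to a monotonic clock; given some Duration two_seconds:
//
//   BlockTimeout(false, &two_seconds) // relative: m_time = 2s + current time,
//                                     // i.e. a deadline two seconds from now
//   BlockTimeout(true, &two_seconds)  // absolute: m_time = 2s, used as-is
//   BlockTimeout(false, nullptr)      // no time at all: m_infinite == true
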
bool Thread::Blocker::add_to_blocker_set(Thread::BlockerSet& blocker_set, void* data)
|
||||
{
|
||||
VERIFY(!m_blocker_set);
|
||||
if (blocker_set.add_blocker(*this, data)) {
|
||||
m_blocker_set = &blocker_set;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
Thread::Blocker::~Blocker() = default;
|
||||
|
||||
void Thread::Blocker::finalize()
|
||||
{
|
||||
if (m_blocker_set)
|
||||
m_blocker_set->remove_blocker(*this);
|
||||
}
|
||||
|
||||
bool Thread::Blocker::setup_blocker()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
void Thread::Blocker::begin_blocking(Badge<Thread>)
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
VERIFY(!m_is_blocking);
|
||||
m_is_blocking = true;
|
||||
}
|
||||
|
||||
auto Thread::Blocker::end_blocking(Badge<Thread>, bool did_timeout) -> BlockResult
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
// if m_is_blocking is false here, some thread forced to
|
||||
// unblock us when we get here. This is only called from the
|
||||
// thread that was blocked.
|
||||
VERIFY(Thread::current() == m_thread);
|
||||
m_is_blocking = false;
|
||||
|
||||
was_unblocked(did_timeout);
|
||||
return block_result();
|
||||
}
|
||||
|
||||
Thread::JoinBlocker::JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value)
|
||||
: m_joinee(joinee)
|
||||
, m_joinee_exit_value(joinee_exit_value)
|
||||
, m_try_join_result(try_join_result)
|
||||
{
|
||||
}
|
||||
|
||||
bool Thread::JoinBlocker::setup_blocker()
|
||||
{
|
||||
// We need to hold our lock to avoid a race where try_join succeeds
|
||||
// but the joinee is joining immediately
|
||||
SpinlockLocker lock(m_lock);
|
||||
bool should_block = true;
|
||||
m_try_join_result = m_joinee->try_join([&]() {
|
||||
if (!add_to_blocker_set(m_joinee->m_join_blocker_set))
|
||||
should_block = false;
|
||||
});
|
||||
if (m_try_join_result.is_error())
|
||||
return false;
|
||||
return should_block;
|
||||
}
|
||||
|
||||
void Thread::JoinBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason reason)
|
||||
{
|
||||
// If we should have blocked but got here it must have been that the
|
||||
// timeout was already in the past. So we need to ask the BlockerSet
|
||||
// to supply us the information. We cannot hold the lock as unblock
|
||||
// could be called by the BlockerSet at any time!
|
||||
if (reason == UnblockImmediatelyReason::TimeoutInThePast) {
|
||||
m_joinee->m_join_blocker_set.try_unblock(*this);
|
||||
}
|
||||
}
|
||||
|
||||
bool Thread::JoinBlocker::unblock(void* value, bool from_add_blocker)
|
||||
{
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return false;
|
||||
m_did_unblock = true;
|
||||
m_joinee_exit_value = value;
|
||||
do_set_interrupted_by_death();
|
||||
}
|
||||
|
||||
if (!from_add_blocker)
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
Thread::WaitQueueBlocker::WaitQueueBlocker(WaitQueue& wait_queue, StringView block_reason)
|
||||
: m_wait_queue(wait_queue)
|
||||
, m_block_reason(block_reason)
|
||||
{
|
||||
}
|
||||
|
||||
bool Thread::WaitQueueBlocker::setup_blocker()
|
||||
{
|
||||
return add_to_blocker_set(m_wait_queue);
|
||||
}
|
||||
|
||||
Thread::WaitQueueBlocker::~WaitQueueBlocker() = default;
|
||||
|
||||
bool Thread::WaitQueueBlocker::unblock()
|
||||
{
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return false;
|
||||
m_did_unblock = true;
|
||||
}
|
||||
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
Thread::FutexBlocker::FutexBlocker(FutexQueue& futex_queue, u32 bitset)
|
||||
: m_futex_queue(futex_queue)
|
||||
, m_bitset(bitset)
|
||||
{
|
||||
}
|
||||
|
||||
bool Thread::FutexBlocker::setup_blocker()
|
||||
{
|
||||
return add_to_blocker_set(m_futex_queue);
|
||||
}
|
||||
|
||||
Thread::FutexBlocker::~FutexBlocker() = default;
|
||||
|
||||
void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
|
||||
{
|
||||
VERIFY(m_lock.is_locked_by_current_processor());
|
||||
set_blocker_set_raw_locked(&futex_queue);
|
||||
// We can now release the lock
|
||||
m_lock.unlock(m_previous_interrupts_state);
|
||||
}
|
||||
|
||||
bool Thread::FutexBlocker::unblock_bitset(u32 bitset)
|
||||
{
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock || (bitset != FUTEX_BITSET_MATCH_ANY && (m_bitset & bitset) == 0))
|
||||
return false;
|
||||
|
||||
m_did_unblock = true;
|
||||
}
|
||||
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Thread::FutexBlocker::unblock(bool force)
|
||||
{
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return force;
|
||||
m_did_unblock = true;
|
||||
}
|
||||
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
Thread::OpenFileDescriptionBlocker::OpenFileDescriptionBlocker(OpenFileDescription& description, BlockFlags flags, BlockFlags& unblocked_flags)
|
||||
: m_blocked_description(description)
|
||||
, m_flags(flags)
|
||||
, m_unblocked_flags(unblocked_flags)
|
||||
{
|
||||
}
|
||||
|
||||
bool Thread::OpenFileDescriptionBlocker::setup_blocker()
|
||||
{
|
||||
m_unblocked_flags = BlockFlags::None;
|
||||
return add_to_blocker_set(m_blocked_description->blocker_set());
|
||||
}
|
||||
|
||||
bool Thread::OpenFileDescriptionBlocker::unblock_if_conditions_are_met(bool from_add_blocker, void*)
|
||||
{
|
||||
auto unblock_flags = m_blocked_description->should_unblock(m_flags);
|
||||
if (unblock_flags == BlockFlags::None)
|
||||
return false;
|
||||
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return false;
|
||||
m_did_unblock = true;
|
||||
m_unblocked_flags = unblock_flags;
|
||||
}
|
||||
|
||||
if (!from_add_blocker)
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
void Thread::OpenFileDescriptionBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason reason)
|
||||
{
|
||||
if (reason == UnblockImmediatelyReason::UnblockConditionAlreadyMet)
|
||||
return;
|
||||
|
||||
// If we should have blocked but got here it must have been that the
|
||||
// timeout was already in the past. So we need to ask the BlockerSet
|
||||
// to supply us the information. We cannot hold the lock as unblock
|
||||
// could be called by the BlockerSet at any time!
|
||||
VERIFY(reason == UnblockImmediatelyReason::TimeoutInThePast);
|
||||
|
||||
// Just call unblock_if_conditions_are_met here because we will query the file description
|
||||
// for the data and don't need any input from the FileBlockerSet.
|
||||
// However, it's possible that if timeout_in_past is true then FileBlockerSet
|
||||
// may call us at any given time, so our call to unblock here may fail.
|
||||
// Either way, unblock will be called at least once, which provides
|
||||
// all the data we need.
|
||||
unblock_if_conditions_are_met(false, nullptr);
|
||||
}
|
||||
|
||||
OpenFileDescription const& Thread::OpenFileDescriptionBlocker::blocked_description() const
|
||||
{
|
||||
return m_blocked_description;
|
||||
}
|
||||
|
||||
Thread::AcceptBlocker::AcceptBlocker(OpenFileDescription& description, BlockFlags& unblocked_flags)
|
||||
: OpenFileDescriptionBlocker(description, BlockFlags::Accept | BlockFlags::Exception, unblocked_flags)
|
||||
{
|
||||
}
|
||||
|
||||
Thread::ConnectBlocker::ConnectBlocker(OpenFileDescription& description, BlockFlags& unblocked_flags)
|
||||
: OpenFileDescriptionBlocker(description, BlockFlags::Connect | BlockFlags::Exception, unblocked_flags)
|
||||
{
|
||||
}
|
||||
|
||||
Thread::WriteBlocker::WriteBlocker(OpenFileDescription& description, BlockFlags& unblocked_flags)
|
||||
: OpenFileDescriptionBlocker(description, BlockFlags::Write | BlockFlags::Exception, unblocked_flags)
|
||||
{
|
||||
}
|
||||
|
||||
auto Thread::WriteBlocker::override_timeout(BlockTimeout const& timeout) -> BlockTimeout const&
|
||||
{
|
||||
auto const& description = blocked_description();
|
||||
if (description.is_socket()) {
|
||||
auto const& socket = *description.socket();
|
||||
if (socket.has_send_timeout()) {
|
||||
Duration send_timeout = socket.send_timeout();
|
||||
m_timeout = BlockTimeout(false, &send_timeout, timeout.start_time(), timeout.clock_id());
|
||||
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
|
||||
return m_timeout;
|
||||
}
|
||||
}
|
||||
return timeout;
|
||||
}
|
||||
|
||||
Thread::ReadBlocker::ReadBlocker(OpenFileDescription& description, BlockFlags& unblocked_flags)
|
||||
: OpenFileDescriptionBlocker(description, BlockFlags::Read | BlockFlags::Exception, unblocked_flags)
|
||||
{
|
||||
}
|
||||
|
||||
auto Thread::ReadBlocker::override_timeout(BlockTimeout const& timeout) -> BlockTimeout const&
|
||||
{
|
||||
auto const& description = blocked_description();
|
||||
if (description.is_socket()) {
|
||||
auto const& socket = *description.socket();
|
||||
if (socket.has_receive_timeout()) {
|
||||
Duration receive_timeout = socket.receive_timeout();
|
||||
m_timeout = BlockTimeout(false, &receive_timeout, timeout.start_time(), timeout.clock_id());
|
||||
if (timeout.is_infinite() || (!m_timeout.is_infinite() && m_timeout.absolute_time() < timeout.absolute_time()))
|
||||
return m_timeout;
|
||||
}
|
||||
}
|
||||
return timeout;
|
||||
}
|
||||
|
||||
Thread::SleepBlocker::SleepBlocker(BlockTimeout const& deadline, Duration* remaining)
|
||||
: m_deadline(deadline)
|
||||
, m_remaining(remaining)
|
||||
{
|
||||
}
|
||||
|
||||
auto Thread::SleepBlocker::override_timeout(BlockTimeout const& timeout) -> BlockTimeout const&
|
||||
{
|
||||
VERIFY(timeout.is_infinite()); // A timeout should not be provided
|
||||
// To simplify things only use the sleep deadline.
|
||||
return m_deadline;
|
||||
}
|
||||
|
||||
void Thread::SleepBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason reason)
|
||||
{
|
||||
// SleepBlocker::should_block should always return true, so timeout
|
||||
// in the past is the only valid case when this function is called
|
||||
VERIFY(reason == UnblockImmediatelyReason::TimeoutInThePast);
|
||||
calculate_remaining();
|
||||
}
|
||||
|
||||
void Thread::SleepBlocker::was_unblocked(bool did_timeout)
|
||||
{
|
||||
Blocker::was_unblocked(did_timeout);
|
||||
|
||||
calculate_remaining();
|
||||
}
|
||||
|
||||
void Thread::SleepBlocker::calculate_remaining()
|
||||
{
|
||||
if (!m_remaining)
|
||||
return;
|
||||
auto time_now = TimeManagement::the().current_time(m_deadline.clock_id());
|
||||
if (time_now < m_deadline.absolute_time())
|
||||
*m_remaining = m_deadline.absolute_time() - time_now;
|
||||
else
|
||||
*m_remaining = {};
|
||||
}
|
||||
|
||||
Thread::BlockResult Thread::SleepBlocker::block_result()
|
||||
{
|
||||
auto result = Blocker::block_result();
|
||||
if (result == Thread::BlockResult::InterruptedByTimeout)
|
||||
return Thread::BlockResult::WokeNormally;
|
||||
return result;
|
||||
}
|
||||
|
||||
Thread::SelectBlocker::SelectBlocker(FDVector& fds)
|
||||
: m_fds(fds)
|
||||
{
|
||||
}
|
||||
|
||||
bool Thread::SelectBlocker::setup_blocker()
|
||||
{
|
||||
bool should_block = true;
|
||||
for (auto& fd_entry : m_fds) {
|
||||
fd_entry.unblocked_flags = FileBlocker::BlockFlags::None;
|
||||
|
||||
if (!should_block)
|
||||
continue;
|
||||
if (!fd_entry.description) {
|
||||
should_block = false;
|
||||
continue;
|
||||
}
|
||||
if (!fd_entry.description->blocker_set().add_blocker(*this, &fd_entry))
|
||||
should_block = false;
|
||||
}
|
||||
return should_block;
|
||||
}
|
||||
|
||||
Thread::SelectBlocker::~SelectBlocker() = default;
|
||||
|
||||
void Thread::SelectBlocker::finalize()
|
||||
{
|
||||
Thread::FileBlocker::finalize();
|
||||
for (auto& fd_entry : m_fds) {
|
||||
if (fd_entry.description)
|
||||
fd_entry.description->blocker_set().remove_blocker(*this);
|
||||
}
|
||||
}
|
||||
|
||||
void Thread::SelectBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason reason)
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return;
|
||||
m_did_unblock = true;
|
||||
if (reason == UnblockImmediatelyReason::UnblockConditionAlreadyMet) {
|
||||
auto count = collect_unblocked_flags();
|
||||
VERIFY(count > 0);
|
||||
}
|
||||
}
|
||||
|
||||
bool Thread::SelectBlocker::unblock_if_conditions_are_met(bool from_add_blocker, void* data)
|
||||
{
|
||||
VERIFY(data); // data is a pointer to an entry in the m_fds vector
|
||||
auto& fd_info = *static_cast<FDInfo*>(data);
|
||||
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return false;
|
||||
|
||||
VERIFY(fd_info.description);
|
||||
auto unblock_flags = fd_info.description->should_unblock(fd_info.block_flags);
|
||||
if (unblock_flags == BlockFlags::None)
|
||||
return false;
|
||||
|
||||
m_did_unblock = true;
|
||||
|
||||
// We need to store unblock_flags here, otherwise someone else
|
||||
// affecting this file descriptor could change the information
|
||||
// between now and when was_unblocked is called!
|
||||
fd_info.unblocked_flags = unblock_flags;
|
||||
}
|
||||
|
||||
// Only do this once for the first one
|
||||
if (!from_add_blocker)
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t Thread::SelectBlocker::collect_unblocked_flags()
|
||||
{
|
||||
size_t count = 0;
|
||||
for (auto& fd_entry : m_fds) {
|
||||
VERIFY(fd_entry.block_flags != FileBlocker::BlockFlags::None);
|
||||
|
||||
if (!fd_entry.description) {
|
||||
count++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// unblock will have set at least the first descriptor's unblock
|
||||
// flags that triggered the unblock. Make sure we don't discard that
|
||||
// information as it may have changed by now!
|
||||
if (fd_entry.unblocked_flags == FileBlocker::BlockFlags::None)
|
||||
fd_entry.unblocked_flags = fd_entry.description->should_unblock(fd_entry.block_flags);
|
||||
|
||||
if (fd_entry.unblocked_flags != FileBlocker::BlockFlags::None)
|
||||
count++;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
void Thread::SelectBlocker::was_unblocked(bool did_timeout)
|
||||
{
|
||||
Blocker::was_unblocked(did_timeout);
|
||||
if (!did_timeout && !was_interrupted()) {
|
||||
{
|
||||
SpinlockLocker lock(m_lock);
|
||||
VERIFY(m_did_unblock);
|
||||
}
|
||||
size_t count = collect_unblocked_flags();
|
||||
// If we were blocked and didn't time out, we should have at least one unblocked fd!
|
||||
VERIFY(count > 0);
|
||||
}
|
||||
}
|
||||
|
||||
Thread::SignalBlocker::SignalBlocker(sigset_t pending_set, siginfo_t& result)
    : m_pending_set(pending_set)
    , m_result(result)
{
}

void Thread::SignalBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason unblock_immediately_reason)
{
    if (unblock_immediately_reason != UnblockImmediatelyReason::TimeoutInThePast)
        return;
    // If the specified timeout is 0 the caller is simply trying to poll once for pending signals,
    // so calling check_pending_signals should populate the requested information.
    check_pending_signals(false);
}

bool Thread::SignalBlocker::setup_blocker()
{
    return add_to_blocker_set(thread().m_signal_blocker_set);
}

bool Thread::SignalBlocker::check_pending_signals(bool from_add_blocker)
{
    {
        SpinlockLocker lock(m_lock);
        if (m_did_unblock)
            return false;

        auto pending_signals = thread().pending_signals() & m_pending_set;

        // Also unblock if we have just "handled" that signal and are in the process
        // of running its signal handler (i.e. we just unmarked the signal as pending).
        if (thread().m_currently_handled_signal)
            pending_signals |= (1 << (thread().m_currently_handled_signal - 1)) & m_pending_set;

        auto matching_pending_signal = bit_scan_forward(pending_signals);

        if (matching_pending_signal == 0)
            return false;

        m_did_unblock = true;
        m_result = {};
        m_result.si_signo = matching_pending_signal;
        m_result.si_code = 0; // FIXME: How can we determine this?
    }

    if (!from_add_blocker)
        unblock_from_blocker();
    return true;
}
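
The bit arithmetic above is easy to misread because signal numbers are 1-based while bit positions are 0-based; bit_scan_forward returns the 1-based index of the lowest set bit, or 0 if none is set. A standalone illustration of the same convention (plain C++, not kernel code; the helper below is a stand-in for AK's bit_scan_forward):

// Minimal illustration of the 1-based signal bitmap convention.
#include <csignal>
#include <cstdint>
#include <cstdio>

// Stand-in for AK's bit_scan_forward: 1-based index of lowest set bit, 0 if none.
static int bit_scan_forward(uint32_t value) { return __builtin_ffs(static_cast<int>(value)); }

int main()
{
    uint32_t pending = 0;
    pending |= 1u << (SIGCHLD - 1); // mark SIGCHLD pending (bit index is signal - 1)

    uint32_t watched = (1u << (SIGCHLD - 1)) | (1u << (SIGUSR1 - 1)); // signals we wait for
    int match = bit_scan_forward(pending & watched);
    if (match != 0)
        printf("lowest matching pending signal: %d (SIGCHLD == %d)\n", match, SIGCHLD);
}
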
Thread::WaitBlockerSet::ProcessBlockInfo::ProcessBlockInfo(NonnullRefPtr<Process>&& process, WaitBlocker::UnblockFlags flags, u8 signal)
    : process(move(process))
    , flags(flags)
    , signal(signal)
{
}

Thread::WaitBlockerSet::ProcessBlockInfo::~ProcessBlockInfo() = default;

void Thread::WaitBlockerSet::try_unblock(Thread::WaitBlocker& blocker)
{
    SpinlockLocker lock(m_lock);
    // Check if we have any processes pending
    for (size_t i = 0; i < m_processes.size(); i++) {
        auto& info = m_processes[i];
        // We need to call unblock as if we were called from add_blocker
        // so that we don't trigger a context switch by yielding!
        if (info.was_waited && blocker.is_wait())
            continue; // This state was already waited on, do not unblock
        if (blocker.unblock(info.process, info.flags, info.signal, true)) {
            if (blocker.is_wait()) {
                if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated) {
                    m_processes.remove(i);
                    dbgln_if(WAITBLOCK_DEBUG, "WaitBlockerSet[{}] terminated, remove {}", m_process, *info.process);
                } else {
                    dbgln_if(WAITBLOCK_DEBUG, "WaitBlockerSet[{}] terminated, mark as waited {}", m_process, *info.process);
                    info.was_waited = true;
                }
            }
            break;
        }
    }
}

void Thread::WaitBlockerSet::disowned_by_waiter(Process& process)
{
    SpinlockLocker lock(m_lock);
    if (m_finalized)
        return;
    for (size_t i = 0; i < m_processes.size();) {
        auto& info = m_processes[i];
        if (info.process == &process) {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Wait);
                auto& blocker = static_cast<WaitBlocker&>(b);
                bool did_unblock = blocker.unblock(info.process, WaitBlocker::UnblockFlags::Disowned, 0, false);
                VERIFY(did_unblock); // disowning must unblock everyone
                return true;
            });
            dbgln_if(WAITBLOCK_DEBUG, "WaitBlockerSet[{}] disowned {}", m_process, *info.process);
            m_processes.remove(i);
            continue;
        }

        i++;
    }
}

bool Thread::WaitBlockerSet::unblock(Process& process, WaitBlocker::UnblockFlags flags, u8 signal)
{
    VERIFY(flags != WaitBlocker::UnblockFlags::Disowned);

    bool did_unblock_any = false;
    bool did_wait = false;
    bool was_waited_already = false;

    SpinlockLocker lock(m_lock);
    if (m_finalized)
        return false;
    if (flags != WaitBlocker::UnblockFlags::Terminated) {
        // First check if this state was already waited on
        for (auto& info : m_processes) {
            if (info.process == &process) {
                was_waited_already = info.was_waited;
                break;
            }
        }
    }

    unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
        VERIFY(b.blocker_type() == Blocker::Type::Wait);
        auto& blocker = static_cast<WaitBlocker&>(b);
        if (was_waited_already && blocker.is_wait())
            return false; // This state was already waited on, do not unblock
        if (blocker.unblock(process, flags, signal, false)) {
            did_wait |= blocker.is_wait(); // anyone requesting a wait
            did_unblock_any = true;
            return true;
        }
        return false;
    });

    // If no one has waited (yet), or this wasn't a wait, or if it's anything other than
    // UnblockFlags::Terminated, then add it to our list
    if (!did_unblock_any || !did_wait || flags != WaitBlocker::UnblockFlags::Terminated) {
        bool updated_existing = false;
        for (auto& info : m_processes) {
            if (info.process == &process) {
                VERIFY(info.flags != WaitBlocker::UnblockFlags::Terminated);
                info.flags = flags;
                info.signal = signal;
                info.was_waited = did_wait;
                dbgln_if(WAITBLOCK_DEBUG, "WaitBlockerSet[{}] update {} flags={}, waited={}", m_process, process, (int)flags, info.was_waited);
                updated_existing = true;
                break;
            }
        }
        if (!updated_existing) {
            dbgln_if(WAITBLOCK_DEBUG, "WaitBlockerSet[{}] add {} flags: {}", m_process, process, (int)flags);
            m_processes.append(ProcessBlockInfo(process, flags, signal));
        }
    }
    return did_unblock_any;
}
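
All of this bookkeeping implements POSIX wait semantics: a Terminated state satisfies a WEXITED waiter and is then removed, while Stopped/Continued states only satisfy waiters that asked for them via WUNTRACED/WSTOPPED or WCONTINUED. A hypothetical userspace sequence that exercises the Terminated path:

// Hypothetical userspace sketch of the wait semantics WaitBlockerSet implements.
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main()
{
    pid_t child = fork();
    if (child == 0) {
        // Child: exit with a recognizable status (reported to the parent as Terminated).
        _exit(42);
    }

    int status = 0;
    // Blocks on a WaitBlocker until the child changes state; WEXITED is implied by waitpid().
    pid_t reaped = waitpid(child, &status, WUNTRACED | WCONTINUED);
    if (reaped == child && WIFEXITED(status))
        printf("child %d exited with %d\n", (int)reaped, WEXITSTATUS(status));
    return 0;
}
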
bool Thread::WaitBlockerSet::should_add_blocker(Blocker& b, void*)
{
    // NOTE: m_lock is held already!
    if (m_finalized)
        return false;
    VERIFY(b.blocker_type() == Blocker::Type::Wait);
    auto& blocker = static_cast<WaitBlocker&>(b);
    // See if we can match any process immediately
    for (size_t i = 0; i < m_processes.size(); i++) {
        auto& info = m_processes[i];
        if (blocker.unblock(info.process, info.flags, info.signal, true)) {
            // Only remove the entry if UnblockFlags::Terminated
            if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated && blocker.is_wait())
                m_processes.remove(i);
            return false;
        }
    }
    return true;
}

void Thread::WaitBlockerSet::finalize()
{
    SpinlockLocker lock(m_lock);
    VERIFY(!m_finalized);
    m_finalized = true;

    // Clear the list of processes here so we can drop the references to them
    m_processes.clear();

    // NOTE: Kernel processes don't have a leaked ref on them.
    if (!m_process.is_kernel_process()) {
        // No more waiters, drop the last reference immediately. This may
        // cause us to be destructed ourselves!
        VERIFY(m_process.ref_count() > 0);
        m_process.unref();
    }
}

Thread::WaitBlocker::WaitBlocker(int wait_options, Variant<Empty, NonnullRefPtr<Process>, NonnullRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result)
    : m_wait_options(wait_options)
    , m_result(result)
    , m_waitee(move(waitee))
{
}

bool Thread::WaitBlocker::setup_blocker()
{
    if (m_wait_options & WNOHANG)
        return false;
    return add_to_blocker_set(Process::current().wait_blocker_set());
}

void Thread::WaitBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason)
{
    Process::current().wait_blocker_set().try_unblock(*this);
}

void Thread::WaitBlocker::was_unblocked(bool)
{
    bool got_sigchld, try_unblock;
    {
        SpinlockLocker lock(m_lock);
        try_unblock = !m_did_unblock;
        got_sigchld = m_got_sigchild;
    }

    if (try_unblock)
        Process::current().wait_blocker_set().try_unblock(*this);

    // If we were interrupted by SIGCHLD (which gets special handling
    // here) we're not going to return with EINTR. But we're going to
    // deliver SIGCHLD (only) here.
    auto* current_thread = Thread::current();
    if (got_sigchld && current_thread->state() != State::Stopped)
        current_thread->try_dispatch_one_pending_signal(SIGCHLD);
}

void Thread::WaitBlocker::do_was_disowned()
{
    VERIFY(!m_did_unblock);
    m_did_unblock = true;
    m_result = ECHILD;
}

void Thread::WaitBlocker::do_set_result(siginfo_t const& result)
{
    VERIFY(!m_did_unblock);
    m_did_unblock = true;
    m_result = result;

    if (do_get_interrupted_by_signal() == SIGCHLD) {
        // This makes it so that wait() will return normally despite the
        // fact that SIGCHLD was delivered. Calling do_clear_interrupted_by_signal
        // will disable dispatching signals in Thread::block and prevent
        // it from returning with EINTR. We will then manually dispatch
        // SIGCHLD (and only SIGCHLD) in was_unblocked.
        m_got_sigchild = true;
        do_clear_interrupted_by_signal();
    }
}
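
The SIGCHLD special case above preserves a POSIX guarantee: when the very signal that woke the waiter is SIGCHLD, wait()/waitpid() still returns the child's status instead of failing with EINTR, and the handler is dispatched separately. A hypothetical userspace sketch of the observable behavior:

// Hypothetical userspace sketch of the behavior do_set_result() preserves:
// even with a SIGCHLD handler installed, waitpid() returns the child's status
// normally instead of failing with EINTR when SIGCHLD itself is what woke it.
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void on_sigchld(int) { /* handler runs, but waitpid still succeeds */ }

int main()
{
    struct sigaction sa = {};
    sa.sa_handler = on_sigchld;
    sigaction(SIGCHLD, &sa, nullptr);

    if (fork() == 0)
        _exit(0);

    int status = 0;
    pid_t pid = waitpid(-1, &status, 0); // returns the pid, not -1/EINTR
    printf("waitpid returned %d\n", (int)pid);
}
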
bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker)
{
    VERIFY(flags != UnblockFlags::Terminated || signal == 0); // signal argument should be ignored for Terminated

    bool do_not_unblock = m_waitee.visit(
        [&](NonnullRefPtr<Process> const& waitee_process) {
            return &process != waitee_process;
        },
        [&](NonnullRefPtr<ProcessGroup> const& waitee_process_group) {
            return waitee_process_group->pgid() != process.pgid();
        },
        [&](Empty const&) {
            // Generic waiter won't be unblocked by disown
            return flags == UnblockFlags::Disowned;
        });

    if (do_not_unblock)
        return false;

    switch (flags) {
    case UnblockFlags::Terminated:
        if (!(m_wait_options & WEXITED))
            return false;
        break;
    case UnblockFlags::Stopped:
        if (!(m_wait_options & WSTOPPED))
            return false;
        if (!(m_wait_options & WUNTRACED) && !process.is_traced())
            return false;
        break;
    case UnblockFlags::Continued:
        if (!(m_wait_options & WCONTINUED))
            return false;
        if (!(m_wait_options & WUNTRACED) && !process.is_traced())
            return false;
        break;
    case UnblockFlags::Disowned:
        SpinlockLocker lock(m_lock);
        // Disowning must unblock anyone waiting for this process explicitly
        if (!m_did_unblock)
            do_was_disowned();
        return true;
    }

    if (flags == UnblockFlags::Terminated) {
        VERIFY(process.is_dead());

        SpinlockLocker lock(m_lock);
        if (m_did_unblock)
            return false;
        // Up until this point, this function may have been called
        // more than once!
        do_set_result(process.wait_info());
    } else {
        siginfo_t siginfo {};
        {
            SpinlockLocker lock(g_scheduler_lock);
            auto credentials = process.credentials();
            // We need to gather the information before we release the scheduler lock!
            siginfo.si_signo = SIGCHLD;
            siginfo.si_pid = process.pid().value();
            siginfo.si_uid = credentials->uid().value();
            siginfo.si_status = signal;

            switch (flags) {
            case UnblockFlags::Terminated:
            case UnblockFlags::Disowned:
                VERIFY_NOT_REACHED();
            case UnblockFlags::Stopped:
                siginfo.si_code = CLD_STOPPED;
                break;
            case UnblockFlags::Continued:
                siginfo.si_code = CLD_CONTINUED;
                break;
            }
        }

        SpinlockLocker lock(m_lock);
        if (m_did_unblock)
            return false;
        // Up until this point, this function may have been called
        // more than once!
        do_set_result(siginfo);
    }

    if (!from_add_blocker) {
        // Only call unblock if we weren't called from within add_to_blocker_set!
        VERIFY(flags != UnblockFlags::Disowned);
        unblock_from_blocker();
    }
    // Because this may be called from add_blocker, in which case we should
    // not actually try to unblock the thread (it hasn't been blocked yet),
    // we need to return true anyway
    return true;
}

Thread::FlockBlocker::FlockBlocker(NonnullRefPtr<Inode> inode, flock const& flock)
    : m_inode(move(inode))
    , m_flock(flock)
{
}

void Thread::FlockBlocker::will_unblock_immediately_without_blocking(UnblockImmediatelyReason reason)
{
    VERIFY(reason == UnblockImmediatelyReason::UnblockConditionAlreadyMet);
}

bool Thread::FlockBlocker::setup_blocker()
{
    return add_to_blocker_set(m_inode->flock_blocker_set());
}

bool Thread::FlockBlocker::try_unblock(bool from_add_blocker)
{
    if (!m_inode->can_apply_flock(m_flock))
        return false;

    {
        SpinlockLocker lock(m_lock);
        if (m_did_unblock)
            return false;
        m_did_unblock = true;
    }

    if (!from_add_blocker)
        unblock_from_blocker();
    return true;
}

}
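
FlockBlocker, by contrast, parks a thread until an advisory file lock can be applied to the inode. A hypothetical userspace counterpart using a blocking fcntl(F_SETLKW) request with the same struct flock the blocker stores:

// Hypothetical userspace sketch: a blocking fcntl(F_SETLKW) request like this
// is what ends up waiting on the inode's flock blocker set.
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main()
{
    int fd = open("/tmp/lockfile", O_RDWR | O_CREAT, 0644);
    if (fd < 0)
        return 1;

    struct flock lock = {};
    lock.l_type = F_WRLCK;   // exclusive lock
    lock.l_whence = SEEK_SET;
    lock.l_start = 0;
    lock.l_len = 0;          // length 0 => lock the whole file

    if (fcntl(fd, F_SETLKW, &lock) == 0) // W = wait: block until grantable
        puts("lock acquired");
    close(fd);
    return 0;
}
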

24
Kernel/Tasks/ThreadTracer.cpp
Normal file

@@ -0,0 +1,24 @@
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Tasks/ThreadTracer.h>

namespace Kernel {

ThreadTracer::ThreadTracer(ProcessID tracer_pid)
    : m_tracer_pid(tracer_pid)
{
}

void ThreadTracer::set_regs(RegisterState const& regs)
{
    PtraceRegisters r {};
    copy_kernel_registers_into_ptrace_registers(r, regs);
    m_regs = r;
}

}

52
Kernel/Tasks/ThreadTracer.h
Normal file

@@ -0,0 +1,52 @@
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Optional.h>
#include <AK/OwnPtr.h>
#include <Kernel/Arch/RegisterState.h>
#include <Kernel/Forward.h>
#include <LibC/sys/arch/regs.h>

namespace Kernel {

class ThreadTracer {
public:
    static ErrorOr<NonnullOwnPtr<ThreadTracer>> try_create(ProcessID tracer) { return adopt_nonnull_own_or_enomem(new (nothrow) ThreadTracer(tracer)); }

    ProcessID tracer_pid() const { return m_tracer_pid; }
    bool has_pending_signal(u32 signal) const { return (m_pending_signals & (1 << (signal - 1))) != 0; }
    void set_signal(u32 signal) { m_pending_signals |= (1 << (signal - 1)); }
    void unset_signal(u32 signal) { m_pending_signals &= ~(1 << (signal - 1)); }

    bool is_tracing_syscalls() const { return m_trace_syscalls; }
    void set_trace_syscalls(bool val) { m_trace_syscalls = val; }

    void set_regs(RegisterState const& regs);
    void set_regs(PtraceRegisters const& regs) { m_regs = regs; }
    bool has_regs() const { return m_regs.has_value(); }
    PtraceRegisters const& regs() const
    {
        VERIFY(m_regs.has_value());
        return m_regs.value();
    }

private:
    explicit ThreadTracer(ProcessID);

    ProcessID m_tracer_pid { -1 };

    // This is a bitmap for signals that are sent from the tracer to the tracee
    // TODO: Since we do not currently support sending signals
    // to the tracee via PT_CONTINUE, this bitmap is always zeroed
    u32 m_pending_signals { 0 };

    bool m_trace_syscalls { false };
    Optional<PtraceRegisters> m_regs;
};

}
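
A brief, hypothetical sketch of how a ptrace-style code path might drive this class; the function name and the surrounding plumbing are illustrative, not part of this commit:

// Hypothetical sketch (not actual ptrace plumbing): consulting tracer state
// when a tracee stops, given the tracee's saved RegisterState.
void on_tracee_stopped(Kernel::ThreadTracer& tracer, Kernel::RegisterState const& regs)
{
    tracer.set_regs(regs); // snapshot registers so a PT_GETREGS-style query can report them

    if (tracer.is_tracing_syscalls()) {
        // ... notify the tracing process (tracer.tracer_pid()) of syscall entry/exit ...
    }

    if (tracer.has_pending_signal(SIGTRAP))
        tracer.unset_signal(SIGTRAP); // consume the queued signal
}
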

96
Kernel/Tasks/WaitQueue.cpp
Normal file

@@ -0,0 +1,96 @@
/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Debug.h>
#include <Kernel/Tasks/Thread.h>
#include <Kernel/Tasks/WaitQueue.h>

namespace Kernel {

bool WaitQueue::should_add_blocker(Thread::Blocker& b, void*)
{
    VERIFY(m_lock.is_locked());
    VERIFY(b.blocker_type() == Thread::Blocker::Type::Queue);
    if (m_wake_requested) {
        m_wake_requested = false;
        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: do not block thread {}", this, b.thread());
        return false;
    }
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: should block thread {}", this, b.thread());
    return true;
}

u32 WaitQueue::wake_one()
{
    u32 did_wake = 0;
    SpinlockLocker lock(m_lock);
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one", this);
    bool did_unblock_one = unblock_all_blockers_whose_conditions_are_met_locked([&](Thread::Blocker& b, void*, bool& stop_iterating) {
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Queue);
        auto& blocker = static_cast<Thread::WaitQueueBlocker&>(b);
        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one unblocking {}", this, blocker.thread());
        if (blocker.unblock()) {
            stop_iterating = true;
            did_wake = 1;
            return true;
        }
        return false;
    });
    m_wake_requested = !did_unblock_one;
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one woke {} threads", this, did_wake);
    return did_wake;
}

u32 WaitQueue::wake_n(u32 wake_count)
{
    if (wake_count == 0)
        return 0; // should we assert instead?
    SpinlockLocker lock(m_lock);
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({})", this, wake_count);
    u32 did_wake = 0;

    bool did_unblock_some = unblock_all_blockers_whose_conditions_are_met_locked([&](Thread::Blocker& b, void*, bool& stop_iterating) {
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Queue);
        auto& blocker = static_cast<Thread::WaitQueueBlocker&>(b);
        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n unblocking {}", this, blocker.thread());
        VERIFY(did_wake < wake_count);
        if (blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    m_wake_requested = !did_unblock_some;
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({}) woke {} threads", this, wake_count, did_wake);
    return did_wake;
}

u32 WaitQueue::wake_all()
{
    SpinlockLocker lock(m_lock);

    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all", this);
    u32 did_wake = 0;

    bool did_unblock_any = unblock_all_blockers_whose_conditions_are_met_locked([&](Thread::Blocker& b, void*, bool&) {
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Queue);
        auto& blocker = static_cast<Thread::WaitQueueBlocker&>(b);

        dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all unblocking {}", this, blocker.thread());

        if (blocker.unblock()) {
            did_wake++;
            return true;
        }
        return false;
    });
    m_wake_requested = !did_unblock_any;
    dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all woke {} threads", this, did_wake);
    return did_wake;
}

}

40
Kernel/Tasks/WaitQueue.h
Normal file

@@ -0,0 +1,40 @@
/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Atomic.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Tasks/Thread.h>

namespace Kernel {

class WaitQueue final : public Thread::BlockerSet {
public:
    u32 wake_one();
    u32 wake_n(u32 wake_count);
    u32 wake_all();

    template<class... Args>
    Thread::BlockResult wait_on(Thread::BlockTimeout const& timeout, Args&&... args)
    {
        return Thread::current()->block<Thread::WaitQueueBlocker>(timeout, *this, forward<Args>(args)...);
    }

    template<class... Args>
    void wait_forever(Args&&... args)
    {
        (void)Thread::current()->block<Thread::WaitQueueBlocker>({}, *this, forward<Args>(args)...);
    }

protected:
    virtual bool should_add_blocker(Thread::Blocker& b, void*) override;

private:
    bool m_wake_requested { false };
};

}
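
The m_wake_requested flag closes a race: if a wake arrives before the would-be sleeper has enqueued its blocker, should_add_blocker() consumes the flag and the sleeper simply does not block, so the wake is not lost. WorkQueue.cpp below is the in-tree consumer of this pattern; as a hypothetical sketch of the pairing (comment form only, since a self-contained runnable kernel example is not possible here):

// Hypothetical in-kernel sketch (not part of this commit): pairing a sleeper
// with a waker. If wake_one() fires first, the next wait simply returns.
//
// Sleeper, e.g. a kernel task's main loop:
//     [[maybe_unused]] auto result = m_wait_queue.wait_on({}); // no timeout
//
// Waker, e.g. another thread or an interrupt-driven path:
//     m_wait_queue.wake_one(); // releases exactly one waiter, if any
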

59
Kernel/Tasks/WorkQueue.cpp
Normal file

@@ -0,0 +1,59 @@
/*
 * Copyright (c) 2021, the SerenityOS developers.
 * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/Processor.h>
#include <Kernel/Sections.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/Tasks/WaitQueue.h>
#include <Kernel/Tasks/WorkQueue.h>

namespace Kernel {

WorkQueue* g_io_work;
WorkQueue* g_ata_work;

UNMAP_AFTER_INIT void WorkQueue::initialize()
{
    g_io_work = new WorkQueue("IO WorkQueue Task"sv);
    g_ata_work = new WorkQueue("ATA WorkQueue Task"sv);
}

UNMAP_AFTER_INIT WorkQueue::WorkQueue(StringView name)
{
    auto name_kstring = KString::try_create(name);
    if (name_kstring.is_error())
        TODO();
    auto [_, thread] = Process::create_kernel_process(name_kstring.release_value(), [this] {
        for (;;) {
            WorkItem* item;
            bool have_more;
            m_items.with([&](auto& items) {
                item = items.take_first();
                have_more = !items.is_empty();
            });
            if (item) {
                item->function();
                delete item;

                if (have_more)
                    continue;
            }
            [[maybe_unused]] auto result = m_wait_queue.wait_on({});
        }
    }).release_value_but_fixme_should_propagate_errors();
    m_thread = move(thread);
}

void WorkQueue::do_queue(WorkItem& item)
{
    m_items.with([&](auto& items) {
        items.append(item);
    });
    m_wait_queue.wake_one();
}

}

69
Kernel/Tasks/WorkQueue.h
Normal file

@@ -0,0 +1,69 @@
/*
 * Copyright (c) 2021, the SerenityOS developers.
 * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Error.h>
#include <AK/IntrusiveList.h>
#include <Kernel/Forward.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Tasks/WaitQueue.h>

namespace Kernel {

extern WorkQueue* g_io_work;
extern WorkQueue* g_ata_work;

class WorkQueue {
    AK_MAKE_NONCOPYABLE(WorkQueue);
    AK_MAKE_NONMOVABLE(WorkQueue);

public:
    static void initialize();

    ErrorOr<void> try_queue(void (*function)(void*), void* data = nullptr, void (*free_data)(void*) = nullptr)
    {
        auto item = new (nothrow) WorkItem; // TODO: use a pool
        if (!item)
            return Error::from_errno(ENOMEM);
        item->function = [function, data, free_data] {
            function(data);
            if (free_data)
                free_data(data);
        };
        do_queue(*item);
        return {};
    }

    template<typename Function>
    ErrorOr<void> try_queue(Function function)
    {
        auto item = new (nothrow) WorkItem; // TODO: use a pool
        if (!item)
            return Error::from_errno(ENOMEM);
        item->function = Function(function);
        do_queue(*item);
        return {};
    }

private:
    explicit WorkQueue(StringView);

    struct WorkItem {
    public:
        IntrusiveListNode<WorkItem> m_node;
        Function<void()> function;
    };

    void do_queue(WorkItem&);

    RefPtr<Thread> m_thread;
    WaitQueue m_wait_queue;
    SpinlockProtected<IntrusiveList<&WorkItem::m_node>, LockRank::None> m_items {};
};

}
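
A short usage sketch for the lambda overload above; the function name and log line are illustrative, but the call shape matches the API as declared:

// Hypothetical sketch: deferring work onto the IO work-queue thread.
// try_queue() heap-allocates a WorkItem and wakes the queue's kernel process;
// the lambda runs later on that thread, outside the caller's context.
ErrorOr<void> schedule_deferred_work()
{
    return g_io_work->try_queue([] {
        dbgln("deferred work executed on the IO WorkQueue task");
    });
}
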