Kernel: Keep a list of threads per Process

This allows us to iterate over only the threads of the process.
parent 03a9ee79fa
commit ac3927086f
4 changed files with 45 additions and 28 deletions
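
The change in a nutshell: each Process now owns an intrusive list of its Threads, guarded by a spin lock and kept alongside the existing atomic thread count, so iterating a process's threads no longer means walking a global thread table. Below is a minimal userspace sketch of that shape, using std:: stand-ins rather than the kernel's AK::IntrusiveList, RecursiveSpinLock, and ScopedSpinLock (all names here are illustrative, not the kernel's):

    // Userspace sketch only: std::mutex and std::list stand in for the
    // kernel's RecursiveSpinLock and AK::IntrusiveList.
    #include <atomic>
    #include <cstdio>
    #include <list>
    #include <mutex>

    struct Thread {
        int tid;
    };

    struct Process {
        std::atomic<unsigned> thread_count { 0 };
        std::mutex thread_list_lock;
        std::list<Thread*> thread_list;

        bool add_thread(Thread& thread)
        {
            bool is_first = thread_count.fetch_add(1, std::memory_order_relaxed) == 0;
            std::lock_guard<std::mutex> guard(thread_list_lock);
            thread_list.push_back(&thread);
            return is_first;
        }

        bool remove_thread(Thread& thread)
        {
            auto count_before = thread_count.fetch_sub(1, std::memory_order_acq_rel);
            std::lock_guard<std::mutex> guard(thread_list_lock);
            thread_list.remove(&thread);
            return count_before == 1; // true: that was the last thread
        }

        template<typename Callback>
        void for_each_thread(Callback callback)
        {
            // Visits only this process's threads; no global table scan.
            std::lock_guard<std::mutex> guard(thread_list_lock);
            for (auto* thread : thread_list)
                callback(*thread);
        }
    };

    int main()
    {
        Process process;
        Thread a { 1 }, b { 2 };
        process.add_thread(a);
        process.add_thread(b);
        process.for_each_thread([](Thread& t) { std::printf("tid %d\n", t.tid); });
        process.remove_thread(a);
        process.remove_thread(b);
    }

The real hunks below differ in the details (intrusive link nodes instead of a heap-allocated list, IterationDecision-driven iteration), but the counting-plus-locked-list pattern is the same.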
Kernel/Process.cpp

@@ -907,4 +907,22 @@ PerformanceEventBuffer& Process::ensure_perf_events()
         m_perf_event_buffer = make<PerformanceEventBuffer>();
     return *m_perf_event_buffer;
 }
+
+bool Process::remove_thread(Thread& thread)
+{
+    auto thread_cnt_before = m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+    ASSERT(thread_cnt_before != 0);
+    ScopedSpinLock thread_list_lock(m_thread_list_lock);
+    m_thread_list.remove(thread);
+    return thread_cnt_before == 1;
+}
+
+bool Process::add_thread(Thread& thread)
+{
+    bool is_first = m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
+    ScopedSpinLock thread_list_lock(m_thread_list_lock);
+    m_thread_list.append(thread);
+    return is_first;
+}
+
 }
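
A note on the counting protocol in the hunk above: fetch_add and fetch_sub return the counter's value from before the operation, so add_thread sees 0 for a process's first thread and remove_thread sees 1 for its last, which is how callers learn when to finalize. A stand-alone demonstration with std::atomic (AK::Atomic behaves the same way for this purpose):

    // Why `thread_cnt_before == 1` means "last thread": fetch_sub returns
    // the value held *before* the decrement.
    #include <atomic>
    #include <cassert>
    #include <cstdio>

    int main()
    {
        std::atomic<unsigned> thread_count { 0 };

        // Two threads join the process.
        auto before_add = thread_count.fetch_add(1, std::memory_order_relaxed);
        assert(before_add == 0); // first thread
        thread_count.fetch_add(1, std::memory_order_relaxed);

        // Two threads leave; only the second sees 1 before decrementing.
        auto before = thread_count.fetch_sub(1, std::memory_order_acq_rel);
        assert(before == 2);
        before = thread_count.fetch_sub(1, std::memory_order_acq_rel);
        assert(before == 1); // this caller is responsible for finalization
        std::printf("count now %u\n", thread_count.load());
    }

Presumably the decrement uses acquire-release so that whichever thread drops the count synchronizes with the other threads' removals, while the increment can stay relaxed because the new thread is published under the list lock; the commit itself doesn't spell this out.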
Kernel/Process.h

@@ -204,7 +204,7 @@ public:
     template<typename Callback>
     void for_each_child(Callback);
     template<typename Callback>
-    void for_each_thread(Callback) const;
+    IterationDecision for_each_thread(Callback) const;

     void die();
     void finalize();
@@ -507,6 +507,9 @@ private:
     friend class Scheduler;
     friend class Region;

+    bool add_thread(Thread&);
+    bool remove_thread(Thread&);
+
     PerformanceEventBuffer& ensure_perf_events();

     Process(RefPtr<Thread>& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
@@ -592,6 +595,8 @@ private:
     u8 m_termination_status { 0 };
     u8 m_termination_signal { 0 };
     Atomic<u32> m_thread_count { 0 };
+    mutable IntrusiveList<Thread, &Thread::m_process_thread_list_node> m_thread_list;
+    mutable RecursiveSpinLock m_thread_list_lock;

     const bool m_is_kernel_process;
     bool m_dead { false };
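
The m_thread_list member above is an IntrusiveList keyed on a pointer-to-member, &Thread::m_process_thread_list_node: the list's link node lives inside each Thread, so insertion and removal never allocate and a thread can be unlinked in O(1). A rough stand-alone model of that idea follows; it is not AK's implementation (for simplicity it keeps a back-pointer in the node, where an intrusive container would typically recover the element from the member's offset):

    // Toy intrusive list: the link node is embedded in the element.
    #include <cstdio>

    struct Thread;

    struct ListNode {
        ListNode* prev { nullptr };
        ListNode* next { nullptr };
        Thread* owner { nullptr }; // back-pointer, for simplicity
        bool is_in_list() const { return prev != nullptr; }
    };

    struct Thread {
        int tid;
        ListNode process_thread_list_node; // lives inside the Thread itself
    };

    struct ThreadList {
        ListNode sentinel;
        ThreadList() { sentinel.prev = sentinel.next = &sentinel; }

        void append(Thread& t)
        {
            ListNode& n = t.process_thread_list_node;
            n.owner = &t;
            n.prev = sentinel.prev;
            n.next = &sentinel;
            sentinel.prev->next = &n;
            sentinel.prev = &n;
        }

        void remove(Thread& t)
        {
            ListNode& n = t.process_thread_list_node;
            n.prev->next = n.next;
            n.next->prev = n.prev;
            n.prev = n.next = nullptr;
            n.owner = nullptr;
        }

        template<typename F>
        void for_each(F f)
        {
            for (ListNode* n = sentinel.next; n != &sentinel; n = n->next)
                f(*n->owner);
        }
    };

    int main()
    {
        Thread a { 1, {} }, b { 2, {} };
        ThreadList list;
        list.append(a);
        list.append(b);
        list.for_each([](Thread& t) { std::printf("tid %d\n", t.tid); });
        list.remove(a);
        std::printf("a in list? %d\n", a.process_thread_list_node.is_in_list());
    }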
@@ -693,12 +698,9 @@ inline void Process::for_each_child(Callback callback)
 }

 template<typename Callback>
-inline void Process::for_each_thread(Callback callback) const
+inline IterationDecision Process::for_each_thread(Callback callback) const
 {
-    InterruptDisabler disabler;
-    ProcessID my_pid = pid();
-
-    if (my_pid == 0) {
+    if (pid() == 0) {
         // NOTE: Special case the colonel process, since its main thread is not in the global thread table.
         Processor::for_each(
             [&](Processor& proc) -> IterationDecision {
@@ -707,15 +709,15 @@ inline void Process::for_each_thread(Callback callback) const
                     return callback(*idle_thread);
                 return IterationDecision::Continue;
             });
-        return;
-    }
-
-    Thread::for_each([callback, my_pid](Thread& thread) -> IterationDecision {
-        if (thread.pid() == my_pid)
-            return callback(thread);
-
-        return IterationDecision::Continue;
-    });
+    } else {
+        ScopedSpinLock thread_list_lock(m_thread_list_lock);
+        for (auto& thread : m_thread_list) {
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
+    }
+    return IterationDecision::Continue;
 }

 template<typename Callback>
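
With for_each_thread now returning IterationDecision, a caller can stop the walk early and find out whether it did. A small self-contained model of that calling convention (IterationDecision mirrors AK's Continue/Break values; this Process is a toy stand-in):

    #include <cstdio>
    #include <vector>

    enum class IterationDecision { Continue, Break };

    struct Thread { int tid; };

    struct Process {
        std::vector<Thread> threads;

        template<typename Callback>
        IterationDecision for_each_thread(Callback callback) const
        {
            for (auto& thread : threads) {
                IterationDecision decision = callback(thread);
                if (decision != IterationDecision::Continue)
                    return decision; // propagate the early exit to the caller
            }
            return IterationDecision::Continue;
        }
    };

    int main()
    {
        Process p { { { 1 }, { 2 }, { 3 } } };
        const Thread* found = nullptr;
        auto result = p.for_each_thread([&](const Thread& t) {
            if (t.tid == 2) {
                found = &t;
                return IterationDecision::Break; // stop scanning
            }
            return IterationDecision::Continue;
        });
        std::printf("found=%d, broke=%d\n", found ? found->tid : -1,
            result == IterationDecision::Break);
    }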
Kernel/Thread.cpp

@@ -49,7 +49,7 @@ Thread::Thread(NonnullRefPtr<Process> process)
     : m_process(move(process))
     , m_name(m_process->name())
 {
-    bool is_first_thread = m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
+    bool is_first_thread = m_process->add_thread(*this);
     ArmedScopeGuard guard([&]() {
         drop_thread_count(is_first_thread);
     });
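
For context on the constructor hunk above: add_thread runs first, and an ArmedScopeGuard is armed so that if the rest of Thread construction fails, drop_thread_count undoes the registration; on success the guard would presumably be disarmed later in the constructor. A minimal stand-alone model of that scope-guard idiom (a simplified sketch, not AK's ArmedScopeGuard):

    #include <cstdio>
    #include <utility>

    // Run a cleanup callback on scope exit unless explicitly disarmed.
    template<typename Callback>
    class ArmedScopeGuard {
    public:
        explicit ArmedScopeGuard(Callback callback)
            : m_callback(std::move(callback))
        {
        }
        ~ArmedScopeGuard()
        {
            if (m_armed)
                m_callback();
        }
        void disarm() { m_armed = false; }

    private:
        Callback m_callback;
        bool m_armed { true };
    };

    int main()
    {
        {
            ArmedScopeGuard guard([] { std::printf("cleanup ran\n"); });
            // Construction "failed" somewhere: guard fires at scope exit.
        }
        {
            ArmedScopeGuard guard([] { std::printf("never printed\n"); });
            guard.disarm(); // construction succeeded: keep the registration
        }
    }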
@@ -130,6 +130,7 @@ Thread::~Thread()
         // block conditions would access m_process, which would be in
         // the middle of being destroyed.
         ScopedSpinLock lock(g_scheduler_lock);
+        ASSERT(!m_process_thread_list_node.is_in_list());
         g_scheduler_data->thread_list_for_state(m_state).remove(*this);

         // We shouldn't be queued
@@ -388,10 +389,9 @@ void Thread::finalize()

 void Thread::drop_thread_count(bool initializing_first_thread)
 {
-    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
-
-    ASSERT(thread_cnt_before != 0);
-    if (!initializing_first_thread && thread_cnt_before == 1)
+    bool is_last = process().remove_thread(*this);
+    if (!initializing_first_thread && is_last)
         process().finalize();
 }
Kernel/Thread.h

@@ -50,6 +50,8 @@

 namespace Kernel {

+extern RecursiveSpinLock s_mm_lock;
+
 enum class DispatchSignalResult {
     Deferred = 0,
     Yield,
@@ -818,6 +820,7 @@ public:
         ASSERT(!Processor::current().in_irq());
         ASSERT(this == Thread::current());
         ScopedCritical critical;
+        ASSERT(!s_mm_lock.own_lock());
         ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock block_lock(m_block_lock);
         // We need to hold m_block_lock so that nobody can unblock a blocker as soon
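
The new assertion depends on the lock tracking its owner: own_lock() reports whether the current execution context holds s_mm_lock, so code about to block can assert the memory-manager lock is not held (blocking while holding it could stall anyone who needs it; that rationale is my gloss, the commit doesn't state one). A stand-alone model of such an ownership-tracking lock using std threading primitives, not the kernel's RecursiveSpinLock:

    #include <cassert>
    #include <mutex>
    #include <thread>

    class OwnershipTrackingLock {
    public:
        void lock()
        {
            m_mutex.lock();
            m_owner = std::this_thread::get_id();
        }
        void unlock()
        {
            m_owner = {};
            m_mutex.unlock();
        }
        bool own_lock() const { return m_owner == std::this_thread::get_id(); }

    private:
        std::mutex m_mutex;
        std::thread::id m_owner {};
    };

    OwnershipTrackingLock s_mm_lock;

    void block_current_thread()
    {
        // Mirrors the new ASSERT: never go to sleep holding the MM lock.
        assert(!s_mm_lock.own_lock());
        // ... actually block ...
    }

    int main()
    {
        block_current_thread(); // fine: lock not held
        s_mm_lock.lock();
        assert(s_mm_lock.own_lock());
        s_mm_lock.unlock();
        block_current_thread(); // fine again
    }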
@@ -1061,18 +1064,12 @@ public:
         m_ipv4_socket_write_bytes += bytes;
     }

-    void set_active(bool active)
-    {
-        m_is_active.store(active, AK::memory_order_release);
-    }
+    void set_active(bool active) { m_is_active = active; }

     u32 saved_critical() const { return m_saved_critical; }
     void save_critical(u32 critical) { m_saved_critical = critical; }

-    [[nodiscard]] bool is_active() const
-    {
-        return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
-    }
+    [[nodiscard]] bool is_active() const { return m_is_active; }

     [[nodiscard]] bool is_finalizable() const
     {
@@ -1170,10 +1167,10 @@ public:
     void set_handling_page_fault(bool b) { m_handling_page_fault = b; }

 private:
+    IntrusiveListNode m_process_thread_list_node;
     IntrusiveListNode m_runnable_list_node;
     int m_runnable_priority { -1 };

-private:
     friend struct SchedulerData;
     friend class WaitQueue;
@@ -1274,7 +1271,7 @@ private:
 #endif

     JoinBlockCondition m_join_condition;
-    Atomic<bool> m_is_active { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
     bool m_is_joinable { true };
     bool m_handling_page_fault { false };
     PreviousMode m_previous_mode { PreviousMode::UserMode };
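
This last hunk pairs with the set_active/is_active hunk earlier: the memory ordering moves out of the call sites and into the type, so the member can be assigned and read like a plain bool. Note the old accessors used release/acquire while the new type-level default is relaxed. A rough stand-alone model of such a wrapper over std::atomic (an assumed shape; AK::Atomic's real interface is larger):

    #include <atomic>
    #include <cstdio>

    // An Atomic<T, DefaultOrder> whose operator= and implicit read use the
    // ordering baked into the type, so call sites can write `flag = true`
    // without spelling out the memory order each time.
    template<typename T, std::memory_order DefaultOrder = std::memory_order_seq_cst>
    class Atomic {
    public:
        constexpr Atomic(T value)
            : m_value(value)
        {
        }
        T operator=(T value)
        {
            m_value.store(value, DefaultOrder);
            return value;
        }
        operator T() const { return m_value.load(DefaultOrder); }

    private:
        std::atomic<T> m_value;
    };

    Atomic<bool, std::memory_order_relaxed> m_is_active { false };

    int main()
    {
        m_is_active = true;        // relaxed store, no boilerplate
        bool active = m_is_active; // relaxed load
        std::printf("active=%d\n", active);
    }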