Kernel: Guard the all processes list with a Spinlock rather than a Mutex
There are callers of processes().with or processes().for_each that require interrupts to be disabled. Taking a Mutex with interrupts disabled is a recipe for deadlock, so convert this to a Spinlock.
parent 70518e69f4
commit dea62fe93c
4 changed files with 13 additions and 13 deletions
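For context, here is a minimal user-space sketch of the pattern the diff switches the call sites to (an assumed shape, not the kernel's actual SpinlockProtected: the real class also saves and restores interrupt state and supports return values from the callback). The wrapped value is reachable only through with(), and the lock is a busy-wait spinlock that never sleeps, so a caller running with interrupts disabled cannot be put to sleep while holding it, which is exactly what a Mutex could do.

#include <atomic>

// Sketch only: SpinlockProtected and with() are the names used in the diff,
// but this implementation is a simplification for illustration.
template<typename T>
class SpinlockProtected {
public:
    template<typename Callback>
    void with(Callback callback)
    {
        while (m_lock.test_and_set(std::memory_order_acquire))
            ; // spin instead of blocking, unlike a Mutex
        callback(m_value);
        m_lock.clear(std::memory_order_release);
    }

private:
    T m_value {};
    std::atomic_flag m_lock = ATOMIC_FLAG_INIT;
};

// Usage in the shape of the diff's processes().with(...) call sites
// (hypothetical counter instead of the kernel's Process::List):
SpinlockProtected<int> s_counter;

int main()
{
    s_counter.with([](int& value) { ++value; });
}

Because the lock never sleeps, it is also safe to replace the shared/exclusive Mutex accessors (with_shared, with_exclusive, for_each_shared) with a single with()/for_each(), as the diff below does.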
@@ -916,7 +916,7 @@ KResult ProcFSRootDirectory::traverse_as_directory(unsigned fsid, Function<bool(
         InodeIdentifier identifier = { fsid, component.component_index() };
         callback({ component.name(), identifier, 0 });
     }
-    processes().for_each_shared([&](Process& process) {
+    processes().for_each([&](Process& process) {
         VERIFY(!(process.pid() < 0));
         u64 process_id = (u64)process.pid().value();
         InodeIdentifier identifier = { fsid, static_cast<InodeIndex>(process_id << 36) };
@@ -44,7 +44,7 @@ static void create_signal_trampoline();
 
 RecursiveSpinlock g_profiling_lock;
 static Atomic<pid_t> next_pid;
-static Singleton<MutexProtected<Process::List>> s_processes;
+static Singleton<SpinlockProtected<Process::List>> s_processes;
 READONLY_AFTER_INIT HashMap<String, OwnPtr<Module>>* g_modules;
 READONLY_AFTER_INIT Memory::Region* g_signal_trampoline_region;
 
@@ -55,7 +55,7 @@ MutexProtected<String>& hostname()
     return *s_hostname;
 }
 
-MutexProtected<Process::List>& processes()
+SpinlockProtected<Process::List>& processes()
 {
     return *s_processes;
 }
@@ -86,7 +86,7 @@ UNMAP_AFTER_INIT void Process::initialize()
 NonnullRefPtrVector<Process> Process::all_processes()
 {
     NonnullRefPtrVector<Process> output;
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         output.ensure_capacity(list.size_slow());
         for (const auto& process : list)
             output.append(NonnullRefPtr<Process>(process));
@@ -138,7 +138,7 @@ void Process::register_new(Process& process)
 {
     // Note: this is essentially the same like process->ref()
     RefPtr<Process> new_process = process;
-    processes().with_exclusive([&](auto& list) {
+    processes().with([&](auto& list) {
         list.prepend(process);
     });
 }
@@ -301,7 +301,7 @@ bool Process::unref() const
     // NOTE: We need to obtain the process list lock before doing anything,
     // because otherwise someone might get in between us lowering the
     // refcount and acquiring the lock.
-    auto did_hit_zero = processes().with_exclusive([&](auto& list) {
+    auto did_hit_zero = processes().with([&](auto& list) {
         auto new_ref_count = deref_base();
         if (new_ref_count > 0)
             return false;
@@ -418,7 +418,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)
 
 RefPtr<Process> Process::from_pid(ProcessID pid)
 {
-    return processes().with_shared([&](const auto& list) -> RefPtr<Process> {
+    return processes().with([&](const auto& list) -> RefPtr<Process> {
         for (auto& process : list) {
             if (process.pid() == pid)
                 return &process;
@@ -696,7 +696,7 @@ void Process::die()
         m_threads_for_coredump.append(thread);
     });
 
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;
@@ -815,13 +815,13 @@ static_assert(sizeof(Process) == (PAGE_SIZE * 2));
 
 extern RecursiveSpinlock g_profiling_lock;
 
-MutexProtected<Process::List>& processes();
+SpinlockProtected<Process::List>& processes();
 
 template<IteratorFunction<Process&> Callback>
 inline void Process::for_each(Callback callback)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;
@@ -835,7 +835,7 @@ template<IteratorFunction<Process&> Callback>
 inline void Process::for_each_child(Callback callback)
 {
     ProcessID my_pid = pid();
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;
@@ -876,7 +876,7 @@ inline IterationDecision Process::for_each_thread(Callback callback)
 template<IteratorFunction<Process&> Callback>
 inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
 {
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;
@@ -65,7 +65,7 @@ KResult Process::do_killall(int signal)
     KResult error = KSuccess;
 
     // Send the signal to all processes we have access to for.
-    processes().for_each_shared([&](auto& process) {
+    processes().for_each([&](auto& process) {
         KResult res = KSuccess;
         if (process.pid() == pid())
             res = do_killself(signal);