From 928ee2c791b0789ad4e845062a8212f50fe299e3 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Thu, 21 Jan 2021 00:06:19 +0100
Subject: [PATCH] Kernel: Don't let signals unblock threads while handling a
 page fault

It was possible to signal a process while it was paging in an inode
backed VM object. This would cause the inode read to EINTR, and the
page fault handler would assert.

Solve this by simply not unblocking threads due to signals if they are
currently busy handling a page fault. This is probably not the best way
to solve this issue, so I've added a FIXME to that effect.
---
 Kernel/Arch/i386/CPU.cpp | 9 +++++++++
 Kernel/Thread.cpp        | 8 ++++++++
 Kernel/Thread.h          | 4 ++++
 3 files changed, 21 insertions(+)

diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp
index e0aa077c3b..20be34443f 100644
--- a/Kernel/Arch/i386/CPU.cpp
+++ b/Kernel/Arch/i386/CPU.cpp
@@ -25,6 +25,7 @@
  */
 
 #include <AK/Assertions.h>
+#include <AK/ScopeGuard.h>
 #include <AK/ScopedValueRollback.h>
 #include <AK/StringBuilder.h>
 #include <AK/Types.h>
@@ -257,6 +258,14 @@ void page_fault_handler(TrapFrame* trap)
     }
 
     auto current_thread = Thread::current();
+
+    if (current_thread)
+        current_thread->set_handling_page_fault(true);
+    ScopeGuard guard = [current_thread] {
+        if (current_thread)
+            current_thread->set_handling_page_fault(false);
+    };
+
     if (!faulted_in_kernel && !MM.validate_user_stack(current_thread->process(), VirtualAddress(regs.userspace_esp))) {
         dbgln("Invalid stack pointer: {}", VirtualAddress(regs.userspace_esp));
         handle_crash(regs, "Bad stack on page fault", SIGSTKFLT);
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 209baa35bb..5e7a89eda5 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -166,6 +166,12 @@ void Thread::unblock(u8 signal)
         return;
     ASSERT(m_blocker);
     if (signal != 0) {
+        if (is_handling_page_fault()) {
+            // Don't let signals unblock threads that are blocked inside a page fault handler.
+            // This prevents threads from EINTR'ing the inode read in an inode page fault.
+            // FIXME: There's probably a better way to solve this.
+            return;
+        }
         if (!m_blocker->can_be_interrupted() && !m_should_die)
             return;
         m_blocker->set_interrupted_by_signal(signal);
@@ -461,6 +467,8 @@
 u32 Thread::pending_signals_for_state() const
 {
     ASSERT(g_scheduler_lock.own_lock());
     constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
+    if (is_handling_page_fault())
+        return 0;
     return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
 }
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 761fcb33a8..f1096420e2 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1155,6 +1155,9 @@ public:
         return m_kernel_stack_region;
     }
 
+    bool is_handling_page_fault() const { return m_handling_page_fault; }
+    void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
+
 private:
     IntrusiveListNode m_runnable_list_node;
 
@@ -1258,6 +1261,7 @@ private:
     JoinBlockCondition m_join_condition;
     Atomic<bool> m_is_active { false };
     bool m_is_joinable { true };
+    bool m_handling_page_fault { false };
     unsigned m_syscall_count { 0 };
     unsigned m_inode_faults { 0 };