
Kernel: Fix signal delivery

When delivering urgent signals to the current thread,
we need to check if we should be unblocked, and if not,
we need to yield to another thread.

We also need to make sure that we suppress context switches
during Process::exec() so that a context switch doesn't clobber
the registers (mainly eip) that it sets up. To be able to do
that, we add the concept of a critical section, which is similar
to Processor::m_in_irq but different in that a critical section
can be requested at any time. Calls to Scheduler::yield and
Scheduler::donate_to will return instantly without triggering a
context switch, but the processor will then asynchronously
trigger a context switch once the critical section is left.
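
To illustrate, a minimal sketch of the intended usage (the
function below is hypothetical, for illustration only;
ScopedCritical, enter_critical/leave_critical and
check_invoke_scheduler are the primitives introduced by this
commit):

    void example() // hypothetical caller
    {
        // Entering the critical section increments
        // Processor::m_in_critical and disables interrupts,
        // remembering the previous cpu flags.
        ScopedCritical critical;

        // While the section is held, this does NOT switch
        // contexts; it only sets m_invoke_scheduler_async
        // and returns false.
        Scheduler::yield();

        // ... set up state (e.g. tss.eip) that a context
        // switch would otherwise clobber ...

        // When `critical` goes out of scope, leave_critical()
        // drops m_in_critical back to zero and calls
        // check_invoke_scheduler(), which then performs the
        // deferred context switch.
    }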
Authored by Tom on 2020-07-03 05:19:50 -06:00; committed by Andreas Kling
parent a308b176ce
commit e373e5f007
12 changed files with 242 additions and 95 deletions
View file
@@ -828,6 +828,10 @@ void Processor::early_initialize(u32 cpu)
     m_cpu = cpu;
     m_in_irq = 0;
+    m_in_critical = 0;
+    m_invoke_scheduler_async = false;
+    m_scheduler_initialized = false;
     m_idle_thread = nullptr;
     m_current_thread = nullptr;
@@ -961,9 +965,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
 void Processor::switch_context(Thread* from_thread, Thread* to_thread)
 {
     ASSERT(!in_irq());
+    ASSERT(!m_in_critical);
     ASSERT(is_kernel_mode());
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "switch_context --> switching out of: " << *from_thread;
+    dbg() << "switch_context --> switching out of: " << VirtualAddress(from_thread) << " " << *from_thread;
 #endif

     // Switch to new thread context, passing from_thread and to_thread
@@ -1006,7 +1011,7 @@ void Processor::switch_context(Thread* from_thread, Thread* to_thread)
         [to_thread] "a" (to_thread)
     );
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "switch_context <-- from " << *from_thread << " to " << *to_thread;
+    dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread;
 #endif
 }
@@ -1017,9 +1022,14 @@ extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapF
     (void)from_thread;
     (void)to_thread;
     (void)trap;
+    ASSERT(to_thread == Thread::current());
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "switch_context <-- from " << *from_thread << " to " << *to_thread << " (context_first_init)";
+    dbg() << "switch_context <-- from " << VirtualAddress(from_thread) << " " << *from_thread << " to " << VirtualAddress(to_thread) << " " << *to_thread << " (context_first_init)";
 #endif
+    if (to_thread->process().wait_for_tracer_at_next_execve()) {
+        to_thread->send_urgent_signal_to_self(SIGSTOP);
+    }
 }

 extern "C" void thread_context_first_enter(void);
@@ -1038,9 +1048,15 @@ asm(
 "    jmp common_trap_exit \n"
 );

-u32 Processor::init_context(Thread& thread)
+u32 Processor::init_context(Thread& thread, bool leave_crit)
 {
     ASSERT(is_kernel_mode());
+    if (leave_crit) {
+        ASSERT(in_critical());
+        m_in_critical--; // leave it without triggering anything
+        ASSERT(!in_critical());
+    }
     const u32 kernel_stack_top = thread.kernel_stack_top();
     u32 stack_top = kernel_stack_top;
@@ -1098,7 +1114,10 @@ u32 Processor::init_context(Thread& thread)
     *reinterpret_cast<u32*>(stack_top) = stack_top + 4;
 #ifdef CONTEXT_SWITCH_DEBUG
-    dbg() << "init_context " << thread << " set up to execute at eip: " << VirtualAddress(tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
+    if (return_to_user)
+        dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top) << " user esp: " << String::format("%02x:%08x", iretframe.userspace_ss, (u32)iretframe.userspace_esp);
+    else
+        dbg() << "init_context " << thread << " (" << VirtualAddress(&thread) << ") set up to execute at eip: " << String::format("%02x:%08x", iretframe.cs, (u32)tss.eip) << " esp: " << VirtualAddress(tss.esp) << " stack top: " << VirtualAddress(stack_top);
 #endif

     // make switch_context() always first return to thread_context_first_enter()
@@ -1118,24 +1137,29 @@ u32 Processor::init_context(Thread& thread)
 }

-extern "C" u32 do_init_context(Thread* thread)
+extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
-    return Processor::init_context(*thread);
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(Processor::current().in_critical());
+    thread->tss().eflags = flags;
+    return Processor::current().init_context(*thread, true);
 }

-extern "C" void do_assume_context(Thread* thread);
+extern "C" void do_assume_context(Thread* thread, u32 flags);

 asm(
 ".global do_assume_context \n"
 "do_assume_context: \n"
 "    movl 4(%esp), %ebx \n"
+"    movl 8(%esp), %esi \n"
     // We're going to call Processor::init_context, so just make sure
     // we have enough stack space so we don't stomp over it
 "    subl $(" __STRINGIFY(4 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 4) "), %esp \n"
+"    pushl %esi \n"
 "    pushl %ebx \n"
 "    cld \n"
 "    call do_init_context \n"
-"    addl $4, %esp \n"
+"    addl $8, %esp \n"
 "    movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
 "    pushl %ebx \n" // push to_thread
 "    pushl %ebx \n" // push from_thread
@@ -1143,9 +1167,13 @@ asm(
 "    jmp enter_thread_context \n"
 );

-void Processor::assume_context(Thread& thread)
+void Processor::assume_context(Thread& thread, u32 flags)
 {
-    do_assume_context(&thread);
+#ifdef CONTEXT_SWITCH_DEBUG
+    dbg() << "Assume context for thread " << VirtualAddress(&thread) << " " << thread;
+#endif
+    ASSERT_INTERRUPTS_DISABLED();
+    do_assume_context(&thread, flags);
     ASSERT_NOT_REACHED();
 }
@@ -1161,6 +1189,7 @@ void Processor::initialize_context_switching(Thread& initial_thread)
     m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
     m_tss.fs = GDT_SELECTOR_PROC | 3;
+    m_scheduler_initialized = true;

     asm volatile(
         "movl %[new_esp], %%esp \n" // swich to new stack
@@ -1197,7 +1226,15 @@ void Processor::exit_trap(TrapFrame& trap)
     ASSERT(m_in_irq >= trap.prev_irq_level);
     m_in_irq = trap.prev_irq_level;

-    if (m_invoke_scheduler_async && !m_in_irq) {
+    if (!m_in_irq && !m_in_critical)
+        check_invoke_scheduler();
+}
+
+void Processor::check_invoke_scheduler()
+{
+    ASSERT(!m_in_irq);
+    ASSERT(!m_in_critical);
+    if (m_invoke_scheduler_async && m_scheduler_initialized) {
         m_invoke_scheduler_async = false;
         Scheduler::invoke_async();
     }
View file
@@ -622,6 +622,7 @@ class Processor {
     u32 m_cpu;
     u32 m_in_irq;
+    u32 m_in_critical;

     TSS32 m_tss;
     static FPUState s_clean_fpu_state;
@@ -632,6 +633,7 @@ class Processor {
     Thread* m_idle_thread;

     bool m_invoke_scheduler_async;
+    bool m_scheduler_initialized;

     void gdt_init();
     void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
@@ -713,11 +715,32 @@ public:
         return m_in_irq;
     }

+    ALWAYS_INLINE void enter_critical(u32& prev_flags)
+    {
+        m_in_critical++;
+        prev_flags = cpu_flags();
+        cli();
+    }
+
+    ALWAYS_INLINE void leave_critical(u32 prev_flags)
+    {
+        ASSERT(m_in_critical > 0);
+        if (--m_in_critical == 0) {
+            if (!m_in_irq)
+                check_invoke_scheduler();
+        }
+        if (prev_flags & 0x200)
+            sti();
+    }
+
+    ALWAYS_INLINE u32& in_critical() { return m_in_critical; }
+
     ALWAYS_INLINE const FPUState& clean_fpu_state() const
     {
         return s_clean_fpu_state;
     }

+    void check_invoke_scheduler();
     void invoke_scheduler_async() { m_invoke_scheduler_async = true; }

     void enter_trap(TrapFrame& trap, bool raise_irq);
@@ -725,12 +748,55 @@ public:
     [[noreturn]] void initialize_context_switching(Thread& initial_thread);
     void switch_context(Thread* from_thread, Thread* to_thread);
-    [[noreturn]] static void assume_context(Thread& thread);
-    static u32 init_context(Thread& thread);
+    [[noreturn]] static void assume_context(Thread& thread, u32 flags);
+    u32 init_context(Thread& thread, bool leave_crit);

     void set_thread_specific(u8* data, size_t len);
 };

+class ScopedCritical
+{
+    u32 m_prev_flags;
+    bool m_valid;
+
+public:
+    ScopedCritical(const ScopedCritical&) = delete;
+    ScopedCritical& operator=(const ScopedCritical&) = delete;
+
+    ScopedCritical()
+    {
+        m_valid = true;
+        Processor::current().enter_critical(m_prev_flags);
+    }
+
+    ~ScopedCritical()
+    {
+        if (m_valid) {
+            m_valid = false;
+            Processor::current().leave_critical(m_prev_flags);
+        }
+    }
+
+    ScopedCritical(ScopedCritical&& from):
+        m_prev_flags(from.m_prev_flags),
+        m_valid(from.m_valid)
+    {
+        from.m_prev_flags = 0;
+        from.m_valid = false;
+    }
+
+    ScopedCritical& operator=(ScopedCritical&& from)
+    {
+        if (&from != this) {
+            m_prev_flags = from.m_prev_flags;
+            m_valid = from.m_valid;
+            from.m_prev_flags = 0;
+            from.m_valid = false;
+        }
+        return *this;
+    }
+};
+
 struct TrapFrame {
     u32 prev_irq_level;
     RegisterState* regs; // must be last
View file
@@ -24,6 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

+#include <AK/TemporaryChange.h>
 #include <Kernel/KSyms.h>
 #include <Kernel/Lock.h>
 #include <Kernel/Thread.h>
@@ -67,6 +68,13 @@ void Lock::lock(Mode mode)
             }
             timeval* timeout = nullptr;
             current_thread->wait_on(m_queue, timeout, &m_lock, m_holder, m_name);
+        } else if (Processor::current().in_critical()) {
+            // If we're in a critical section and trying to lock, no context
+            // switch will happen, so yield.
+            // The assumption is that if we call this from a critical section
+            // that we DO want to temporarily leave it
+            TemporaryChange change(Processor::current().in_critical(), 0u);
+            Scheduler::yield();
         }
     }
 }
@@ -95,6 +103,9 @@ void Lock::unlock()
             return;
         }
         // I don't know *who* is using "m_lock", so just yield.
+        // The assumption is that if we call this from a critical section
+        // that we DO want to temporarily leave it
+        TemporaryChange change(Processor::current().in_critical(), 0u);
         Scheduler::yield();
     }
 }
@@ -102,7 +113,7 @@ void Lock::unlock()
 bool Lock::force_unlock_if_locked()
 {
     ASSERT(m_mode != Mode::Shared);
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (m_holder != Thread::current())
         return false;
     ASSERT(m_times_locked == 1);
@@ -116,7 +127,7 @@ bool Lock::force_unlock_if_locked()
 void Lock::clear_waiters()
 {
     ASSERT(m_mode != Mode::Shared);
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     m_queue.clear();
 }
View file
@@ -84,10 +84,10 @@
 #include <LibELF/Validation.h>
 #include <LibKeyboard/CharacterMapData.h>

-#define PROCESS_DEBUG
+//#define PROCESS_DEBUG
 //#define DEBUG_POLL_SELECT
 //#define DEBUG_IO
-#define TASK_DEBUG
+//#define TASK_DEBUG
 //#define FORK_DEBUG
 //#define EXEC_DEBUG
 //#define SIGNAL_DEBUG
@@ -821,9 +821,10 @@ void Process::kill_all_threads()
     });
 }

-int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description)
+int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags)
 {
     ASSERT(is_ring3());
+    ASSERT(!Processor::current().in_critical());
     auto path = main_program_description->absolute_path();
 #ifdef EXEC_DEBUG
     dbg() << "do_exec(" << path << ")";
@@ -867,7 +868,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     {
         // Need to make sure we don't swap contexts in the middle
-        InterruptDisabler disabler;
+        ScopedCritical critical;
         old_page_directory = move(m_page_directory);
         old_regions = move(m_regions);
         m_page_directory = PageDirectory::create_for_userspace(*this);
@@ -910,7 +911,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     ArmedScopeGuard rollback_regions_guard([&]() {
         ASSERT(Process::current() == this);
         // Need to make sure we don't swap contexts in the middle
-        InterruptDisabler disabler;
+        ScopedCritical critical;
         m_page_directory = move(old_page_directory);
         m_regions = move(old_regions);
         MM.enter_process_paging_scope(*this);
@@ -963,6 +964,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
         master_tls_alignment = alignment;
         return master_tls_region->vaddr().as_ptr();
     };

+    ASSERT(!Processor::current().in_critical());
     bool success = loader->load();
     if (!success) {
         klog() << "do_exec: Failure loading " << path.characters();
@@ -1026,7 +1028,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
         }
     }

-    Thread* new_main_thread = nullptr;
+    new_main_thread = nullptr;
     if (&current_thread->process() == this) {
         new_main_thread = current_thread;
     } else {
@@ -1041,11 +1043,10 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     // and we don't want to deal with faults after this point.
     u32 new_userspace_esp = new_main_thread->make_userspace_stack_for_main_thread(move(arguments), move(environment));

-    // We cli() manually here because we don't want to get interrupted between do_exec() and Processor::assume_context().
-    // The reason is that the task redirection we've set up above will be clobbered by the timer IRQ.
+    // We cli() manually here because we don't want to get interrupted between do_exec()
+    // and Processor::assume_context() or the next context switch.
     // If we used an InterruptDisabler that sti()'d on exit, we might timer tick'd too soon in exec().
-    if (&current_thread->process() == this)
-        cli();
+    Processor::current().enter_critical(prev_flags);

     // NOTE: Be careful to not trigger any page faults below!
@@ -1059,7 +1060,6 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     new_main_thread->make_thread_specific_region({});
     new_main_thread->reset_fpu_state();

-    // NOTE: if a context switch were to happen, tss.eip and tss.esp would get overwritten!!!
     auto& tss = new_main_thread->m_tss;
     tss.cs = GDT_SELECTOR_CODE3 | 3;
     tss.ds = GDT_SELECTOR_DATA3 | 3;
@@ -1073,7 +1073,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     tss.ss2 = m_pid;

 #ifdef TASK_DEBUG
-    klog() << "Process exec'd " << path.characters() << " @ " << String::format("%p", entry_eip);
+    klog() << "Process " << VirtualAddress(this) << " thread " << VirtualAddress(new_main_thread) << " exec'd " << path.characters() << " @ " << String::format("%p", entry_eip);
 #endif

     if (was_profiling)
@@ -1081,6 +1081,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
     new_main_thread->set_state(Thread::State::Skip1SchedulerPass);

     big_lock().force_unlock_if_locked();
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(Processor::current().in_critical());
     return 0;
 }
@@ -1249,26 +1251,26 @@ int Process::exec(String path, Vector<String> arguments, Vector<String> environm
     // The bulk of exec() is done by do_exec(), which ensures that all locals
     // are cleaned up by the time we yield-teleport below.
-    int rc = do_exec(move(description), move(arguments), move(environment), move(interpreter_description));
+    Thread* new_main_thread = nullptr;
+    u32 prev_flags = 0;
+    int rc = do_exec(move(description), move(arguments), move(environment), move(interpreter_description), new_main_thread, prev_flags);

     m_exec_tid = 0;
     if (rc < 0)
         return rc;

-    auto current_thread = Thread::current();
-    if (m_wait_for_tracer_at_next_execve) {
-        ASSERT(current_thread->state() == Thread::State::Skip1SchedulerPass);
-        // State::Skip1SchedulerPass is irrelevant since we block the thread
-        current_thread->set_state(Thread::State::Running);
-        current_thread->send_urgent_signal_to_self(SIGSTOP);
-    }
+    ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(Processor::current().in_critical());

-    if (&current_thread->process() == this) {
+    auto current_thread = Thread::current();
+    if (current_thread == new_main_thread) {
         current_thread->set_state(Thread::State::Running);
-        Processor::assume_context(*current_thread);
+        Processor::assume_context(*current_thread, prev_flags);
         ASSERT_NOT_REACHED();
     }
+
+    Processor::current().leave_critical(prev_flags);
     return 0;
 }
View file
@@ -450,6 +450,7 @@ public:
     void increment_inspector_count(Badge<ProcessInspectionHandle>) { ++m_inspector_count; }
     void decrement_inspector_count(Badge<ProcessInspectionHandle>) { --m_inspector_count; }

+    bool wait_for_tracer_at_next_execve() const { return m_wait_for_tracer_at_next_execve; }
     void set_wait_for_tracer_at_next_execve(bool val) { m_wait_for_tracer_at_next_execve = val; }

     KResultOr<u32> peek_user_data(u32* address);
@@ -470,7 +471,7 @@ private:
     void kill_threads_except_self();
     void kill_all_threads();

-    int do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description);
+    int do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags);
     ssize_t do_write(FileDescription&, const u8*, int data_size);
     KResultOr<NonnullRefPtr<FileDescription>> find_elf_interpreter_for_executable(const String& path, char (&first_page)[PAGE_SIZE], int nread, size_t file_size);
View file
@@ -327,7 +327,7 @@ void Scheduler::start()
     idle_thread.set_ticks_left(time_slice_for(idle_thread));
     idle_thread.did_schedule();
     idle_thread.set_initialized(true);
-    Processor::init_context(idle_thread);
+    processor.init_context(idle_thread, false);
     idle_thread.set_state(Thread::Running);
     ASSERT(idle_thread.affinity() == (1u << processor.id()));
     processor.initialize_context_switching(idle_thread);
@@ -453,21 +453,23 @@ bool Scheduler::pick_next()

 bool Scheduler::yield()
 {
-    InterruptDisabler disabler;
     auto& proc = Processor::current();
     auto current_thread = Thread::current();
 #ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << Processor::current().id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
+    dbg() << "Scheduler[" << proc.id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
 #endif
+    InterruptDisabler disabler;
     ASSERT(current_thread != nullptr);
-    if (proc.in_irq()) {
-        // If we're handling an IRQ we can't switch context, delay until
-        // exiting the trap
+    if (proc.in_irq() || proc.in_critical()) {
+        // If we're handling an IRQ we can't switch context, or we're in
+        // a critical section where we don't want to switch contexts, then
+        // delay until exiting the trap or critical section
         proc.invoke_scheduler_async();
+        return false;
     } else if (!Scheduler::pick_next())
         return false;
 #ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << Processor::current().id() << "]: yield returns to thread " << *current_thread << " in_irq: " << proc.in_irq();
+    dbg() << "Scheduler[" << proc.id() << "]: yield returns to thread " << *current_thread << " in_irq: " << proc.in_irq();
 #endif
     return true;
 }
@@ -475,10 +477,16 @@ bool Scheduler::yield()
 bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
 {
     InterruptDisabler disabler;
-    ASSERT(!Processor::current().in_irq());
+    auto& proc = Processor::current();
+    ASSERT(!proc.in_irq());

     if (!Thread::is_thread(beneficiary))
         return false;

+    if (proc.in_critical()) {
+        proc.invoke_scheduler_async();
+        return false;
+    }
+
     (void)reason;
     unsigned ticks_left = Thread::current()->ticks_left();
     if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
@@ -486,7 +494,7 @@ bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
     unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
 #ifdef SCHEDULER_DEBUG
-    dbg() << "Scheduler[" << Processor::current().id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
+    dbg() << "Scheduler[" << proc.id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
 #endif
     beneficiary->set_ticks_left(ticks_to_donate);
     Scheduler::context_switch(*beneficiary);
@@ -513,13 +521,14 @@ bool Scheduler::context_switch(Thread& thread)
 #endif
     }

+    auto& proc = Processor::current();
     if (!thread.is_initialized()) {
-        Processor::init_context(thread);
+        proc.init_context(thread, false);
         thread.set_initialized(true);
     }
     thread.set_state(Thread::Running);

-    Processor::current().switch_context(current_thread, &thread);
+    proc.switch_context(current_thread, &thread);
     return true;
 }
View file
@@ -43,19 +43,23 @@
     SpinLock(const SpinLock&) = delete;
     SpinLock(SpinLock&&) = delete;

-    ALWAYS_INLINE void lock()
+    ALWAYS_INLINE u32 lock()
     {
+        u32 prev_flags;
+        Processor::current().enter_critical(prev_flags);
         BaseType expected;
         do {
             expected = 0;
         } while (!m_lock.compare_exchange_strong(expected, 1, AK::memory_order_acq_rel));
+        return prev_flags;
     }

-    ALWAYS_INLINE void unlock()
+    ALWAYS_INLINE void unlock(u32 prev_flags)
     {
         ASSERT(is_locked());
         m_lock.store(0, AK::memory_order_release);
+        Processor::current().leave_critical(prev_flags);
     }

     ALWAYS_INLINE bool is_locked() const
@@ -79,9 +83,12 @@
     RecursiveSpinLock(const RecursiveSpinLock&) = delete;
     RecursiveSpinLock(RecursiveSpinLock&&) = delete;

-    ALWAYS_INLINE void lock()
+    ALWAYS_INLINE u32 lock()
     {
-        FlatPtr cpu = FlatPtr(&Processor::current());
+        auto& proc = Processor::current();
+        FlatPtr cpu = FlatPtr(&proc);
+        u32 prev_flags;
+        proc.enter_critical(prev_flags);
         FlatPtr expected = 0;
         while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
             if (expected == cpu)
@@ -89,14 +96,16 @@ public:
             expected = 0;
         }
         m_recursions++;
+        return prev_flags;
     }

-    ALWAYS_INLINE void unlock()
+    ALWAYS_INLINE void unlock(u32 prev_flags)
     {
         ASSERT(m_recursions > 0);
         ASSERT(m_lock.load(AK::memory_order_consume) == FlatPtr(&Processor::current()));
         if (--m_recursions == 0)
             m_lock.store(0, AK::memory_order_release);
+        Processor::current().leave_critical(prev_flags);
     }

     ALWAYS_INLINE bool is_locked() const
@@ -114,8 +123,8 @@ template <typename BaseType = u32, typename LockType = SpinLock<BaseType>>
 class ScopedSpinLock
 {
     LockType* m_lock;
+    u32 m_prev_flags{0};
     bool m_have_lock{false};
-    bool m_flag{false};

 public:
     ScopedSpinLock() = delete;

@@ -124,19 +133,18 @@ public:
         m_lock(&lock)
     {
         ASSERT(m_lock);
-        m_flag = cli_and_save_interrupt_flag();
-        m_lock->lock();
+        m_prev_flags = m_lock->lock();
         m_have_lock = true;
     }

     ScopedSpinLock(ScopedSpinLock&& from):
         m_lock(from.m_lock),
-        m_have_lock(from.m_have_lock),
-        m_flag(from.m_flag)
+        m_prev_flags(from.m_prev_flags),
+        m_have_lock(from.m_have_lock)
     {
         from.m_lock = nullptr;
+        from.m_prev_flags = 0;
         from.m_have_lock = false;
-        from.m_flag = false;
     }

     ScopedSpinLock(const ScopedSpinLock&) = delete;
@@ -144,8 +152,7 @@ public:
     ~ScopedSpinLock()
     {
         if (m_lock && m_have_lock) {
-            m_lock->unlock();
-            restore_interrupt_flag(m_flag);
+            m_lock->unlock(m_prev_flags);
         }
     }
@@ -153,8 +160,7 @@
     {
         ASSERT(m_lock);
         ASSERT(!m_have_lock);
-        m_flag = cli_and_save_interrupt_flag();
-        m_lock->lock();
+        m_prev_flags = m_lock->lock();
         m_have_lock = true;
     }
@@ -162,10 +168,9 @@
     {
         ASSERT(m_lock);
         ASSERT(m_have_lock);
-        m_lock->unlock();
+        m_lock->unlock(m_prev_flags);
+        m_prev_flags = 0;
         m_have_lock = false;
-        restore_interrupt_flag(m_flag);
-        m_flag = false;
     }

     ALWAYS_INLINE bool have_lock() const
View file
@@ -181,7 +181,8 @@ void Thread::die_if_needed()
     if (!m_should_die)
         return;

-    unlock_process_if_locked();
+    u32 prev_crit;
+    unlock_process_if_locked(prev_crit);

     InterruptDisabler disabler;
     set_state(Thread::State::Dying);
@@ -190,20 +191,26 @@ void Thread::die_if_needed()

 void Thread::yield_without_holding_big_lock()
 {
-    bool did_unlock = unlock_process_if_locked();
+    u32 prev_crit;
+    bool did_unlock = unlock_process_if_locked(prev_crit);
     Scheduler::yield();
-    if (did_unlock)
-        relock_process();
+    relock_process(did_unlock, prev_crit);
 }

-bool Thread::unlock_process_if_locked()
+bool Thread::unlock_process_if_locked(u32& prev_crit)
 {
+    auto& in_critical = Processor::current().in_critical();
+    prev_crit = in_critical;
+    in_critical = 0;
     return process().big_lock().force_unlock_if_locked();
 }

-void Thread::relock_process()
+void Thread::relock_process(bool did_unlock, u32 prev_crit)
 {
-    process().big_lock().lock();
+    if (did_unlock)
+        process().big_lock().lock();
+    ASSERT(!Processor::current().in_critical());
+    Processor::current().in_critical() = prev_crit;
 }

 u64 Thread::sleep(u32 ticks)
@@ -328,6 +335,7 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
     dbg() << "Signal: Kernel sent " << signal << " to " << process();
 #endif

+    ScopedSpinLock lock(g_scheduler_lock);
     m_pending_signals |= 1 << (signal - 1);
 }
@@ -339,13 +347,10 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
 // the appropriate signal handler.
 void Thread::send_urgent_signal_to_self(u8 signal)
 {
-    // FIXME: because of a bug in dispatch_signal we can't
-    // setup a signal while we are the current thread. Because of
-    // this we use a work-around where we send the signal and then
-    // block, allowing the scheduler to properly dispatch the signal
-    // before the thread is next run.
-    send_signal(signal, &process());
-    (void)block<SemiPermanentBlocker>(SemiPermanentBlocker::Reason::Signal);
+    ASSERT(Thread::current() == this);
+    ScopedSpinLock lock(g_scheduler_lock);
+    if (dispatch_signal(signal) == ShouldUnblockThread::No)
+        Scheduler::yield();
 }

 ShouldUnblockThread Thread::dispatch_one_pending_signal()
@@ -443,6 +448,7 @@ static void push_value_on_user_stack(u32* stack, u32 data)
 ShouldUnblockThread Thread::dispatch_signal(u8 signal)
 {
     ASSERT_INTERRUPTS_DISABLED();
+    ASSERT(g_scheduler_lock.is_locked());
     ASSERT(signal > 0 && signal <= 32);
     ASSERT(!process().is_ring0());
@@ -540,6 +546,10 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
     u32 ret_eip = state.eip;
     u32 ret_eflags = state.eflags;

+#ifdef SIGNAL_DEBUG
+    klog() << "signal: setting up user stack to return to eip: " << String::format("%p", ret_eip) << " esp: " << String::format("%p", old_esp);
+#endif
+
     // Align the stack to 16 bytes.
     // Note that we push 56 bytes (4 * 14) on to the stack,
     // so we need to account for this here.
@@ -698,7 +708,7 @@ bool Thread::is_thread(void* ptr)
 void Thread::set_state(State new_state)
 {
-    InterruptDisabler disabler;
+    ScopedSpinLock lock(g_scheduler_lock);
     if (new_state == m_state)
         return;
@@ -712,6 +722,9 @@ void Thread::set_state(State new_state)
     }

     m_state = new_state;
+#ifdef THREAD_DEBUG
+    dbg() << "Set Thread " << VirtualAddress(this) << " " << *this << " state to " << state_string();
+#endif
     if (m_process.pid() != 0) {
         Scheduler::update_state_for_thread(*this);
     }
@@ -842,11 +855,12 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
 Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary, const char* reason)
 {
     TimerId timer_id {};
+    u32 prev_crit;
     bool did_unlock;

     {
         InterruptDisabler disable;
-        did_unlock = unlock_process_if_locked();
+        did_unlock = unlock_process_if_locked(prev_crit);
         if (lock)
             *lock = false;
         set_state(State::Queued);
@@ -870,8 +884,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<b
     sti();

     // We've unblocked, relock the process if needed and carry on.
-    if (did_unlock)
-        relock_process();
+    relock_process(did_unlock, prev_crit);

     BlockResult result = m_wait_queue_node.is_in_list() ? BlockResult::InterruptedByTimeout : BlockResult::WokeNormally;
@@ -884,6 +897,7 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, timeval* timeout, Atomic<b
 void Thread::wake_from_queue()
 {
+    ScopedSpinLock lock(g_scheduler_lock);
     ASSERT(state() == State::Queued);
     if (this != Thread::current())
         set_state(State::Runnable);
View file
@@ -463,8 +463,8 @@
 private:
     friend class SchedulerData;
     friend class WaitQueue;
-    bool unlock_process_if_locked();
-    void relock_process();
+    bool unlock_process_if_locked(u32& prev_crit);
+    void relock_process(bool did_unlock, u32 prev_crit);
     String backtrace_impl() const;
     void reset_fpu_state();
View file
@@ -591,7 +591,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
 {
     ASSERT_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
-    mm_data.m_quickmap_in_use.lock();
+    mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
     ScopedSpinLock lock(s_lock);

     u32 pte_idx = 8 + Processor::current().id();
@@ -622,7 +622,7 @@ void MemoryManager::unquickmap_page()
     auto& pte = boot_pd3_pt1023[pte_idx];
     pte.clear();
     flush_tlb(vaddr);
-    mm_data.m_quickmap_in_use.unlock();
+    mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }

 template<MemoryManager::AccessSpace space, MemoryManager::AccessType access_type>
@@ -736,6 +736,7 @@ void MemoryManager::dump_kernel_regions()
 {
     klog() << "Kernel regions:";
     klog() << "BEGIN END SIZE ACCESS NAME";
+    ScopedSpinLock lock(s_lock);
     for (auto& region : MM.m_kernel_regions) {
         klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_purgeable() ? 'P' : ' ') << " " << region.name().characters();
     }
View file
@@ -69,6 +69,7 @@ class SynthFSInode;

 struct MemoryManagerData {
     SpinLock<u8> m_quickmap_in_use;
+    u32 m_quickmap_prev_flags;
 };

 class MemoryManager {
View file
@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
 void WaitQueue::enqueue(Thread& thread)
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     m_threads.append(thread);
 }

 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (lock)
         *lock = false;
     if (m_threads.is_empty())
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
 void WaitQueue::wake_n(i32 wake_count)
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (m_threads.is_empty())
         return;
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
 void WaitQueue::wake_all()
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     if (m_threads.is_empty())
         return;
     while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
 void WaitQueue::clear()
 {
-    InterruptDisabler disabler;
+    ScopedCritical critical;
     m_threads.clear();
 }
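
As a usage note on the SpinLock changes above, a minimal sketch
(the lock object and functions here are hypothetical, for
illustration only; what the diff establishes is that lock() now
returns the previous cpu flags and unlock() takes them back, so
every spinlock carries its own critical section):

    static SpinLock<u32> s_example_lock; // hypothetical

    void example_raii()
    {
        // Preferred form: the guard stores the prev_flags
        // returned by lock() and hands them to unlock() in
        // its destructor.
        ScopedSpinLock lock(s_example_lock);
        // ... touch shared state ...
    }

    void example_manual()
    {
        // Manual form, as quickmap_page()/unquickmap_page()
        // use it: the saved flags must reach the matching
        // unlock().
        u32 prev_flags = s_example_lock.lock();
        // ... touch shared state ...
        s_example_lock.unlock(prev_flags);
    }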