mirror of
https://github.com/RGBCube/serenity
synced 2025-05-28 09:05:09 +00:00
Kernel: Switch to eagerly restoring x86 FPU state on context switch
Lazy FPU restore is well known to be vulnerable to timing attacks, and eager restore is a lot simpler anyway, so let's just do it eagerly.
This commit is contained in:
parent
9c0836ce97
commit
fd740829d1
5 changed files with 15 additions and 39 deletions
|
@@ -205,34 +205,11 @@ void general_protection_fault_handler(RegisterDump regs)
|
|||
|
||||
// 7: FPU not available exception
|
||||
EH_ENTRY_NO_CODE(7, fpu_exception);
|
||||
void fpu_exception_handler(RegisterDump regs)
|
||||
void fpu_exception_handler(RegisterDump)
|
||||
{
|
||||
(void)regs;
|
||||
|
||||
// Just clear the TS flag. We've already restored the FPU state eagerly.
|
||||
// FIXME: It would be nice if we didn't have to do this at all.
|
||||
asm volatile("clts");
|
||||
if (g_last_fpu_thread == current)
|
||||
return;
|
||||
if (g_last_fpu_thread) {
|
||||
asm volatile("fxsave %0"
|
||||
: "=m"(g_last_fpu_thread->fpu_state()));
|
||||
} else {
|
||||
asm volatile("fnclex");
|
||||
}
|
||||
g_last_fpu_thread = current;
|
||||
|
||||
if (current->has_used_fpu()) {
|
||||
asm volatile("fxrstor %0" ::"m"(current->fpu_state()));
|
||||
} else {
|
||||
asm volatile("fninit");
|
||||
asm volatile("fxsave %0"
|
||||
: "=m"(current->fpu_state()));
|
||||
current->set_has_used_fpu(true);
|
||||
}
|
||||
|
||||
#ifdef FPU_EXCEPTION_DEBUG
|
||||
kprintf("%s FPU not available exception: %u(%s)\n", current->process().is_ring0() ? "Kernel" : "Process", current->pid(), current->process().name().characters());
|
||||
dump(regs);
|
||||
#endif
|
||||
}
|
||||
|
||||
// 14: Page Fault
|
||||
|
|
|
@@ -39,7 +39,6 @@ static u32 time_slice_for(const Thread& thread)
|
|||
}
|
||||
|
||||
Thread* current;
|
||||
Thread* g_last_fpu_thread;
|
||||
Thread* g_finalizer;
|
||||
Thread* g_colonel;
|
||||
WaitQueue* g_finalizer_wait_queue;
|
||||
|
@@ -376,7 +375,6 @@ bool Scheduler::pick_next()
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
if (!thread_to_schedule)
|
||||
thread_to_schedule = g_colonel;
|
||||
|
||||
|
@@ -457,6 +455,9 @@ bool Scheduler::context_switch(Thread& thread)
|
|||
if (current->state() == Thread::Running)
|
||||
current->set_state(Thread::Runnable);
|
||||
|
||||
asm volatile("fxsave %0"
|
||||
: "=m"(current->fpu_state()));
|
||||
|
||||
#ifdef LOG_EVERY_CONTEXT_SWITCH
|
||||
dbgprintf("Scheduler: %s(%u:%u) -> %s(%u:%u) [%u] %w:%x\n",
|
||||
current->process().name().characters(), current->process().pid(), current->tid(),
|
||||
|
@@ -469,6 +470,8 @@ bool Scheduler::context_switch(Thread& thread)
|
|||
current = &thread;
|
||||
thread.set_state(Thread::Running);
|
||||
|
||||
asm volatile("fxrstor %0" ::"m"(current->fpu_state()));
|
||||
|
||||
if (!thread.selector()) {
|
||||
thread.set_selector(gdt_alloc_entry());
|
||||
auto& descriptor = get_gdt_entry(thread.selector());
|
||||
|
|
|
@@ -12,7 +12,6 @@ struct RegisterDump;
|
|||
struct SchedulerData;
|
||||
|
||||
extern Thread* current;
|
||||
extern Thread* g_last_fpu_thread;
|
||||
extern Thread* g_finalizer;
|
||||
extern Thread* g_colonel;
|
||||
extern WaitQueue* g_finalizer_wait_queue;
|
||||
|
|
|
@@ -10,6 +10,8 @@
|
|||
|
||||
//#define SIGNAL_DEBUG
|
||||
|
||||
static FPUState s_clean_fpu_state;
|
||||
|
||||
u16 thread_specific_selector()
|
||||
{
|
||||
static u16 selector;
|
||||
|
@@ -55,7 +57,7 @@ Thread::Thread(Process& process)
|
|||
dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
|
||||
set_default_signal_dispositions();
|
||||
m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
|
||||
memset(m_fpu_state, 0, sizeof(FPUState));
|
||||
memcpy(m_fpu_state, &s_clean_fpu_state, sizeof(FPUState));
|
||||
memset(&m_tss, 0, sizeof(m_tss));
|
||||
|
||||
// Only IF is set when a process boots.
|
||||
|
@@ -119,9 +121,6 @@ Thread::~Thread()
|
|||
thread_table().remove(this);
|
||||
}
|
||||
|
||||
if (g_last_fpu_thread == this)
|
||||
g_last_fpu_thread = nullptr;
|
||||
|
||||
if (selector())
|
||||
gdt_free_entry(selector());
|
||||
|
||||
|
@@ -625,8 +624,7 @@ u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vecto
|
|||
}
|
||||
env[environment.size()] = nullptr;
|
||||
|
||||
auto push_on_new_stack = [&new_esp](u32 value)
|
||||
{
|
||||
auto push_on_new_stack = [&new_esp](u32 value) {
|
||||
new_esp -= 4;
|
||||
u32* stack_ptr = (u32*)new_esp;
|
||||
*stack_ptr = value;
|
||||
|
@@ -646,7 +644,6 @@ Thread* Thread::clone(Process& process)
|
|||
memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
|
||||
clone->m_signal_mask = m_signal_mask;
|
||||
memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
|
||||
clone->m_has_used_fpu = m_has_used_fpu;
|
||||
clone->m_thread_specific_data = m_thread_specific_data;
|
||||
return clone;
|
||||
}
|
||||
|
@@ -654,6 +651,9 @@ Thread* Thread::clone(Process& process)
|
|||
void Thread::initialize()
|
||||
{
|
||||
Scheduler::initialize();
|
||||
asm volatile("fninit");
|
||||
asm volatile("fxsave %0"
|
||||
: "=m"(s_clean_fpu_state));
|
||||
}
|
||||
|
||||
Vector<Thread*> Thread::all_threads()
|
||||
|
|
|
@@ -329,8 +329,6 @@ public:
|
|||
bool has_signal_handler(u8 signal) const;
|
||||
|
||||
FPUState& fpu_state() { return *m_fpu_state; }
|
||||
bool has_used_fpu() const { return m_has_used_fpu; }
|
||||
void set_has_used_fpu(bool b) { m_has_used_fpu = b; }
|
||||
|
||||
void set_default_signal_dispositions();
|
||||
void push_value_on_stack(u32);
|
||||
|
@@ -457,7 +455,6 @@ private:
|
|||
u32 m_priority { THREAD_PRIORITY_NORMAL };
|
||||
u32 m_extra_priority { 0 };
|
||||
u32 m_priority_boost { 0 };
|
||||
bool m_has_used_fpu { false };
|
||||
bool m_dump_backtrace_on_finalization { false };
|
||||
bool m_should_die { false };
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue