1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-21 16:05:07 +00:00

Kernel: Switch to eagerly restoring x86 FPU state on context switch

Lazy FPU restore is well known to be vulnerable to timing attacks,
and eager restore is a lot simpler anyway, so let's just do it eagerly.
This commit is contained in:
Andreas Kling 2020-01-01 16:49:08 +01:00
parent 9c0836ce97
commit fd740829d1
5 changed files with 15 additions and 39 deletions

View file

@@ -39,7 +39,6 @@ static u32 time_slice_for(const Thread& thread)
}
Thread* current;
Thread* g_last_fpu_thread;
Thread* g_finalizer;
Thread* g_colonel;
WaitQueue* g_finalizer_wait_queue;
@@ -376,7 +375,6 @@ bool Scheduler::pick_next()
}
}
if (!thread_to_schedule)
thread_to_schedule = g_colonel;
@@ -457,6 +455,9 @@ bool Scheduler::context_switch(Thread& thread)
if (current->state() == Thread::Running)
current->set_state(Thread::Runnable);
asm volatile("fxsave %0"
: "=m"(current->fpu_state()));
#ifdef LOG_EVERY_CONTEXT_SWITCH
dbgprintf("Scheduler: %s(%u:%u) -> %s(%u:%u) [%u] %w:%x\n",
current->process().name().characters(), current->process().pid(), current->tid(),
@@ -469,6 +470,8 @@ bool Scheduler::context_switch(Thread& thread)
current = &thread;
thread.set_state(Thread::Running);
asm volatile("fxrstor %0" ::"m"(current->fpu_state()));
if (!thread.selector()) {
thread.set_selector(gdt_alloc_entry());
auto& descriptor = get_gdt_entry(thread.selector());