
Kernel: Rename Thread::tss to Thread::regs and add x86_64 support

We're using software context switches, so calling this struct "tss"
is somewhat misleading.
Gunnar Beutner 2021-06-26 19:57:16 +02:00 committed by Andreas Kling
parent eba33f82b8
commit f285241cb8
14 changed files with 246 additions and 204 deletions
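
Background for the rename: with hardware task switching the CPU loads register
state from a Task State Segment on its own, but this kernel switches contexts
in software and saves/restores registers by hand, so the per-thread struct is
just a register save area; the real per-CPU TSS only still matters for the
ring-0 stack pointer used when an interrupt arrives from user mode. A minimal
sketch of that idea (hypothetical names, not the kernel's actual API):

    #include <cstdint>

    // Hypothetical per-thread register save area. The commit renames the
    // kernel's equivalent from Thread::tss to Thread::regs: no hardware
    // task switch ever loads it; the context-switch code fills and drains
    // it manually.
    struct ThreadRegisters {
        uint32_t eip { 0 };  // where the thread resumes
        uint32_t esp { 0 };  // its saved stack pointer
        uint32_t esp0 { 0 }; // top of its kernel stack
    };

    // With software switching, the only field the hardware still reads from
    // the real TSS is the ring-0 stack pointer, so a switch publishes just
    // that one value (compare "m_tss.esp0 = regs.esp0" in the diffs below):
    inline void publish_kernel_stack(uint32_t& tss_esp0, ThreadRegisters const& incoming)
    {
        tss_esp0 = incoming.esp0;
    }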


@@ -489,14 +489,10 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         // pushed the callee-saved registers, and the last of them happens
         // to be ebp.
         ProcessPagingScope paging_scope(thread.process());
-        auto& tss = thread.tss();
-        u32* stack_top;
-#if ARCH(I386)
-        stack_top = reinterpret_cast<u32*>(tss.esp);
-#else
-        (void)tss;
-        TODO();
-#endif
+        auto& regs = thread.regs();
+        u32* stack_top;
+        stack_top = reinterpret_cast<u32*>(regs.esp);
         if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
             if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
                 frame_ptr = 0;
@@ -505,8 +501,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
             frame_ptr = 0;
     }
 #if ARCH(I386)
-    eip = tss.eip;
+    eip = regs.eip;
 #else
     TODO();
 #endif
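
The hunk above walks a suspended thread's stack starting from regs.esp, which
points at the callee-saved registers pushed by the switch code (ebp last), so
the saved ebp chain can be followed frame by frame. A hedged sketch of such a
frame-pointer walk (simplified: no copy_from_user/safe_memcpy guards like the
kernel uses, and hypothetical names):

    #include <cstddef>
    #include <cstdint>

    // Each stack frame begins with the caller's saved ebp followed by the
    // return address, assuming code is compiled with frame pointers.
    struct StackFrame {
        StackFrame* next; // saved ebp of the caller
        uintptr_t ret;    // return address into the caller
    };

    inline size_t walk_stack(StackFrame const* fp, uintptr_t* out, size_t max_frames)
    {
        size_t count = 0;
        while (fp && count < max_frames) {
            out[count++] = fp->ret; // record the return address
            fp = fp->next;          // follow the frame pointer chain
        }
        return count;
    }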


@@ -30,8 +30,8 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     bool has_fxsr = Processor::current().has_feature(CPUFeature::FXSR);
     Processor::set_current_thread(*to_thread);
-    auto& from_tss = from_thread->tss();
-    auto& to_tss = to_thread->tss();
+    auto& from_regs = from_thread->regs();
+    auto& to_regs = to_thread->regs();
     if (has_fxsr)
         asm volatile("fxsave %0"
@@ -40,10 +40,10 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
         asm volatile("fnsave %0"
                     : "=m"(from_thread->fpu_state()));
-    from_tss.fs = get_fs();
-    from_tss.gs = get_gs();
-    set_fs(to_tss.fs);
-    set_gs(to_tss.gs);
+    from_regs.fs = get_fs();
+    from_regs.gs = get_gs();
+    set_fs(to_regs.fs);
+    set_gs(to_regs.gs);
     if (from_thread->process().is_traced())
         read_debug_registers_into(from_thread->debug_register_state());
@@ -59,8 +59,8 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
     tls_descriptor.set_base(to_thread->thread_specific_data());
     tls_descriptor.set_limit(to_thread->thread_specific_region_size());
-    if (from_tss.cr3 != to_tss.cr3)
-        write_cr3(to_tss.cr3);
+    if (from_regs.cr3 != to_regs.cr3)
+        write_cr3(to_regs.cr3);
     to_thread->set_cpu(processor.get_id());
     processor.restore_in_critical(to_thread->saved_critical());
@@ -96,7 +96,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
 extern "C" u32 do_init_context(Thread* thread, u32 flags)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    thread->tss().eflags = flags;
+    thread->regs().eflags = flags;
     return Processor::current().init_context(*thread, true);
 }
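
enter_thread_context above picks between fxsave and fnsave depending on FXSR
support. A hedged sketch of that dispatch, assuming the 512-byte, 16-byte
aligned save area that fxsave requires (illustrative, not the kernel's actual
FPUState type):

    #include <cstdint>

    // fxsave needs a 512-byte buffer aligned to 16 bytes.
    struct alignas(16) FPUStateSketch {
        uint8_t buffer[512];
    };

    inline void save_fpu(FPUStateSketch& state, bool has_fxsr)
    {
        if (has_fxsr)
            asm volatile("fxsave %0" : "=m"(state)); // saves x87 + SSE state
        else
            asm volatile("fnsave %0" : "=m"(state)); // legacy x87-only save; also reinitializes the FPU
    }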


@@ -54,7 +54,7 @@ asm(
 "    movl %eax, %esp \n" // move stack pointer to what Processor::init_context set up for us
 "    pushl %ebx \n" // push to_thread
 "    pushl %ebx \n" // push from_thread
-"    pushl $thread_context_first_enter \n" // should be same as tss.eip
+"    pushl $thread_context_first_enter \n" // should be same as regs.eip
 "    jmp enter_thread_context \n"
 );
 // clang-format on
@@ -86,8 +86,8 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
-    auto& tss = thread.tss();
-    bool return_to_user = (tss.cs & 3) != 0;
+    auto& regs = thread.regs();
+    bool return_to_user = (regs.cs & 3) != 0;
     // make room for an interrupt frame
     if (!return_to_user) {
@@ -96,10 +96,10 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);
         // For kernel threads we'll push the thread function argument
-        // which should be in tss.esp and exit_kernel_thread as return
+        // which should be in regs.esp and exit_kernel_thread as return
         // address.
         stack_top -= 2 * sizeof(u32);
-        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
+        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = regs.esp;
         *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
     } else {
         stack_top -= sizeof(RegisterState);
@@ -113,25 +113,25 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // we will end up either in kernel mode or user mode, depending on how the thread is set up
     // However, the first step is to always start in kernel mode with thread_context_first_enter
     RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
-    iretframe.ss = tss.ss;
-    iretframe.gs = tss.gs;
-    iretframe.fs = tss.fs;
-    iretframe.es = tss.es;
-    iretframe.ds = tss.ds;
-    iretframe.edi = tss.edi;
-    iretframe.esi = tss.esi;
-    iretframe.ebp = tss.ebp;
+    iretframe.ss = regs.ss;
+    iretframe.gs = regs.gs;
+    iretframe.fs = regs.fs;
+    iretframe.es = regs.es;
+    iretframe.ds = regs.ds;
+    iretframe.edi = regs.edi;
+    iretframe.esi = regs.esi;
+    iretframe.ebp = regs.ebp;
     iretframe.esp = 0;
-    iretframe.ebx = tss.ebx;
-    iretframe.edx = tss.edx;
-    iretframe.ecx = tss.ecx;
-    iretframe.eax = tss.eax;
-    iretframe.eflags = tss.eflags;
-    iretframe.eip = tss.eip;
-    iretframe.cs = tss.cs;
+    iretframe.ebx = regs.ebx;
+    iretframe.edx = regs.edx;
+    iretframe.ecx = regs.ecx;
+    iretframe.eax = regs.eax;
+    iretframe.eflags = regs.eflags;
+    iretframe.eip = regs.eip;
+    iretframe.cs = regs.cs;
     if (return_to_user) {
-        iretframe.userspace_esp = tss.esp;
-        iretframe.userspace_ss = tss.ss;
+        iretframe.userspace_esp = regs.esp;
+        iretframe.userspace_ss = regs.ss;
     }
     // make space for a trap frame
@@ -149,8 +149,8 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
             thread,
             VirtualAddress(&thread),
-            iretframe.cs, tss.eip,
-            VirtualAddress(tss.esp),
+            iretframe.cs, regs.eip,
+            VirtualAddress(regs.esp),
             VirtualAddress(stack_top),
             iretframe.userspace_ss,
             iretframe.userspace_esp);
@@ -158,8 +158,8 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
         dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
             thread,
             VirtualAddress(&thread),
-            iretframe.cs, tss.eip,
-            VirtualAddress(tss.esp),
+            iretframe.cs, regs.eip,
+            VirtualAddress(regs.esp),
             VirtualAddress(stack_top));
     }
 }
@@ -168,15 +168,15 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // in kernel mode, so set up these values so that we end up popping iretframe
     // off the stack right after the context switch completed, at which point
     // control is transferred to what iretframe is pointing to.
-    tss.eip = FlatPtr(&thread_context_first_enter);
-    tss.esp0 = kernel_stack_top;
-    tss.esp = stack_top;
-    tss.cs = GDT_SELECTOR_CODE0;
-    tss.ds = GDT_SELECTOR_DATA0;
-    tss.es = GDT_SELECTOR_DATA0;
-    tss.gs = GDT_SELECTOR_DATA0;
-    tss.ss = GDT_SELECTOR_DATA0;
-    tss.fs = GDT_SELECTOR_PROC;
+    regs.eip = FlatPtr(&thread_context_first_enter);
+    regs.esp0 = kernel_stack_top;
+    regs.esp = stack_top;
+    regs.cs = GDT_SELECTOR_CODE0;
+    regs.ds = GDT_SELECTOR_DATA0;
+    regs.es = GDT_SELECTOR_DATA0;
+    regs.gs = GDT_SELECTOR_DATA0;
+    regs.ss = GDT_SELECTOR_DATA0;
+    regs.fs = GDT_SELECTOR_PROC;
     return stack_top;
 }
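
init_context above fabricates a RegisterState on the new kernel stack so that
the first switch into the thread finishes by popping a complete iret frame.
For reference, a sketch of what iret consumes on i386, top of stack first
(field names are descriptive, not the kernel's actual RegisterState layout):

    #include <cstdint>

    // What iret pops on i386. The userspace_esp/userspace_ss slots are only
    // popped when returning to ring 3, i.e. when (cs & 3) != 0, which is why
    // init_context reserves 2 * sizeof(u32) less for kernel threads.
    struct IretFrame {
        uint32_t eip;           // resume address
        uint32_t cs;            // code segment; low two bits select the ring
        uint32_t eflags;        // restored flags
        uint32_t userspace_esp; // only present for ring transitions
        uint32_t userspace_ss;  // only present for ring transitions
    };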
@@ -218,14 +218,14 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
         "popl %%esi \n"
         "popl %%ebx \n"
         "popfl \n"
-        : [from_esp] "=m" (from_thread->tss().esp),
-        [from_eip] "=m" (from_thread->tss().eip),
+        : [from_esp] "=m" (from_thread->regs().esp),
+        [from_eip] "=m" (from_thread->regs().eip),
         [tss_esp0] "=m" (m_tss.esp0),
         "=d" (from_thread), // needed so that from_thread retains the correct value
         "=a" (to_thread) // needed so that to_thread retains the correct value
-        : [to_esp] "g" (to_thread->tss().esp),
-        [to_esp0] "g" (to_thread->tss().esp0),
-        [to_eip] "c" (to_thread->tss().eip),
+        : [to_esp] "g" (to_thread->regs().esp),
+        [to_esp0] "g" (to_thread->regs().esp0),
+        [to_eip] "c" (to_thread->regs().eip),
         [from_thread] "d" (from_thread),
         [to_thread] "a" (to_thread)
         : "memory"
@@ -256,13 +256,10 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
 {
     VERIFY(initial_thread.process().is_kernel_process());
-    auto& tss = initial_thread.tss();
-    m_tss = tss;
-    m_tss.esp0 = tss.esp0;
+    auto& regs = initial_thread.regs();
+    m_tss.iomapbase = sizeof(m_tss);
+    m_tss.esp0 = regs.esp0;
     m_tss.ss0 = GDT_SELECTOR_DATA0;
-    // user mode needs to be able to switch to kernel mode:
-    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
-    m_tss.fs = GDT_SELECTOR_PROC | 3;
     m_scheduler_initialized = true;
@@ -285,8 +282,8 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
         "call enter_trap_no_irq \n"
         "addl $4, %%esp \n"
         "lret \n"
-        :: [new_esp] "g" (tss.esp),
-        [new_eip] "a" (tss.eip),
+        :: [new_esp] "g" (regs.esp),
+        [new_eip] "a" (regs.eip),
         [from_to_thread] "b" (&initial_thread),
         [cpu] "c" (id())
     );


@@ -67,6 +67,7 @@ String Processor::platform_string() const
     return "x86_64";
 }
+// FIXME: For the most part this is a copy of the i386-specific function, get rid of the code duplication
 u32 Processor::init_context(Thread& thread, bool leave_crit)
 {
     VERIFY(is_kernel_mode());
@@ -88,29 +89,28 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // TODO: handle NT?
     VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
-#if 0
-    auto& tss = thread.tss();
-    bool return_to_user = (tss.cs & 3) != 0;
+    auto& regs = thread.regs();
+    bool return_to_user = (regs.cs & 3) != 0;
     // make room for an interrupt frame
     if (!return_to_user) {
-        // userspace_esp and userspace_ss are not popped off by iret
+        // userspace_rsp is not popped off by iretq
         // unless we're switching back to user mode
-        stack_top -= sizeof(RegisterState) - 2 * sizeof(u32);
+        stack_top -= sizeof(RegisterState) - 2 * sizeof(FlatPtr);
         // For kernel threads we'll push the thread function argument
-        // which should be in tss.esp and exit_kernel_thread as return
+        // which should be in regs.rsp and exit_kernel_thread as return
         // address.
-        stack_top -= 2 * sizeof(u32);
-        *reinterpret_cast<u32*>(kernel_stack_top - 2 * sizeof(u32)) = tss.esp;
+        stack_top -= 2 * sizeof(u64);
+        *reinterpret_cast<u64*>(kernel_stack_top - 2 * sizeof(u32)) = regs.rsp;
         *reinterpret_cast<u32*>(kernel_stack_top - 3 * sizeof(u32)) = FlatPtr(&exit_kernel_thread);
     } else {
         stack_top -= sizeof(RegisterState);
     }
-    // we want to end up 16-byte aligned, %esp + 4 should be aligned
-    stack_top -= sizeof(u32);
-    *reinterpret_cast<u32*>(kernel_stack_top - sizeof(u32)) = 0;
+    stack_top -= sizeof(u64);
+    *reinterpret_cast<u64*>(kernel_stack_top - sizeof(u64)) = 0;
@@ -125,25 +125,24 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     trap.prev_irq_level = 0;
     trap.next_trap = nullptr;
-    stack_top -= sizeof(u32); // pointer to TrapFrame
-    *reinterpret_cast<u32*>(stack_top) = stack_top + 4;
+    stack_top -= sizeof(u64); // pointer to TrapFrame
+    *reinterpret_cast<u64*>(stack_top) = stack_top + 8;
     if constexpr (CONTEXT_SWITCH_DEBUG) {
         if (return_to_user) {
-            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}, user_top={}:{}",
+            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}, user_top={}",
                 thread,
                 VirtualAddress(&thread),
-                iretframe.cs, tss.eip,
-                VirtualAddress(tss.esp),
+                iretframe.cs, regs.rip,
+                VirtualAddress(regs.rsp),
                 VirtualAddress(stack_top),
-                iretframe.userspace_ss,
-                iretframe.userspace_esp);
+                iretframe.userspace_rsp);
         } else {
-            dbgln("init_context {} ({}) set up to execute at eip={}:{}, esp={}, stack_top={}",
+            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}",
                 thread,
                 VirtualAddress(&thread),
-                iretframe.cs, tss.eip,
-                VirtualAddress(tss.esp),
+                iretframe.cs, regs.rip,
+                VirtualAddress(regs.rsp),
                 VirtualAddress(stack_top));
         }
     }
@@ -152,18 +151,9 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
     // in kernel mode, so set up these values so that we end up popping iretframe
     // off the stack right after the context switch completed, at which point
    // control is transferred to what iretframe is pointing to.
-    tss.eip = FlatPtr(&thread_context_first_enter);
-    tss.esp0 = kernel_stack_top;
-    tss.esp = stack_top;
-    tss.cs = GDT_SELECTOR_CODE0;
-    tss.ds = GDT_SELECTOR_DATA0;
-    tss.es = GDT_SELECTOR_DATA0;
-    tss.gs = GDT_SELECTOR_DATA0;
-    tss.ss = GDT_SELECTOR_DATA0;
-    tss.fs = GDT_SELECTOR_PROC;
-#else
-    TODO();
-#endif
+    regs.rip = FlatPtr(&thread_context_first_enter);
+    regs.rsp0 = kernel_stack_top;
+    regs.rsp = stack_top;
     return stack_top;
 }
@@ -203,21 +193,40 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
 {
     VERIFY(initial_thread.process().is_kernel_process());
-    auto& tss = initial_thread.tss();
-    m_tss = tss;
-#if 0
-    m_tss.esp0 = tss.esp0;
-    m_tss.ss0 = GDT_SELECTOR_DATA0;
-    // user mode needs to be able to switch to kernel mode:
-    m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
-    m_tss.fs = GDT_SELECTOR_PROC | 3;
-#else
-    TODO();
-#endif
+    auto& regs = initial_thread.regs();
+    m_tss.iomapbase = sizeof(m_tss);
+    m_tss.rsp0l = regs.rsp0 & 0xffffffff;
+    m_tss.rsp0h = regs.rsp0 >> 32;
     m_scheduler_initialized = true;
-    // FIXME: Context switching (see i386 impl)
+    // clang-format off
+    asm volatile(
+        "movq %[new_rsp], %%rsp \n" // switch to new stack
+        "pushq %[from_to_thread] \n" // to_thread
+        "pushq %[from_to_thread] \n" // from_thread
+        "pushq $" __STRINGIFY(GDT_SELECTOR_CODE0) " \n"
+        "pushq %[new_rip] \n" // save the entry rip to the stack
+        "movq %%rsp, %%rbx \n"
+        "addq $40, %%rbx \n" // calculate pointer to TrapFrame
+        "pushq %%rbx \n"
+        "cld \n"
+        "pushq %[cpu] \n" // push argument for init_finished before register is clobbered
+        "call pre_init_finished \n"
+        "pop %%rdi \n" // move argument for init_finished into place
+        "call init_finished \n"
+        "addq $8, %%rsp \n"
+        "call post_init_finished \n"
+        "pop %%rdi \n" // move pointer to TrapFrame into place
+        "call enter_trap_no_irq \n"
+        "addq $8, %%rsp \n"
+        "retq \n"
+        :: [new_rsp] "g" (regs.rsp),
+        [new_rip] "a" (regs.rip),
+        [from_to_thread] "b" (&initial_thread),
+        [cpu] "c" ((u64)id())
+    );
+    // clang-format on
     VERIFY_NOT_REACHED();
 }
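
A note on the rsp0l/rsp0h pair above: in the 64-bit TSS, RSP0 sits at a
4-byte-aligned offset, so a struct that describes the layout without padding
ends up declaring two u32 halves rather than one u64. A sketch of the split
under that assumption (illustrative struct, not the kernel's actual TSS
definition):

    #include <cstdint>

    struct Tss64Sketch {
        uint32_t rsp0l; // low 32 bits of the ring-0 stack pointer
        uint32_t rsp0h; // high 32 bits
    };

    inline void set_rsp0(Tss64Sketch& tss, uint64_t rsp0)
    {
        tss.rsp0l = rsp0 & 0xffffffff; // mirrors m_tss.rsp0l above
        tss.rsp0h = rsp0 >> 32;        // mirrors m_tss.rsp0h above
    }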