
Kernel: Detect APs and boot them into protected mode

This isn't fully working yet: the APs pretend that they're
fully initialized and are just halted permanently for now.
Tom 2020-06-04 09:10:16 -06:00 committed by Andreas Kling
parent 93b9832fac
commit 0bc92c259d
9 changed files with 463 additions and 108 deletions
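Detecting APs on x86 generally means walking the ACPI MADT and counting the enabled processor local APIC entries other than the BSP's. Below is a minimal sketch of that idea; the types and count_aps() are hypothetical illustrations, not this commit's code (the actual detection logic lives in files not shown in this excerpt):

#include <AK/Types.h>

// Hypothetical sketch of AP detection via the ACPI MADT.
struct [[gnu::packed]] MADTEntryHeader {
    u8 type;   // 0 == processor local APIC
    u8 length; // size of this entry in bytes
};

struct [[gnu::packed]] MADTProcessorLocalAPIC {
    MADTEntryHeader header;
    u8 acpi_processor_id;
    u8 apic_id;
    u32 flags; // bit 0: processor is enabled
};

static u32 count_aps(const u8* madt, u32 madt_length, u8 bsp_apic_id)
{
    u32 ap_count = 0;
    // Interrupt controller structures start 44 bytes into the MADT
    // (36-byte ACPI table header + local APIC address + flags).
    for (u32 offset = 44; offset + sizeof(MADTEntryHeader) <= madt_length;) {
        auto* entry = reinterpret_cast<const MADTEntryHeader*>(madt + offset);
        if (entry->type == 0) {
            auto* lapic = reinterpret_cast<const MADTProcessorLocalAPIC*>(entry);
            if ((lapic->flags & 1) && lapic->apic_id != bsp_apic_id)
                ap_count++;
        }
        offset += entry->length;
    }
    return ap_count;
}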


@@ -218,7 +218,6 @@ start:
pushl $exit_message
call kprintf
add $4, %esp
cli
loop:
@@ -227,3 +226,171 @@ loop:
exit_message:
.asciz "Kernel exited."
.extern init_ap
.type init_ap, @function
/*
The apic_ap_start function will be loaded to P0x00008000 where the APIC
will boot the AP from in real mode. This code also contains space for
special variables that *must* remain here. When initializing the APIC,
the code here gets copied to P0x00008000, the variables in here get
populated, and then the boot of the APs is triggered. Having
the variables here allows us to access them from real mode. Also, the
code here avoids the need for relocation entries.
Basically, the variables between apic_ap_start and end_apic_ap_start
*MUST* remain here and cannot be moved into a .bss or any other location.
*/
.global apic_ap_start
.type apic_ap_start, @function
apic_ap_start:
.code16
cli
jmp $0x800, $(1f - apic_ap_start) /* avoid relocation entries */
1:
mov %cs, %ax
mov %ax, %ds
/* Generate a new processor id. This is not the APIC id. We just
need a way to find ourselves a stack without stomping on other
APs that may be doing this concurrently. */
xor %ax, %ax
mov %ax, %bp
inc %ax
lock; xaddw %ax, %ds:(ap_cpu_id - apic_ap_start)(%bp) /* avoid relocation entries */
mov %ax, %bx
xor %ax, %ax
mov %ax, %sp
/* load the first temporary gdt */
lgdt (ap_cpu_gdtr_initial - apic_ap_start)
/* enable PM */
movl %cr0, %eax
orl $1, %eax
movl %eax, %cr0
ljmpl $8, $(apic_ap_start32 - apic_ap_start + 0x8000)
apic_ap_start32:
.code32
mov $0x10, %ax
mov %ax, %ss
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
movl $0x8000, %ebp
/* find our allocated stack based on the generated id */
andl $0x0000FFFF, %ebx /* mask to the 16-bit id generated in real mode */
movl %ebx, %esi
movl (ap_cpu_init_stacks - apic_ap_start)(%ebp, %ebx, 4), %esp
/* check if we support NX and enable it if we do */
movl $0x80000001, %eax
cpuid
testl $0x100000, %edx
je 1f /* relative jump: this code runs from its copy at P0x8000 */
/* turn on IA32_EFER.NXE */
movl $0xc0000080, %ecx
rdmsr
orl $0x800, %eax
wrmsr
1:
/* load the bsp's cr3 value */
movl (ap_cpu_init_cr3 - apic_ap_start)(%ebp), %eax
movl %eax, %cr3
/* enable PAE + PSE */
movl %cr4, %eax
orl $0x60, %eax
movl %eax, %cr4
/* enable PG */
movl %cr0, %eax
orl $0x80000000, %eax
movl %eax, %cr0
/* load a second temporary gdt that points above 3GB */
lgdt (ap_cpu_gdtr_initial2 - apic_ap_start + 0xc0008000)
/* jump above 3GB into our identity mapped area now */
ljmp $8, $(1f - apic_ap_start + 0xc0008000)
1:
/* flush the TLB */
movl %cr3, %eax
movl %eax, %cr3
movl $0xc0008000, %ebp
/* now load the final gdt and idt from the identity mapped area */
movl (ap_cpu_gdtr - apic_ap_start)(%ebp), %eax
lgdt (%eax)
movl (ap_cpu_idtr - apic_ap_start)(%ebp), %eax
lidt (%eax)
/* set same cr0 and cr4 values as the BSP */
movl (ap_cpu_init_cr0 - apic_ap_start)(%ebp), %eax
movl %eax, %cr0
movl (ap_cpu_init_cr4 - apic_ap_start)(%ebp), %eax
movl %eax, %cr4
xor %ebp, %ebp
cld
/* push the arbitrary cpu id (0 represents the BSP) and call into C++ */
inc %esi
push %esi
/* We are in identity mapped P0x8000 and the BSP will unload this code
once all APs are initialized, so call init_ap but return to our
infinite loop */
push $loop
ljmp $8, $init_ap
.align 4
.global apic_ap_start_size
apic_ap_start_size:
.2byte end_apic_ap_start - apic_ap_start
ap_cpu_id:
.2byte 0x0
ap_cpu_gdt:
/* null */
.8byte 0x0
/* code */
.4byte 0x0000FFFF
.4byte 0x00cf9a00
/* data */
.4byte 0x0000FFFF
.4byte 0x00cf9200
ap_cpu_gdt_end:
ap_cpu_gdtr_initial:
.2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
.4byte (ap_cpu_gdt - apic_ap_start) + 0x8000
ap_cpu_gdtr_initial2:
.2byte ap_cpu_gdt_end - ap_cpu_gdt - 1
.4byte (ap_cpu_gdt - apic_ap_start) + 0xc0008000
.global ap_cpu_gdtr
ap_cpu_gdtr:
.4byte 0x0 /* will be set at runtime */
.global ap_cpu_idtr
ap_cpu_idtr:
.4byte 0x0 /* will be set at runtime */
.global ap_cpu_init_cr0
ap_cpu_init_cr0:
.4byte 0x0 /* will be set at runtime */
.global ap_cpu_init_cr3
ap_cpu_init_cr3:
.4byte 0x0 /* will be set at runtime */
.global ap_cpu_init_cr4
ap_cpu_init_cr4:
.4byte 0x0 /* will be set at runtime */
.global ap_cpu_init_stacks
ap_cpu_init_stacks:
/* array of allocated stack pointers */
/* NOTE: ap_cpu_init_stacks must be the last variable before
end_apic_ap_start! */
.set end_apic_ap_start, .
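The trampoline above only works once the BSP has copied it to P0x8000 and populated the variables between apic_ap_start and end_apic_ap_start. A minimal sketch of that copy-and-patch step follows, using the accessors this commit adds in the next two files; setup_ap_trampoline() and trampoline_copy_of() are hypothetical names, not the commit's actual APIC code, and the memcpy assumes the low 1MB is still identity mapped at this point:

#include <AK/Types.h>
#include <AK/Vector.h>

extern "C" void apic_ap_start();
extern "C" u16 apic_ap_start_size;
extern "C" u32 ap_cpu_gdtr;
extern "C" u32 ap_cpu_idtr;
extern "C" u32 ap_cpu_init_cr0;
extern "C" u32 ap_cpu_init_cr3;
extern "C" u32 ap_cpu_init_cr4;
extern "C" u32 ap_cpu_init_stacks[];

static u32* trampoline_copy_of(void* variable)
{
    // The APs execute the copy at P0x8000, so each variable must be
    // patched there, at the same offset it has from apic_ap_start.
    FlatPtr offset = (FlatPtr)variable - (FlatPtr)&apic_ap_start;
    return reinterpret_cast<u32*>(0x8000 + offset);
}

void setup_ap_trampoline(const Vector<u32>& ap_stacks)
{
    // Copy the trampoline to the page the SIPI startup vector points at.
    memcpy((void*)0x8000, (void*)&apic_ap_start, apic_ap_start_size);

    // Populate the variables the APs read, so they end up with the same
    // descriptor tables and control register state as the BSP.
    *trampoline_copy_of(&ap_cpu_gdtr) = (u32)&get_gdtr();
    *trampoline_copy_of(&ap_cpu_idtr) = (u32)&get_idtr();
    *trampoline_copy_of(&ap_cpu_init_cr0) = read_cr0();
    *trampoline_copy_of(&ap_cpu_init_cr3) = read_cr3();
    *trampoline_copy_of(&ap_cpu_init_cr4) = read_cr4();
    for (size_t i = 0; i < ap_stacks.size(); ++i)
        trampoline_copy_of(ap_cpu_init_stacks)[i] = ap_stacks[i];

    // The BSP would then send INIT followed by STARTUP IPIs with
    // vector 0x08, pointing the APs at P0x8000.
}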


@@ -46,12 +46,6 @@
namespace Kernel {
struct [[gnu::packed]] DescriptorTablePointer
{
u16 limit;
void* address;
};
static DescriptorTablePointer s_idtr;
static DescriptorTablePointer s_gdtr;
static Descriptor s_idt[256];
@@ -391,6 +385,16 @@ void flush_gdt()
: "memory");
}
const DescriptorTablePointer& get_gdtr()
{
return s_gdtr;
}
const DescriptorTablePointer& get_idtr()
{
return s_idtr;
}
void gdt_init()
{
s_gdt_length = 5;
@@ -819,6 +823,14 @@ void cpu_setup()
}
}
u32 read_cr0()
{
u32 cr0;
asm("movl %%cr0, %%eax"
: "=a"(cr0));
return cr0;
}
u32 read_cr3()
{
u32 cr3;
@@ -833,6 +845,14 @@ void write_cr3(u32 cr3)
: "memory");
}
u32 read_cr4()
{
u32 cr4;
asm("movl %%cr4, %%eax"
: "=a"(cr4));
return cr4;
}
u32 read_dr6()
{
u32 dr6;
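The new accessors let other code read back the same bits the AP trampoline manipulates. An illustrative fragment (not from the commit):

// CR0.PG is bit 31 (0x80000000), as set in the trampoline above;
// CR4 bit 5 (0x20) is PAE.
bool paging_enabled = (read_cr0() & 0x80000000) != 0;
bool pae_enabled = (read_cr4() & 0x20) != 0;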


@@ -41,6 +41,12 @@ class MemoryManager;
class PageDirectory;
class PageTableEntry;
struct [[gnu::packed]] DescriptorTablePointer
{
u16 limit;
void* address;
};
struct [[gnu::packed]] TSS32
{
u16 backlink, __blh;
@@ -248,6 +254,8 @@ public:
class GenericInterruptHandler;
struct RegisterState;
const DescriptorTablePointer& get_gdtr();
const DescriptorTablePointer& get_idtr();
void gdt_init();
void idt_init();
void sse_init();
@@ -477,8 +485,10 @@ inline FlatPtr offset_in_page(const void* address)
return offset_in_page((FlatPtr)address);
}
u32 read_cr0();
u32 read_cr3();
void write_cr3(u32);
u32 read_cr4();
u32 read_dr6();