
Kernel: Introduce basic pre-kernel environment

This implements a simple bootloader that is capable of loading ELF64
kernel images. It does this by using QEMU/GRUB to load the kernel image
from disk and pass it to our bootloader as a Multiboot module.

The bootloader then parses the ELF image and sets it up appropriately.
The kernel's entry point is a C++ function with architecture-native
code.

Co-authored-by: Liav A <liavalb@gmail.com>
Gunnar Beutner 2021-07-18 14:47:32 +02:00 committed by Andreas Kling
parent 357ddd393e
commit 7e94b090fe
30 changed files with 1207 additions and 181 deletions
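
For readers unfamiliar with the mechanism, the following minimal, self-contained C++ sketch shows the kind of ELF64 program-header walk a pre-kernel performs on a Multiboot module. The structure layouts follow the ELF64 specification, but load_kernel_image and module_base are illustrative stand-ins, not this commit's actual Prekernel code.

#include <stdint.h>
#include <string.h>

// Minimal ELF64 structures (only what this sketch needs), per the ELF64 spec.
struct Elf64_Ehdr {
    uint8_t e_ident[16];
    uint16_t e_type;
    uint16_t e_machine;
    uint32_t e_version;
    uint64_t e_entry;     // virtual address of the kernel entry point
    uint64_t e_phoff;     // file offset of the program header table
    uint64_t e_shoff;
    uint32_t e_flags;
    uint16_t e_ehsize;
    uint16_t e_phentsize; // size of one program header
    uint16_t e_phnum;     // number of program headers
    uint16_t e_shentsize;
    uint16_t e_shnum;
    uint16_t e_shstrndx;
};

struct Elf64_Phdr {
    uint32_t p_type;
    uint32_t p_flags;
    uint64_t p_offset;    // segment offset within the file
    uint64_t p_vaddr;
    uint64_t p_paddr;     // physical load address
    uint64_t p_filesz;
    uint64_t p_memsz;
    uint64_t p_align;
};

static constexpr uint32_t PT_LOAD = 1;

// Copy every PT_LOAD segment to its physical address, zero the BSS tail,
// and hand back the ELF entry point (0 signals a malformed image).
// Hypothetical helper: in a real pre-kernel this runs with paging off or
// identity-mapped low memory, so writing to p_paddr directly is valid.
uint64_t load_kernel_image(uint8_t const* module_base)
{
    auto const* header = reinterpret_cast<Elf64_Ehdr const*>(module_base);
    if (memcmp(header->e_ident, "\177ELF", 4) != 0 || header->e_ident[4] != 2) // 2 == ELFCLASS64
        return 0;
    for (uint16_t i = 0; i < header->e_phnum; ++i) {
        auto const* program_header = reinterpret_cast<Elf64_Phdr const*>(module_base + header->e_phoff + i * header->e_phentsize);
        if (program_header->p_type != PT_LOAD)
            continue;
        auto* destination = reinterpret_cast<uint8_t*>(program_header->p_paddr);
        memcpy(destination, module_base + program_header->p_offset, program_header->p_filesz);
        memset(destination + program_header->p_filesz, 0, program_header->p_memsz - program_header->p_filesz);
    }
    return header->e_entry;
}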

Kernel/VM/MemoryManager.cpp

@@ -22,6 +22,8 @@
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/SharedInodeVMObject.h>
+extern u8* start_of_bootloader_image;
+extern u8* end_of_bootloader_image;
extern u8* start_of_kernel_image;
extern u8* end_of_kernel_image;
extern FlatPtr start_of_kernel_text;
@@ -34,6 +36,9 @@ extern FlatPtr end_of_unmap_after_init;
extern FlatPtr start_of_kernel_ksyms;
extern FlatPtr end_of_kernel_ksyms;
extern "C" void* boot_pd_kernel;
extern "C" void* boot_pd_kernel_pt1023;
extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
extern size_t multiboot_copy_boot_modules_count;
@@ -196,6 +201,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
+m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Bootloader, PhysicalAddress(virtual_to_low_physical(FlatPtr(start_of_bootloader_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(end_of_bootloader_image)))) });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
if (multiboot_info_ptr->flags & 0x4) {
@@ -334,8 +340,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
}
}
extern "C" PageDirectoryEntry boot_pd3[1024];
UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
{
// We assume that the physical page range is contiguous and doesn't contain huge gaps!
@@ -436,10 +440,10 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
unquickmap_page();
// Hook the page table into the kernel page directory
-PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
+PhysicalAddress boot_pd_kernel_paddr(virtual_to_low_physical((FlatPtr)boot_pd_kernel));
u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;
-auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd3_paddr));
+auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd_kernel_paddr));
PageDirectoryEntry& pde = pd[page_directory_index];
VERIFY(!pde.is_present()); // Nothing should be using this PD yet
@@ -909,7 +913,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
return {};
}
-fast_u32_fill((u32*)page->paddr().offset(KERNEL_BASE).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
+fast_u32_fill((u32*)page->paddr().offset(kernel_base).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
++m_system_memory_info.super_physical_pages_used;
return page;
}
@@ -939,13 +943,11 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddress
Processor::flush_tlb(page_directory, vaddr, page_count);
}
extern "C" PageTableEntry boot_pd3_pt1023[1024];
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
VERIFY(s_mm_lock.own_lock());
auto& mm_data = get_data();
-auto& pte = boot_pd3_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
+auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
if (pte.physical_page_base() != pd_paddr.get()) {
pte.set_physical_page_base(pd_paddr.get());
@@ -971,7 +973,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
VERIFY(s_mm_lock.own_lock());
auto& mm_data = get_data();
-auto& pte = boot_pd3_pt1023[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
+auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
if (pte.physical_page_base() != pt_paddr.get()) {
pte.set_physical_page_base(pt_paddr.get());
pte.set_present(true);
@@ -1002,7 +1004,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
-auto& pte = boot_pd3_pt1023[pte_idx];
+auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
if (pte.physical_page_base() != physical_address.get()) {
pte.set_physical_page_base(physical_address.get());
pte.set_present(true);
@@ -1021,7 +1023,7 @@ void MemoryManager::unquickmap_page()
VERIFY(mm_data.m_quickmap_in_use.is_locked());
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
-auto& pte = boot_pd3_pt1023[pte_idx];
+auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
pte.clear();
flush_tlb_local(vaddr);
mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
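
A note on the pattern above: the recurring change in this file replaces the statically named boot_pd3_pt1023 page table with the boot_pd_kernel_pt1023 pointer handed over by the pre-kernel; the quickmap technique itself is untouched. A simplified self-contained model of it follows (the constants and the PageTableEntry layout are made-up stand-ins, not the kernel's real definitions, and a 64-bit target is assumed):

#include <stdint.h>

constexpr uintptr_t PAGE_SIZE = 4096;
constexpr uintptr_t QUICKMAP_PER_CPU_BASE = 0xffffff8000000000; // hypothetical per-CPU slot base

struct PageTableEntry {
    uint64_t raw { 0 };
    uint64_t physical_page_base() const { return raw & ~uint64_t(0xfff); }
    void set_physical_page_base(uint64_t paddr) { raw = (raw & 0xfff) | (paddr & ~uint64_t(0xfff)); }
    void set_present(bool b) { raw = b ? (raw | 1) : (raw & ~uint64_t(1)); }
    void set_writable(bool b) { raw = b ? (raw | 2) : (raw & ~uint64_t(2)); }
};

void flush_tlb_local(uintptr_t vaddr); // e.g. invlpg on x86; definition omitted

// Map one physical page at this CPU's reserved quickmap slot by rewriting
// the slot's PTE in the kernel's topmost page table, as quickmap_page()
// above does with boot_pd_kernel_pt1023.
uint8_t* quickmap_page(PageTableEntry* kernel_pt, uint64_t paddr, uint32_t cpu_id)
{
    uintptr_t vaddr = QUICKMAP_PER_CPU_BASE + cpu_id * PAGE_SIZE;
    auto& pte = kernel_pt[(vaddr - QUICKMAP_PER_CPU_BASE) / PAGE_SIZE];
    if (pte.physical_page_base() != paddr) {
        pte.set_physical_page_base(paddr);
        pte.set_present(true);
        pte.set_writable(true);
        flush_tlb_local(vaddr); // a stale translation may still be cached
    }
    return reinterpret_cast<uint8_t*>(vaddr);
}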

Kernel/VM/MemoryManager.h

@@ -43,16 +43,17 @@ constexpr FlatPtr page_round_down(FlatPtr x)
inline FlatPtr low_physical_to_virtual(FlatPtr physical)
{
-return physical + KERNEL_BASE;
+return physical + kernel_base;
}
inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
{
-return virtual_ - KERNEL_BASE;
+return virtual_ - kernel_base;
}
enum class UsedMemoryRangeType {
LowMemory = 0,
+Bootloader,
Kernel,
BootModule,
PhysicalPages,
@@ -60,6 +61,7 @@ enum class UsedMemoryRangeType {
static constexpr StringView UserMemoryRangeTypeNames[] {
"Low memory",
"Bootloader",
"Kernel",
"Boot module",
"Physical Pages"

Kernel/VM/PageDirectory.cpp

@@ -6,12 +6,15 @@
#include <AK/Memory.h>
#include <AK/Singleton.h>
+#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
+extern u8* end_of_kernel_image;
namespace Kernel {
static AK::Singleton<HashMap<FlatPtr, PageDirectory*>> s_cr3_map;
@@ -28,16 +31,19 @@ RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
return cr3_map().get(cr3).value_or({});
}
extern "C" FlatPtr kernel_base;
#if ARCH(X86_64)
extern "C" PageDirectoryEntry boot_pml4t[1024];
extern "C" void* boot_pml4t;
#endif
extern "C" PageDirectoryEntry* boot_pdpt[4];
extern "C" PageDirectoryEntry boot_pd0[1024];
extern "C" PageDirectoryEntry boot_pd3[1024];
extern "C" void* boot_pdpt;
extern "C" void* boot_pd0;
extern "C" void* boot_pd_kernel;
UNMAP_AFTER_INIT PageDirectory::PageDirectory()
{
-m_range_allocator.initialize_with_range(VirtualAddress(KERNEL_BASE + KERNEL_PD_OFFSET), KERNEL_PD_END - (KERNEL_BASE + KERNEL_PD_OFFSET));
+// make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
+FlatPtr start_of_range = ((FlatPtr)&end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
+m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
}
@@ -51,13 +57,13 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
#endif
PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
-PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
+PhysicalAddress boot_pd_kernel_paddr(virtual_to_low_physical((FlatPtr)boot_pd_kernel));
dmesgln("MM: boot_pdpt @ {}", boot_pdpt_paddr);
dmesgln("MM: boot_pd0 @ {}", boot_pd0_paddr);
dmesgln("MM: boot_pd3 @ {}", boot_pd3_paddr);
dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel_paddr);
m_directory_table = PhysicalPage::create(boot_pdpt_paddr, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, MayReturnToFreeList::No);
-m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, MayReturnToFreeList::No);
+m_directory_pages[(kernel_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel_paddr, MayReturnToFreeList::No);
}
PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
@@ -83,15 +89,13 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
m_directory_table = MM.allocate_user_physical_page();
if (!m_directory_table)
return;
-auto kernel_pd_index = (KERNEL_BASE >> 30) & 0xffu;
-for (size_t i = 0; i < 4; i++) {
-if (i == kernel_pd_index)
-continue;
+auto kernel_pd_index = (kernel_base >> 30) & 0x1ffu;
+for (size_t i = 0; i < kernel_pd_index; i++) {
m_directory_pages[i] = MM.allocate_user_physical_page();
if (!m_directory_pages[i])
return;
}
-// Share the top 1 GiB of kernel-only mappings (>=3GiB or >=KERNEL_BASE)
+// Share the top 1 GiB of kernel-only mappings (>=kernel_base)
m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];
#if ARCH(X86_64)
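
The index arithmetic introduced in this file picks the page-directory slot covering the 1 GiB region that contains kernel_base: each slot spans 2^30 bytes, so the address is shifted right by 30 and masked to the 9-bit index width of x86-64's 512-entry tables. A small standalone check with illustrative addresses (not the commit's actual load addresses):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Each page-directory slot maps 1 GiB, hence the shift by 30; the 0x1ff
// mask keeps the low 9 bits, matching a 512-entry table.
constexpr size_t kernel_pd_index(uint64_t kernel_base)
{
    return (kernel_base >> 30) & 0x1ff;
}

int main()
{
    printf("%zu\n", kernel_pd_index(0xc0000000));         // 3   (the classic 3 GiB split)
    printf("%zu\n", kernel_pd_index(0xffffffff80000000)); // 510 (a typical higher-half placement)
}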

Kernel/VM/Region.h

@@ -86,7 +86,7 @@ public:
void set_mmap(bool mmap) { m_mmap = mmap; }
bool is_user() const { return !is_kernel(); }
-bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= KERNEL_BASE; }
+bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_base; }
PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);