1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-30 21:48:11 +00:00
serenity/Kernel/VM/PageDirectory.cpp
Andreas Kling a850a89c1b Kernel: Add a random offset to the base of the per-process VM allocator
This is not ASLR, but it does de-trivialize exploiting the ELF loader
which would previously always parse executables at 0x01001000 in every
single exec(). I've taken advantage of this multiple times in my own
toy exploits and it's starting to feel cheesy. :^)
2020-01-17 23:29:54 +01:00

93 lines
3.5 KiB
C++

#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
// Window from which userspace VM ranges are allocated (see the process
// constructor below); the base may additionally be randomized.
static const u32 userspace_range_base = 0x00800000;
static const u32 userspace_range_ceiling = 0xbe000000;
// Base of the kernel-only VM allocator region (>= 3 GB).
static const u32 kernelspace_range_base = 0xc0800000;
// Global map from a CR3 value (physical address of a directory table)
// to its PageDirectory. Lazily heap-allocated and intentionally never
// freed; must only be touched with interrupts disabled.
static HashMap<u32, PageDirectory*>& cr3_map()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashMap<u32, PageDirectory*>* the_map = nullptr;
    if (the_map == nullptr)
        the_map = new HashMap<u32, PageDirectory*>;
    return *the_map;
}
// Look up the PageDirectory registered for the given CR3 value.
// Returns a null RefPtr if no directory is registered under that CR3.
RefPtr<PageDirectory> PageDirectory::find_by_cr3(u32 cr3)
{
    InterruptDisabler disabler;
    auto maybe_directory = cr3_map().get(cr3);
    return maybe_directory.value_or(nullptr);
}
// Boot-time paging structures set up by boot.S (adopted by the kernel
// PageDirectory constructor below): the 4-entry PDPT plus the page
// directories for PDPT slots 0 and 3.
extern "C" PageDirectoryEntry* boot_pdpt[4];
extern "C" PageDirectoryEntry boot_pd0[1024];
extern "C" PageDirectoryEntry boot_pd3[1024];
// Kernel page directory: instead of allocating fresh paging structures,
// adopt the statically-allocated tables that boot.S already set up.
PageDirectory::PageDirectory()
{
    // Kernel-space VM allocations come from [kernelspace_range_base,
    // kernelspace_range_base + 0x3f000000). Use the named constant
    // instead of repeating the magic number 0xc0800000.
    m_range_allocator.initialize_with_range(VirtualAddress(kernelspace_range_base), 0x3f000000);

    // Adopt the page tables already set up by boot.S
    PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((u32)boot_pdpt));
    PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((u32)boot_pd0));
    PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((u32)boot_pd3));
    kprintf("MM: boot_pdpt @ P%p\n", boot_pdpt_paddr.get());
    kprintf("MM: boot_pd0 @ P%p\n", boot_pd0_paddr.get());
    kprintf("MM: boot_pd3 @ P%p\n", boot_pd3_paddr.get());
    // Wrap the boot-time physical pages in PhysicalPage objects
    // (flags presumably mean supervisor=true, return-to-freelist=false
    // — TODO confirm against PhysicalPage::create).
    m_directory_table = PhysicalPage::create(boot_pdpt_paddr, true, false);
    m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, true, false);
    m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, true, false);
}
// Userspace page directory for a process. The bottom 2 MB and the top
// 1 GB of mappings are shared with the kernel's directory; the three
// userspace page directories start out empty.
PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_range_allocator)
    : m_process(&process)
{
    if (parent_range_allocator) {
        // Inherit the parent's allocator state so the child sees the
        // same address-space layout (fork-style construction).
        m_range_allocator.initialize_from_parent(*parent_range_allocator);
    } else {
        // Randomize the base of the per-process VM allocator.
        // FIX: the original wrote `% 32 * MB`, which parses as
        // `(% 32) * MB` — the offset was limited to 32 coarse 1 MB
        // steps and the `& PAGE_MASK` was a no-op. `% (32 * MB)` gives
        // a page-granular offset anywhere in the first 32 MB, which is
        // what the page-alignment mask clearly intends.
        size_t random_offset = (get_good_random<u32>() % (32 * MB)) & PAGE_MASK;
        u32 base = userspace_range_base + random_offset;
        m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
    }

    // Set up a userspace page directory
    m_directory_table = MM.allocate_user_physical_page();
    m_directory_pages[0] = MM.allocate_user_physical_page();
    m_directory_pages[1] = MM.allocate_user_physical_page();
    m_directory_pages[2] = MM.allocate_user_physical_page();
    // Share the top 1 GB of kernel-only mappings (>=3GB or >=0xc0000000)
    m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
    {
        InterruptDisabler disabler;
        // Point the four PDPT entries at the page directories; the
        // low bit marks each entry present.
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_directory_table);
        table.raw[0] = (u64)m_directory_pages[0]->paddr().as_ptr() | 1;
        table.raw[1] = (u64)m_directory_pages[1]->paddr().as_ptr() | 1;
        table.raw[2] = (u64)m_directory_pages[2]->paddr().as_ptr() | 1;
        table.raw[3] = (u64)m_directory_pages[3]->paddr().as_ptr() | 1;
        MM.unquickmap_page();
    }

    // Clone bottom 2 MB of mappings from kernel_page_directory.
    // Copy through a stack buffer, presumably because quickmap_pd
    // reuses a single mapping slot — TODO confirm.
    PageDirectoryEntry buffer;
    auto* kernel_pd = MM.quickmap_pd(MM.kernel_page_directory(), 0);
    memcpy(&buffer, kernel_pd, sizeof(PageDirectoryEntry));
    auto* new_pd = MM.quickmap_pd(*this, 0);
    memcpy(new_pd, &buffer, sizeof(PageDirectoryEntry));

    // Register in the global CR3 map so find_by_cr3() can locate us.
    InterruptDisabler disabler;
    cr3_map().set(cr3(), this);
}
// Unregister this directory from the global CR3 map on destruction.
PageDirectory::~PageDirectory()
{
#ifdef MM_DEBUG
    dbgprintf("MM: ~PageDirectory K%x\n", this);
#endif
    InterruptDisabler disabler;
    auto& map = cr3_map();
    map.remove(cr3());
}