diff --git a/Kernel/MemoryManager.cpp b/Kernel/MemoryManager.cpp
index 0f655acdbd..66856ffe29 100644
--- a/Kernel/MemoryManager.cpp
+++ b/Kernel/MemoryManager.cpp
@@ -26,6 +26,14 @@ MemoryManager::~MemoryManager()
 {
 }
 
+void MemoryManager::populatePageDirectory(Task& task)
+{
+    memset(task.m_pageDirectory, 0, 4096);
+
+    task.m_pageDirectory[0] = m_pageDirectory[0];
+    task.m_pageDirectory[1] = m_pageDirectory[1];
+}
+
 void MemoryManager::initializePaging()
 {
     static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
@@ -41,6 +49,7 @@ void MemoryManager::initializePaging()
     // Make null dereferences crash.
     protectMap(LinearAddress(0), 4 * KB);
 
+    // The bottom 4 MB are identity mapped & supervisor only. Every process shares this mapping.
     identityMap(LinearAddress(4096), 4 * MB);
 
     for (size_t i = (4 * MB) + PAGE_SIZE; i < (8 * MB); i += PAGE_SIZE) {
@@ -63,13 +72,13 @@ void* MemoryManager::allocatePageTable()
     return (void*)address;
 }
 
-auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
+auto MemoryManager::ensurePTE(dword* pageDirectory, LinearAddress linearAddress) -> PageTableEntry
 {
     ASSERT_INTERRUPTS_DISABLED();
     dword pageDirectoryIndex = (linearAddress.get() >> 22) & 0x3ff;
     dword pageTableIndex = (linearAddress.get() >> 12) & 0x3ff;
 
-    PageDirectoryEntry pde = PageDirectoryEntry(&m_pageDirectory[pageDirectoryIndex]);
+    PageDirectoryEntry pde = PageDirectoryEntry(&pageDirectory[pageDirectoryIndex]);
     if (!pde.isPresent()) {
 #ifdef MM_DEBUG
         kprintf("MM: PDE %u not present, allocating\n", pageDirectoryIndex);
@@ -103,7 +112,7 @@ void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
-        auto pte = ensurePTE(pteAddress);
+        auto pte = ensurePTE(m_pageDirectory, pteAddress);
         pte.setPhysicalPageBase(pteAddress.get());
         pte.setUserAllowed(false);
         pte.setPresent(false);
@@ -118,7 +127,7 @@ void MemoryManager::identityMap(LinearAddress linearAddress, size_t length)
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
-        auto pte = ensurePTE(pteAddress);
+        auto pte = ensurePTE(m_pageDirectory, pteAddress);
        pte.setPhysicalPageBase(pteAddress.get());
         pte.setUserAllowed(true);
         pte.setPresent(true);
@@ -195,7 +204,7 @@ Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
 byte* MemoryManager::quickMapOnePage(PhysicalAddress physicalAddress)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto pte = ensurePTE(LinearAddress(4 * MB));
+    auto pte = ensurePTE(m_pageDirectory, LinearAddress(4 * MB));
     kprintf("MM: quickmap %x @ %x {pte @ %p}\n", physicalAddress.get(), 4*MB, pte.ptr());
     pte.setPhysicalPageBase(physicalAddress.pageBase());
     pte.setPresent(true);
@@ -223,7 +232,7 @@ bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(0);
         pte.setPresent(false);
         pte.setWritable(false);
@@ -238,12 +247,11 @@ bool MemoryManager::unmapSubregion(Task& task, Task::Subregion& subregion)
 {
     InterruptDisabler disabler;
     auto& region = *subregion.region;
-    auto& zone = *region.zone;
     size_t numPages = subregion.size / 4096;
     ASSERT(numPages);
     for (size_t i = 0; i < numPages; ++i) {
         auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(0);
         pte.setPresent(false);
         pte.setWritable(false);
@@ -278,7 +286,7 @@ bool MemoryManager::mapSubregion(Task& task, Task::Subregion& subregion)
     ASSERT(numPages);
     for (size_t i = 0; i < numPages; ++i) {
         auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(zone.m_pages[firstPage + i].get());
         pte.setPresent(true);
         pte.setWritable(true);
@@ -295,11 +303,11 @@ bool MemoryManager::mapRegion(Task& task, Task::Region& region)
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(zone.m_pages[i].get());
         pte.setPresent(true);
         pte.setWritable(true);
-        pte.setUserAllowed(!task.isRing0());
+        pte.setUserAllowed(!task.isRing0()); // FIXME: This doesn't make sense. Allow USER if the TASK is RING0? Wh...what?
         flushTLB(laddr);
         //kprintf("MM: >> Mapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
     }
diff --git a/Kernel/MemoryManager.h b/Kernel/MemoryManager.h
index c8e6a9ca5f..6a2f5a8c99 100644
--- a/Kernel/MemoryManager.h
+++ b/Kernel/MemoryManager.h
@@ -65,6 +65,8 @@ public:
     void registerZone(Zone&);
     void unregisterZone(Zone&);
 
+    void populatePageDirectory(Task&);
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -158,7 +160,7 @@ private:
         dword* m_pte;
     };
 
-    PageTableEntry ensurePTE(LinearAddress);
+    PageTableEntry ensurePTE(dword* pageDirectory, LinearAddress);
 
     dword* m_pageDirectory;
     dword* m_pageTableZero;
diff --git a/Kernel/ProcFileSystem.cpp b/Kernel/ProcFileSystem.cpp
index d8ba73f983..5912703c20 100644
--- a/Kernel/ProcFileSystem.cpp
+++ b/Kernel/ProcFileSystem.cpp
@@ -172,9 +172,9 @@ ByteBuffer procfs$mounts()
 ByteBuffer procfs$kmalloc()
 {
     InterruptDisabler disabler;
-    auto buffer = ByteBuffer::createUninitialized(128);
+    auto buffer = ByteBuffer::createUninitialized(256);
     char* ptr = (char*)buffer.pointer();
-    ptr += ksprintf(ptr, "eternal: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, sum_alloc, sum_free);
+    ptr += ksprintf(ptr, "eternal: %u\npage-aligned: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, kmalloc_sum_page_aligned, sum_alloc, sum_free);
     buffer.trim(ptr - (char*)buffer.pointer());
     return buffer;
 }
diff --git a/Kernel/Task.cpp b/Kernel/Task.cpp
index b47a3e1192..dda1a78ac5 100644
--- a/Kernel/Task.cpp
+++ b/Kernel/Task.cpp
@@ -408,6 +408,9 @@ Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring,
     , m_tty(tty)
     , m_parentPID(parentPID)
 {
+    m_pageDirectory = (dword*)kmalloc_page_aligned(4096);
+    MM.populatePageDirectory(*this);
+
     if (tty) {
         m_fileHandles.append(tty->open(O_RDONLY)); // stdin
         m_fileHandles.append(tty->open(O_WRONLY)); // stdout
@@ -449,7 +452,7 @@ Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring,
     m_tss.ss = ss;
     m_tss.cs = cs;
 
-    m_tss.cr3 = MM.pageDirectoryBase().get();
+    m_tss.cr3 = (dword)m_pageDirectory;
 
     if (isRing0()) {
         // FIXME: This memory is leaked.
diff --git a/Kernel/Task.h b/Kernel/Task.h
index 20c5000d7d..818c5a3585 100644
--- a/Kernel/Task.h
+++ b/Kernel/Task.h
@@ -152,6 +152,8 @@ private:
 
     void allocateLDT();
 
+    dword* m_pageDirectory { nullptr };
+
     Task* m_prev { nullptr };
     Task* m_next { nullptr };
 
diff --git a/Kernel/kmalloc.cpp b/Kernel/kmalloc.cpp
index b65f4b5936..0956a40103 100644
--- a/Kernel/kmalloc.cpp
+++ b/Kernel/kmalloc.cpp
@@ -22,6 +22,7 @@ typedef struct
 #define CHUNK_SIZE 128
 #define POOL_SIZE (1024 * 1024)
 
+#define PAGE_ALIGNED_BASE_PHYSICAL 0x380000
 #define ETERNAL_BASE_PHYSICAL 0x300000
 #define BASE_PHYS 0x200000
 
@@ -30,8 +31,10 @@ PRIVATE BYTE alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
 volatile DWORD sum_alloc = 0;
 volatile DWORD sum_free = POOL_SIZE;
 volatile size_t kmalloc_sum_eternal = 0;
+volatile size_t kmalloc_sum_page_aligned = 0;
 
 static byte* s_next_eternal_ptr;
+static byte* s_next_page_aligned_ptr;
 
 bool is_kmalloc_address(void* ptr)
 {
@@ -47,10 +50,12 @@ kmalloc_init()
     memset( (void *)BASE_PHYS, 0, POOL_SIZE );
 
     kmalloc_sum_eternal = 0;
+    kmalloc_sum_page_aligned = 0;
     sum_alloc = 0;
     sum_free = POOL_SIZE;
 
     s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
+    s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
 }
 
 void* kmalloc_eternal(size_t size)
@@ -61,6 +66,16 @@ void* kmalloc_eternal(size_t size)
     return ptr;
 }
 
+void* kmalloc_page_aligned(size_t size)
+{
+    ASSERT((size % 4096) == 0);
+    void* ptr = s_next_page_aligned_ptr;
+    s_next_page_aligned_ptr += size;
+    kmalloc_sum_page_aligned += size;
+    return ptr;
+}
+
+
 PUBLIC void *
 kmalloc( DWORD size )
 {
diff --git a/Kernel/kmalloc.h b/Kernel/kmalloc.h
index 72c295f465..4fcc9d5972 100644
--- a/Kernel/kmalloc.h
+++ b/Kernel/kmalloc.h
@@ -3,6 +3,7 @@
 void kmalloc_init();
 void *kmalloc(DWORD size) __attribute__ ((malloc));
 void* kmalloc_eternal(size_t) __attribute__ ((malloc));
+void* kmalloc_page_aligned(size_t) __attribute__ ((malloc));
 void kfree(void*);
 
 bool is_kmalloc_address(void*);
@@ -10,6 +11,7 @@ bool is_kmalloc_address(void*);
 extern volatile DWORD sum_alloc;
 extern volatile DWORD sum_free;
 extern volatile dword kmalloc_sum_eternal;
+extern volatile dword kmalloc_sum_page_aligned;
 
 inline void* operator new(size_t, void* p) { return p; }
 inline void* operator new[](size_t, void* p) { return p; }
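
Taken as a whole, this patch is two small mechanisms: a bump allocator that hands out page-aligned, never-freed memory from a fixed physical window, and a per-task page directory whose first two entries are copied from the kernel's, so every task shares the identity-mapped low 8 MB (each i386 PDE spans 4 MB). Below is a minimal userspace sketch of both ideas; the names (`bump_alloc_page_aligned`, `g_window`) and the PDE bit values are hypothetical stand-ins for the kernel's, not part of the patch:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Simulate the fixed physical window that s_next_page_aligned_ptr walks
// through in the kernel (PAGE_ALIGNED_BASE_PHYSICAL). Like kmalloc_eternal(),
// there is deliberately no free(): allocations are permanent.
alignas(4096) static uint8_t g_window[16 * 4096];
static uint8_t* s_next = g_window;

static void* bump_alloc_page_aligned(size_t size)
{
    assert(size % 4096 == 0);                      // whole pages only
    void* ptr = s_next;
    s_next += size;                                // bump; no bookkeeping, no reuse
    assert(s_next <= g_window + sizeof(g_window)); // window exhausted?
    return ptr;
}

int main()
{
    // A page directory is exactly one page: 1024 dword entries on i386.
    auto* kernel_pd = static_cast<uint32_t*>(bump_alloc_page_aligned(4096));
    auto* task_pd = static_cast<uint32_t*>(bump_alloc_page_aligned(4096));
    assert(reinterpret_cast<uintptr_t>(task_pd) % 4096 == 0);

    // Pretend these are the kernel's PDEs for the identity-mapped low memory
    // (made-up values: present | writable, pointing at 4 MB-aligned frames).
    memset(kernel_pd, 0, 4096);
    kernel_pd[0] = 0x00000003;
    kernel_pd[1] = 0x00400003;

    // populatePageDirectory() in miniature: zero the new directory, then
    // share the kernel's first two PDEs so entries 0 and 1 (0-8 MB) map
    // identically in every task, while everything above stays private.
    memset(task_pd, 0, 4096);
    task_pd[0] = kernel_pd[0];
    task_pd[1] = kernel_pd[1];

    assert(task_pd[0] == kernel_pd[0] && task_pd[1] == kernel_pd[1]);
    assert(task_pd[2] == 0); // rest of the address space starts empty
    return 0;
}
```

Sharing PDEs (rather than copying page tables) means the two directories reference the same low-memory page tables, so later changes to those tables are visible to every task; in the kernel this is what lets `m_tss.cr3` point at a per-task directory while the identity-mapped region keeps working across task switches.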