diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 0bc0cc1d1d..693aa1e0d8 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -115,6 +115,7 @@ union [[gnu::packed]] Descriptor
 class PageDirectoryEntry {
 public:
+    const PageTableEntry* page_table_base() const { return reinterpret_cast<const PageTableEntry*>(m_raw & 0xfffff000u); }
     PageTableEntry* page_table_base() { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
 
     void set_page_table_base(u32 value)
     {
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index bb4eddb75f..dcba4fae39 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -813,7 +813,7 @@ Vector<uintptr_t> Thread::raw_backtrace(uintptr_t ebp) const
     ProcessPagingScope paging_scope(process);
     Vector<uintptr_t> backtrace;
     backtrace.append(ebp);
-    for (uintptr_t* stack_ptr = (uintptr_t*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
+    for (uintptr_t* stack_ptr = (uintptr_t*)ebp; MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
         uintptr_t retaddr = stack_ptr[1];
         backtrace.append(retaddr);
         if (backtrace.size() == Profiling::max_stack_frame_count)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 1e71e3172e..c9a1846733 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -191,6 +191,21 @@ void MemoryManager::parse_memory_map()
         m_user_physical_pages += region.finalize_capacity();
 }
 
+const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+{
+    ASSERT_INTERRUPTS_DISABLED();
+    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
+    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
+
+    auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
+    const PageDirectoryEntry& pde = pd[page_directory_index];
+    if (!pde.is_present())
+        return nullptr;
+
+    return &quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
+}
+
 PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
@@ -611,6 +626,17 @@ bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress
     return validate_range(process, vaddr, size);
 }
 
+bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAddress vaddr, size_t size) const
+{
+    // FIXME: Use the size argument!
+    UNUSED_PARAM(size);
+    auto* pte = const_cast<MemoryManager*>(this)->pte(process.page_directory(), vaddr);
+    if (!pte)
+        return false;
+    return pte->is_present();
+}
+
+
 bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr, size_t size) const
 {
     if (!is_user_address(vaddr))
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 98114e1fe4..ab5023e080 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -91,6 +91,8 @@ public:
 
     bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;
 
+    bool can_read_without_faulting(const Process&, VirtualAddress, size_t) const;
+
     enum class ShouldZeroFill {
         No,
         Yes
@@ -164,6 +166,7 @@ private:
 
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
+    const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
     PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
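
Note on the index arithmetic in the new MemoryManager::pte() above: the 32-bit virtual address is sliced into a 2-bit page directory table index (bits 31:30, selecting one of four page directories), a 9-bit page directory index (bits 29:21), a 9-bit page table index (bits 20:12), and a 12-bit offset into the 4 KiB page. The standalone sketch below is not part of the patch; it merely mirrors the same shifts and masks on a hypothetical example address so the decomposition can be checked in isolation.

#include <cstdint>
#include <cstdio>

// Mirrors the bit slicing in MemoryManager::pte(): 2 + 9 + 9 + 12 = 32 bits.
int main()
{
    uint32_t vaddr = 0xdeadb000u; // hypothetical example address

    uint32_t page_directory_table_index = (vaddr >> 30) & 0x3;  // one of 4 page directories
    uint32_t page_directory_index = (vaddr >> 21) & 0x1ff;      // one of 512 PDEs
    uint32_t page_table_index = (vaddr >> 12) & 0x1ff;          // one of 512 PTEs
    uint32_t offset_in_page = vaddr & 0xfff;                    // byte within the 4 KiB page

    printf("pdt=%u pd=%u pt=%u offset=0x%03x\n",
        (unsigned)page_directory_table_index, (unsigned)page_directory_index,
        (unsigned)page_table_index, (unsigned)offset_in_page);
    return 0;
}

This is also why can_read_without_faulting() can return a definitive answer without touching the address: a nullptr from pte() means the page directory entry is absent, and an existing but non-present PTE means a read would fault.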