From ad5d9d648b835f665e7a3e3eb0847c651d9c8c20 Mon Sep 17 00:00:00 2001
From: Tom
Date: Tue, 6 Jul 2021 21:35:15 -0600
Subject: [PATCH] Kernel: Use PAE to allow accessing all physical memory
 beyond 4GB

We already use PAE for the NX bit, but this changes the PhysicalAddress
structure to be able to hold 64-bit physical addresses. This allows us
to use all the available physical memory.
---
 Kernel/Arch/x86/PageDirectory.h              | 14 ++---
 Kernel/GlobalProcessExposed.cpp              | 22 +++----
 Kernel/PhysicalAddress.h                     | 26 ++++++---
 Kernel/VM/MemoryManager.cpp                  | 56 +++++++++----------
 Kernel/VM/MemoryManager.h                    | 31 +++++-----
 Kernel/VM/PhysicalRegion.cpp                 |  8 +--
 .../SystemMonitor/MemoryStatsWidget.cpp      | 16 +++---
 7 files changed, 87 insertions(+), 86 deletions(-)

diff --git a/Kernel/Arch/x86/PageDirectory.h b/Kernel/Arch/x86/PageDirectory.h
index e740ade989..e288d4d916 100644
--- a/Kernel/Arch/x86/PageDirectory.h
+++ b/Kernel/Arch/x86/PageDirectory.h
@@ -8,6 +8,7 @@
 
 #include <AK/Badge.h>
 #include <AK/Types.h>
+#include <Kernel/PhysicalAddress.h>
 
 namespace Kernel {
 
@@ -16,12 +17,11 @@ class PageTableEntry;
 
 class PageDirectoryEntry {
 public:
-    const PageTableEntry* page_table_base() const { return reinterpret_cast<const PageTableEntry*>(m_raw & 0xfffff000u); }
-    PageTableEntry* page_table_base() { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
+    PhysicalPtr page_table_base() const { return PhysicalAddress::physical_page_base(m_raw); }
     void set_page_table_base(u32 value)
     {
         m_raw &= 0x8000000000000fffULL;
-        m_raw |= value & 0xfffff000;
+        m_raw |= PhysicalAddress::physical_page_base(value);
     }
 
     bool is_null() const { return m_raw == 0; }
@@ -79,11 +79,11 @@ private:
 
 class PageTableEntry {
 public:
-    void* physical_page_base() { return reinterpret_cast<void*>(m_raw & 0xfffff000u); }
-    void set_physical_page_base(u32 value)
+    PhysicalPtr physical_page_base() { return PhysicalAddress::physical_page_base(m_raw); }
+    void set_physical_page_base(PhysicalPtr value)
     {
         m_raw &= 0x8000000000000fffULL;
-        m_raw |= value & 0xfffff000;
+        m_raw |= PhysicalAddress::physical_page_base(value);
     }
 
     u64 raw() const { return (u32)m_raw; }
@@ -141,7 +141,7 @@ class PageDirectoryPointerTable {
 public:
     PageDirectoryEntry* directory(size_t index)
    {
-        return (PageDirectoryEntry*)(raw[index] & ~0xfffu);
+        return (PageDirectoryEntry*)(PhysicalAddress::physical_page_base(raw[index]));
     }
 
     u64 raw[4];
diff --git a/Kernel/GlobalProcessExposed.cpp b/Kernel/GlobalProcessExposed.cpp
index 71f73b71a3..a33429d3ea 100644
--- a/Kernel/GlobalProcessExposed.cpp
+++ b/Kernel/GlobalProcessExposed.cpp
@@ -375,26 +375,18 @@ private:
         kmalloc_stats stats;
         get_kmalloc_stats(stats);
 
-        ScopedSpinLock mm_lock(s_mm_lock);
-        auto user_physical_pages_total = MM.user_physical_pages();
-        auto user_physical_pages_used = MM.user_physical_pages_used();
-        auto user_physical_pages_committed = MM.user_physical_pages_committed();
-        auto user_physical_pages_uncommitted = MM.user_physical_pages_uncommitted();
-
-        auto super_physical_total = MM.super_physical_pages();
-        auto super_physical_used = MM.super_physical_pages_used();
-        mm_lock.unlock();
+        auto system_memory = MemoryManager::the().get_system_memory_info();
 
         JsonObjectSerializer json { builder };
         json.add("kmalloc_allocated", stats.bytes_allocated);
         json.add("kmalloc_available", stats.bytes_free);
         json.add("kmalloc_eternal_allocated", stats.bytes_eternal);
-        json.add("user_physical_allocated", user_physical_pages_used);
-        json.add("user_physical_available", user_physical_pages_total - user_physical_pages_used);
-        json.add("user_physical_committed", user_physical_pages_committed);
-        json.add("user_physical_uncommitted", user_physical_pages_uncommitted);
-        json.add("super_physical_allocated", super_physical_used);
-        json.add("super_physical_available", super_physical_total - super_physical_used);
+        json.add("user_physical_allocated", system_memory.user_physical_pages_used);
+        json.add("user_physical_available", system_memory.user_physical_pages - system_memory.user_physical_pages_used);
+        json.add("user_physical_committed", system_memory.user_physical_pages_committed);
+        json.add("user_physical_uncommitted", system_memory.user_physical_pages_uncommitted);
+        json.add("super_physical_allocated", system_memory.super_physical_pages_used);
+        json.add("super_physical_available", system_memory.super_physical_pages - system_memory.super_physical_pages_used);
         json.add("kmalloc_call_count", stats.kmalloc_call_count);
         json.add("kfree_call_count", stats.kfree_call_count);
         slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) {
diff --git a/Kernel/PhysicalAddress.h b/Kernel/PhysicalAddress.h
index 7c0a2efcd2..96d2042129 100644
--- a/Kernel/PhysicalAddress.h
+++ b/Kernel/PhysicalAddress.h
@@ -9,26 +9,31 @@
 #include <AK/Format.h>
 #include <AK/Types.h>
 
+typedef u64 PhysicalPtr;
+typedef u64 PhysicalSize;
+
 class PhysicalAddress {
 public:
+    ALWAYS_INLINE static PhysicalPtr physical_page_base(PhysicalPtr page_address) { return page_address & ~(PhysicalPtr)0xfff; }
+
     PhysicalAddress() = default;
-    explicit PhysicalAddress(FlatPtr address)
+    explicit PhysicalAddress(PhysicalPtr address)
         : m_address(address)
     {
     }
 
-    [[nodiscard]] PhysicalAddress offset(FlatPtr o) const { return PhysicalAddress(m_address + o); }
-    [[nodiscard]] FlatPtr get() const { return m_address; }
-    void set(FlatPtr address) { m_address = address; }
-    void mask(FlatPtr m) { m_address &= m; }
+    [[nodiscard]] PhysicalAddress offset(PhysicalPtr o) const { return PhysicalAddress(m_address + o); }
+    [[nodiscard]] PhysicalPtr get() const { return m_address; }
+    void set(PhysicalPtr address) { m_address = address; }
+    void mask(PhysicalPtr m) { m_address &= m; }
 
     [[nodiscard]] bool is_null() const { return m_address == 0; }
 
     [[nodiscard]] u8* as_ptr() { return reinterpret_cast<u8*>(m_address); }
     [[nodiscard]] const u8* as_ptr() const { return reinterpret_cast<const u8*>(m_address); }
 
-    [[nodiscard]] PhysicalAddress page_base() const { return PhysicalAddress(m_address & 0xfffff000); }
-    [[nodiscard]] FlatPtr offset_in_page() const { return PhysicalAddress(m_address & 0xfff).get(); }
+    [[nodiscard]] PhysicalAddress page_base() const { return PhysicalAddress(physical_page_base(m_address)); }
+    [[nodiscard]] PhysicalPtr offset_in_page() const { return PhysicalAddress(m_address & 0xfff).get(); }
 
     bool operator==(const PhysicalAddress& other) const { return m_address == other.m_address; }
     bool operator!=(const PhysicalAddress& other) const { return m_address != other.m_address; }
@@ -38,13 +43,16 @@ public:
     bool operator<=(const PhysicalAddress& other) const { return m_address <= other.m_address; }
 
 private:
-    FlatPtr m_address { 0 };
+    PhysicalPtr m_address { 0 };
 };
 
 template<>
 struct AK::Formatter<PhysicalAddress> : AK::Formatter<FormatString> {
     void format(FormatBuilder& builder, PhysicalAddress value)
     {
-        return AK::Formatter<FormatString>::format(builder, "P{}", value.as_ptr());
+        if constexpr (sizeof(PhysicalPtr) == sizeof(u64))
+            return AK::Formatter<FormatString>::format(builder, "P{:016x}", value.get());
+        else
+            return AK::Formatter<FormatString>::format(builder, "P{}", value.as_ptr());
     }
 };
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 9a26833cd7..74b37fb2ef 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -200,7 +200,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
 
         auto start_address = PhysicalAddress(mmap->addr);
-        auto length = static_cast<size_t>(mmap->len);
+        auto length = mmap->len;
         switch (mmap->type) {
         case (MULTIBOOT_MEMORY_AVAILABLE):
             m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
@@ -227,9 +227,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
 
-        if ((mmap->addr + mmap->len) > 0xffffffff)
-            continue;
-
         // Fix up unaligned memory regions.
         auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
         if (diff != 0) {
@@ -247,7 +244,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
             continue;
         }
 
-        for (size_t page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
+        for (PhysicalSize page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
 
             // Skip used memory ranges.
@@ -277,20 +274,21 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages))))));
 
     for (auto& region : m_super_physical_regions) {
-        m_super_physical_pages += region.finalize_capacity();
+        m_system_memory_info.super_physical_pages += region.finalize_capacity();
         dmesgln("MM: Super physical region: {} - {}", region.lower(), region.upper());
     }
 
     for (auto& region : m_user_physical_regions) {
-        m_user_physical_pages += region.finalize_capacity();
+        m_system_memory_info.user_physical_pages += region.finalize_capacity();
         dmesgln("MM: User physical region: {} - {}", region.lower(), region.upper());
     }
 
-    VERIFY(m_super_physical_pages > 0);
-    VERIFY(m_user_physical_pages > 0);
+    VERIFY(m_system_memory_info.super_physical_pages > 0);
+    VERIFY(m_system_memory_info.user_physical_pages > 0);
 
     // We start out with no committed pages
-    m_user_physical_pages_uncommitted = m_user_physical_pages.load();
+    m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
+
     register_reserved_ranges();
     for (auto& range : m_reserved_memory_ranges) {
         dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
@@ -534,11 +532,11 @@ bool MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
-    if (m_user_physical_pages_uncommitted < page_count)
+    if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
         return false;
 
-    m_user_physical_pages_uncommitted -= page_count;
-    m_user_physical_pages_committed += page_count;
+    m_system_memory_info.user_physical_pages_uncommitted -= page_count;
+    m_system_memory_info.user_physical_pages_committed += page_count;
     return true;
 }
 
@@ -546,10 +544,10 @@ void MemoryManager::uncommit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
-    VERIFY(m_user_physical_pages_committed >= page_count);
+    VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
 
-    m_user_physical_pages_uncommitted += page_count;
-    m_user_physical_pages_committed -= page_count;
+    m_system_memory_info.user_physical_pages_uncommitted += page_count;
+    m_system_memory_info.user_physical_pages_committed -= page_count;
 }
 
 void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
@@ -560,12 +558,12 @@ void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
             continue;
 
         region.return_page(page);
-        --m_user_physical_pages_used;
+        --m_system_memory_info.user_physical_pages_used;
 
         // Always return pages to the uncommitted pool. Pages that were
         // committed and allocated are only freed upon request. Once
         // returned there is no guarantee being able to get them back.
-        ++m_user_physical_pages_uncommitted;
+        ++m_system_memory_info.user_physical_pages_uncommitted;
         return;
     }
 
@@ -579,18 +577,18 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
     RefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
-        VERIFY(m_user_physical_pages_committed > 0);
-        m_user_physical_pages_committed--;
+        VERIFY(m_system_memory_info.user_physical_pages_committed > 0);
+        m_system_memory_info.user_physical_pages_committed--;
     } else {
         // We need to make sure we don't touch pages that we have committed to
-        if (m_user_physical_pages_uncommitted == 0)
+        if (m_system_memory_info.user_physical_pages_uncommitted == 0)
             return {};
-        m_user_physical_pages_uncommitted--;
+        m_system_memory_info.user_physical_pages_uncommitted--;
     }
     for (auto& region : m_user_physical_regions) {
         page = region.take_free_page(false);
         if (!page.is_null()) {
-            ++m_user_physical_pages_used;
+            ++m_system_memory_info.user_physical_pages_used;
             break;
         }
     }
@@ -659,7 +657,7 @@ void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page)
         }
 
         region.return_page(page);
-        --m_super_physical_pages_used;
+        --m_system_memory_info.super_physical_pages_used;
         return;
     }
 
@@ -692,7 +690,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
 
     auto cleanup_region = MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write);
     fast_u32_fill((u32*)cleanup_region->vaddr().as_ptr(), 0, (PAGE_SIZE * count) / sizeof(u32));
-    m_super_physical_pages_used += count;
+    m_system_memory_info.super_physical_pages_used += count;
     return physical_pages;
 }
 
@@ -718,7 +716,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
     }
 
     fast_u32_fill((u32*)page->paddr().offset(KERNEL_BASE).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
-    ++m_super_physical_pages_used;
+    ++m_system_memory_info.super_physical_pages_used;
     return page;
 }
 
@@ -755,7 +753,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[4];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
-    if (pte.physical_page_base() != pd_paddr.as_ptr()) {
+    if (pte.physical_page_base() != pd_paddr.get()) {
         pte.set_physical_page_base(pd_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
@@ -780,7 +778,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
     VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[0];
-    if (pte.physical_page_base() != pt_paddr.as_ptr()) {
+    if (pte.physical_page_base() != pt_paddr.get()) {
         pte.set_physical_page_base(pt_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
@@ -811,7 +809,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
     VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
 
     auto& pte = boot_pd3_pt1023[pte_idx];
-    if (pte.physical_page_base() != physical_page.paddr().as_ptr()) {
+    if (pte.physical_page_base() != physical_page.paddr().get()) {
         pte.set_physical_page_base(physical_page.paddr().get());
         pte.set_present(true);
         pte.set_writable(true);
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index f2ee0acd45..d2a87c65b5 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -68,7 +68,7 @@ struct UsedMemoryRange {
 
 struct ContiguousReservedMemoryRange {
     PhysicalAddress start;
-    size_t length {};
+    PhysicalSize length {};
 };
 
 enum class PhysicalMemoryRangeType {
@@ -83,7 +83,7 @@ enum class PhysicalMemoryRangeType {
 struct PhysicalMemoryRange {
     PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
     PhysicalAddress start;
-    size_t length {};
+    PhysicalSize length {};
 };
 
 #define MM Kernel::MemoryManager::the()
@@ -151,12 +151,20 @@
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
-    unsigned user_physical_pages() const { return m_user_physical_pages; }
-    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
-    unsigned user_physical_pages_committed() const { return m_user_physical_pages_committed; }
-    unsigned user_physical_pages_uncommitted() const { return m_user_physical_pages_uncommitted; }
-    unsigned super_physical_pages() const { return m_super_physical_pages; }
-    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
+    struct SystemMemoryInfo {
+        PhysicalSize user_physical_pages { 0 };
+        PhysicalSize user_physical_pages_used { 0 };
+        PhysicalSize user_physical_pages_committed { 0 };
+        PhysicalSize user_physical_pages_uncommitted { 0 };
+        PhysicalSize super_physical_pages { 0 };
+        PhysicalSize super_physical_pages_used { 0 };
+    };
+
+    SystemMemoryInfo get_system_memory_info()
+    {
+        ScopedSpinLock lock(s_mm_lock);
+        return m_system_memory_info;
+    }
 
     template<IteratorFunction<VMObject&> Callback>
     static void for_each_vmobject(Callback callback)
@@ -223,12 +231,7 @@ private:
     RefPtr<PhysicalPage> m_shared_zero_page;
     RefPtr<PhysicalPage> m_lazy_committed_page;
 
-    Atomic<unsigned> m_user_physical_pages { 0 };
-    Atomic<unsigned> m_user_physical_pages_used { 0 };
-    Atomic<unsigned> m_user_physical_pages_committed { 0 };
-    Atomic<unsigned> m_user_physical_pages_uncommitted { 0 };
-    Atomic<unsigned> m_super_physical_pages { 0 };
-    Atomic<unsigned> m_super_physical_pages_used { 0 };
+    SystemMemoryInfo m_system_memory_info;
 
     NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
     NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
index 420442bf41..c95474798b 100644
--- a/Kernel/VM/PhysicalRegion.cpp
+++ b/Kernel/VM/PhysicalRegion.cpp
@@ -76,7 +76,7 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()
     // Check if we can draw one from the return queue
     if (m_recently_returned.size() > 0) {
         u8 index = get_fast_random<u8>() % m_recently_returned.size();
-        Checked<FlatPtr> local_offset = m_recently_returned[index].get();
+        Checked<PhysicalPtr> local_offset = m_recently_returned[index].get();
         local_offset -= m_lower.get();
         m_recently_returned.remove(index);
         VERIFY(!local_offset.has_overflow());
@@ -131,7 +131,7 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
     if (!free_index.has_value())
         return nullptr;
 
-    return PhysicalPage::create(m_lower.offset(free_index.value() * PAGE_SIZE), supervisor);
+    return PhysicalPage::create(m_lower.offset((PhysicalPtr)free_index.value() * PAGE_SIZE), supervisor);
 }
 
 void PhysicalRegion::free_page_at(PhysicalAddress addr)
@@ -142,10 +142,10 @@ void PhysicalRegion::free_page_at(PhysicalAddress addr)
         VERIFY_NOT_REACHED();
     }
 
-    Checked<FlatPtr> local_offset = addr.get();
+    Checked<PhysicalPtr> local_offset = addr.get();
     local_offset -= m_lower.get();
     VERIFY(!local_offset.has_overflow());
-    VERIFY(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
+    VERIFY(local_offset.value() < ((PhysicalPtr)m_pages * PAGE_SIZE));
 
     auto page = local_offset.value() / PAGE_SIZE;
     m_bitmap.set(page, false);
diff --git a/Userland/Applications/SystemMonitor/MemoryStatsWidget.cpp b/Userland/Applications/SystemMonitor/MemoryStatsWidget.cpp
index 8b66213a7c..cebf5d78b8 100644
--- a/Userland/Applications/SystemMonitor/MemoryStatsWidget.cpp
+++ b/Userland/Applications/SystemMonitor/MemoryStatsWidget.cpp
@@ -64,12 +64,12 @@ MemoryStatsWidget::~MemoryStatsWidget()
 {
 }
 
-static inline size_t page_count_to_kb(size_t kb)
+static inline u64 page_count_to_kb(u64 kb)
 {
     return (kb * 4096) / 1024;
 }
 
-static inline size_t bytes_to_kb(size_t bytes)
+static inline u64 bytes_to_kb(u64 bytes)
 {
     return bytes / 1024;
 }
@@ -88,12 +88,12 @@ void MemoryStatsWidget::refresh()
     [[maybe_unused]] unsigned kmalloc_eternal_allocated = json.get("kmalloc_eternal_allocated").to_u32();
     unsigned kmalloc_allocated = json.get("kmalloc_allocated").to_u32();
     unsigned kmalloc_available = json.get("kmalloc_available").to_u32();
-    unsigned user_physical_allocated = json.get("user_physical_allocated").to_u32();
-    unsigned user_physical_available = json.get("user_physical_available").to_u32();
-    unsigned user_physical_committed = json.get("user_physical_committed").to_u32();
-    unsigned user_physical_uncommitted = json.get("user_physical_uncommitted").to_u32();
-    unsigned super_physical_alloc = json.get("super_physical_allocated").to_u32();
-    unsigned super_physical_free = json.get("super_physical_available").to_u32();
+    u64 user_physical_allocated = json.get("user_physical_allocated").to_u64();
+    u64 user_physical_available = json.get("user_physical_available").to_u64();
+    u64 user_physical_committed = json.get("user_physical_committed").to_u64();
+    u64 user_physical_uncommitted = json.get("user_physical_uncommitted").to_u64();
+    u64 super_physical_alloc = json.get("super_physical_allocated").to_u64();
+    u64 super_physical_free = json.get("super_physical_available").to_u64();
     unsigned kmalloc_call_count = json.get("kmalloc_call_count").to_u32();
     unsigned kfree_call_count = json.get("kfree_call_count").to_u32();
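
A note on why the masking above works: with PAE, page directory and page
table entries are 64 bits wide. The low 12 bits hold the flags (present,
writable, ...), bit 63 holds the NX bit, and the bits in between hold the
physical page base. PhysicalAddress::physical_page_base() therefore only
clears the low 12 bits instead of masking with the old 32-bit 0xfffff000,
which silently truncated any base above 4GB. Below is a minimal standalone
sketch of that masking; the sample values and main() are illustrative only
and are not code from this patch:

    #include <cstdint>
    #include <cstdio>

    using PhysicalPtr = uint64_t; // mirrors the typedef in PhysicalAddress.h

    // Same idea as PhysicalAddress::physical_page_base(): keep everything
    // except the low 12 flag bits, so bases beyond 4GB survive intact.
    static constexpr PhysicalPtr physical_page_base(PhysicalPtr addr)
    {
        return addr & ~(PhysicalPtr)0xfff;
    }

    int main()
    {
        PhysicalPtr base = 0x123456000ULL; // a page above the 4GB boundary

        // What set_physical_page_base() does: preserve NX (bit 63) plus the
        // low flag bits, then OR in the new page base.
        uint64_t raw = 0x8000000000000003ULL; // NX | writable | present
        raw = (raw & 0x8000000000000fffULL) | physical_page_base(base);

        // Strip the flag bits and NX again to recover the full 64-bit base.
        printf("base kept:       %#llx\n",
               (unsigned long long)(raw & ~0xfffULL & ~(1ULL << 63)));
        // The old 32-bit mask would have chopped it down to 0x23456000.
        printf("old 32-bit mask: %#llx\n",
               (unsigned long long)(base & 0xfffff000u));
        return 0;
    }

This also shows why the 0x8000000000000fffULL constant in the setters keeps
exactly the bits that are not part of the page base.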