diff --git a/Kernel/Bus/PCI/WindowedMMIOAccess.cpp b/Kernel/Bus/PCI/WindowedMMIOAccess.cpp
index 221bf55d3e..181d8e9920 100644
--- a/Kernel/Bus/PCI/WindowedMMIOAccess.cpp
+++ b/Kernel/Bus/PCI/WindowedMMIOAccess.cpp
@@ -22,7 +22,7 @@ UNMAP_AFTER_INIT DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMappin
     PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
     PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(
         PCI_MMIO_CONFIG_SPACE_SIZE * m_device_address.function() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE) * m_device_address.device() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS) * (m_device_address.bus() - mmio_segment.get_start_bus()));
-    m_mapped_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, false, false);
+    m_mapped_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, false);
     m_mapped_region->remap();
 }
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
index 6d0f6763f9..0d40fc02c5 100644
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ b/Kernel/VM/AnonymousVMObject.cpp
@@ -106,7 +106,7 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
 {
     VERIFY(paddr.page_base() == paddr);
     for (size_t i = 0; i < page_count(); ++i)
-        physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false);
+        physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false);
 }
 
 AnonymousVMObject::AnonymousVMObject(PhysicalPage& page)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 13792c91b1..be576ae43a 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -445,7 +445,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
         auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
         auto& physical_page_entry = m_physical_page_entries[physical_page_index];
-        auto physical_page = adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(false, false));
+        auto physical_page = adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(false));
         auto result = kernel_page_tables.set(virtual_page_array_current_page & ~0x1fffff, move(physical_page));
         VERIFY(result == AK::HashSetResult::InsertedNewEntry);
@@ -719,9 +719,11 @@ void MemoryManager::uncommit_user_physical_pages(size_t page_count)
     m_system_memory_info.user_physical_pages_committed -= page_count;
 }
 
-void MemoryManager::deallocate_user_physical_page(PhysicalAddress paddr)
+void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
 {
     ScopedSpinLock lock(s_mm_lock);
+
+    // Are we returning a user page?
     for (auto& region : m_user_physical_regions) {
         if (!region.contains(paddr))
             continue;
@@ -736,8 +738,19 @@ void MemoryManager::deallocate_user_physical_page(PhysicalAddress paddr)
         return;
     }
 
-    dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", paddr);
-    VERIFY_NOT_REACHED();
+    // If it's not a user page, it should be a supervisor page.
+    for (auto& region : m_super_physical_regions) {
+        if (!region.contains(paddr)) {
+            dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", paddr, region.lower(), region.upper());
+            continue;
+        }
+
+        region.return_page(paddr);
+        --m_system_memory_info.super_physical_pages_used;
+        return;
+    }
+
+    PANIC("MM: deallocate_user_physical_page couldn't figure out region for page @ {}", paddr);
 }
 
 RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
@@ -755,7 +768,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
         m_system_memory_info.user_physical_pages_uncommitted--;
     }
     for (auto& region : m_user_physical_regions) {
-        page = region.take_free_page(false);
+        page = region.take_free_page();
         if (!page.is_null()) {
             ++m_system_memory_info.user_physical_pages_used;
             break;
@@ -816,24 +829,6 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
     return page;
 }
 
-void MemoryManager::deallocate_supervisor_physical_page(PhysicalAddress paddr)
-{
-    ScopedSpinLock lock(s_mm_lock);
-    for (auto& region : m_super_physical_regions) {
-        if (!region.contains(paddr)) {
-            dbgln("MM: deallocate_supervisor_physical_page: {} not in {} - {}", paddr, region.lower(), region.upper());
-            continue;
-        }
-
-        region.return_page(paddr);
-        --m_system_memory_info.super_physical_pages_used;
-        return;
-    }
-
-    dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", paddr);
-    VERIFY_NOT_REACHED();
-}
-
 NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment)
 {
     VERIFY(!(size % PAGE_SIZE));
@@ -842,7 +837,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
     NonnullRefPtrVector<PhysicalPage> physical_pages;
 
     for (auto& region : m_super_physical_regions) {
-        physical_pages = region.take_contiguous_free_pages(count, true, physical_alignment);
+        physical_pages = region.take_contiguous_free_pages(count, physical_alignment);
         if (!physical_pages.is_empty())
             continue;
     }
@@ -869,7 +864,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
     RefPtr<PhysicalPage> page;
 
     for (auto& region : m_super_physical_regions) {
-        page = region.take_free_page(true);
+        page = region.take_free_page();
         if (!page.is_null())
             break;
     }
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 16d695e7c2..21aeb61621 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -145,8 +145,7 @@ public:
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
-    void deallocate_user_physical_page(PhysicalAddress);
-    void deallocate_supervisor_physical_page(PhysicalAddress);
+    void deallocate_physical_page(PhysicalAddress);
 
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index 08897da286..671ffb8266 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -47,7 +47,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
 #if ARCH(X86_64)
     PhysicalAddress boot_pml4t_paddr(virtual_to_low_physical((FlatPtr)boot_pml4t));
     dmesgln("MM: boot_pml4t @ {}", boot_pml4t_paddr);
-    m_pml4t = PhysicalPage::create(boot_pml4t_paddr, true, false);
+    m_pml4t = PhysicalPage::create(boot_pml4t_paddr, false);
 #endif
     PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
     PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
@@ -55,9 +55,9 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
     dmesgln("MM: boot_pdpt @ {}", boot_pdpt_paddr);
     dmesgln("MM: boot_pd0 @ {}", boot_pd0_paddr);
     dmesgln("MM: boot_pd3 @ {}", boot_pd3_paddr);
-    m_directory_table = PhysicalPage::create(boot_pdpt_paddr, true, false);
-    m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, true, false);
-    m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, true, false);
+    m_directory_table = PhysicalPage::create(boot_pdpt_paddr, false);
+    m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, false);
+    m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, false);
 }
 
 PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
diff --git a/Kernel/VM/PhysicalPage.cpp b/Kernel/VM/PhysicalPage.cpp
index a1c565dc0f..6396cda0ea 100644
--- a/Kernel/VM/PhysicalPage.cpp
+++ b/Kernel/VM/PhysicalPage.cpp
@@ -10,15 +10,14 @@
 namespace Kernel {
 
-NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
+NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool may_return_to_freelist)
 {
     auto& physical_page_entry = MM.get_physical_page_entry(paddr);
-    return adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(supervisor, may_return_to_freelist));
+    return adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(may_return_to_freelist));
 }
 
-PhysicalPage::PhysicalPage(bool supervisor, bool may_return_to_freelist)
+PhysicalPage::PhysicalPage(bool may_return_to_freelist)
     : m_may_return_to_freelist(may_return_to_freelist)
-    , m_supervisor(supervisor)
 {
 }
 
@@ -31,14 +30,8 @@ void PhysicalPage::free_this()
 {
     if (m_may_return_to_freelist) {
         auto paddr = MM.get_physical_address(*this);
-        bool is_supervisor = m_supervisor;
-
         this->~PhysicalPage(); // delete in place
-
-        if (is_supervisor)
-            MM.deallocate_supervisor_physical_page(paddr);
-        else
-            MM.deallocate_user_physical_page(paddr);
+        MM.deallocate_physical_page(paddr);
     } else {
         this->~PhysicalPage(); // delete in place
     }
diff --git a/Kernel/VM/PhysicalPage.h b/Kernel/VM/PhysicalPage.h
index af68660538..9c61b7dec9 100644
--- a/Kernel/VM/PhysicalPage.h
+++ b/Kernel/VM/PhysicalPage.h
@@ -32,7 +32,7 @@ public:
             free_this();
     }
 
-    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
+    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool may_return_to_freelist = true);
 
     u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
 
@@ -40,14 +40,13 @@ public:
     bool is_lazy_committed_page() const;
 
 private:
-    PhysicalPage(bool supervisor, bool may_return_to_freelist = true);
+    explicit PhysicalPage(bool may_return_to_freelist = true);
     ~PhysicalPage() = default;
 
     void free_this();
 
     Atomic<u32> m_ref_count { 1 };
     bool m_may_return_to_freelist { true };
-    bool m_supervisor { false };
 };
 
 struct PhysicalPageEntry {
diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
index 4e2e532ac5..32b7668f92 100644
--- a/Kernel/VM/PhysicalRegion.cpp
+++ b/Kernel/VM/PhysicalRegion.cpp
@@ -55,7 +55,7 @@ PhysicalRegion PhysicalRegion::take_pages_from_beginning(unsigned page_count)
     return taken_region;
 }
 
-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, size_t physical_alignment)
 {
     VERIFY(m_pages);
     VERIFY(m_used != m_pages);
@@ -66,7 +66,7 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
     auto first_contiguous_page = find_contiguous_free_pages(count, physical_alignment);
 
     for (size_t index = 0; index < count; index++)
-        physical_pages.append(PhysicalPage::create(m_lower.offset(PAGE_SIZE * (index + first_contiguous_page)), supervisor));
+        physical_pages.append(PhysicalPage::create(m_lower.offset(PAGE_SIZE * (index + first_contiguous_page))));
 
     return physical_pages;
 }
@@ -134,7 +134,7 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t cou
     return {};
 }
 
-RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
 {
     VERIFY(m_pages);
 
@@ -142,7 +142,7 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
     if (!free_index.has_value())
         return nullptr;
 
-    return PhysicalPage::create(m_lower.offset((PhysicalPtr)free_index.value() * PAGE_SIZE), supervisor);
+    return PhysicalPage::create(m_lower.offset((PhysicalPtr)free_index.value() * PAGE_SIZE));
 }
 
 void PhysicalRegion::free_page_at(PhysicalAddress addr)
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
index 7f3249736c..343eb1dd6f 100644
--- a/Kernel/VM/PhysicalRegion.h
+++ b/Kernel/VM/PhysicalRegion.h
@@ -31,8 +31,8 @@ public:
     PhysicalRegion take_pages_from_beginning(unsigned);
 
-    RefPtr<PhysicalPage> take_free_page(bool supervisor);
-    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
+    RefPtr<PhysicalPage> take_free_page();
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
     void return_page(PhysicalAddress);
 
 private:
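Taken together, the patch moves the user-vs-supervisor distinction out of PhysicalPage and into MemoryManager::deallocate_physical_page(), which recovers ownership from the address itself: the user physical regions are checked first, then the supervisor regions, and a page belonging to neither is a PANIC. A minimal sketch of the resulting call pattern at a consuming site (the paddr variables below are placeholders for illustration, not names from the patch):

    // Wrap a device MMIO frame; it must never be handed back to any freelist,
    // so may_return_to_freelist is passed as false and free_this() only
    // destroys the PhysicalPage in place.
    auto mmio_page = PhysicalPage::create(some_mmio_paddr, false);

    // Wrap an ordinary RAM frame; may_return_to_freelist defaults to true, so
    // when the last reference drops, free_this() calls
    // MM.deallocate_physical_page(paddr) and the owning PhysicalRegion is
    // located by address rather than by a per-page supervisor bit.
    auto ram_page = PhysicalPage::create(some_ram_paddr);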