diff --git a/Kernel/VM/ContiguousVMObject.cpp b/Kernel/VM/ContiguousVMObject.cpp
index 5ad8f9b8a9..59fa2a5855 100644
--- a/Kernel/VM/ContiguousVMObject.cpp
+++ b/Kernel/VM/ContiguousVMObject.cpp
@@ -10,9 +10,9 @@
 
 namespace Kernel {
 
-RefPtr<ContiguousVMObject> ContiguousVMObject::try_create_with_size(size_t size, size_t physical_alignment)
+RefPtr<ContiguousVMObject> ContiguousVMObject::try_create_with_size(size_t size)
 {
-    auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size, physical_alignment);
+    auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     if (contiguous_physical_pages.is_empty())
         return {};
     return adopt_ref_if_nonnull(new (nothrow) ContiguousVMObject(size, contiguous_physical_pages));
diff --git a/Kernel/VM/ContiguousVMObject.h b/Kernel/VM/ContiguousVMObject.h
index 269c2667e6..f4af29ba05 100644
--- a/Kernel/VM/ContiguousVMObject.h
+++ b/Kernel/VM/ContiguousVMObject.h
@@ -15,7 +15,7 @@ class ContiguousVMObject final : public VMObject {
 public:
     virtual ~ContiguousVMObject() override;
 
-    static RefPtr<ContiguousVMObject> try_create_with_size(size_t, size_t physical_alignment = PAGE_SIZE);
+    static RefPtr<ContiguousVMObject> try_create_with_size(size_t);
 
 private:
     explicit ContiguousVMObject(size_t, NonnullRefPtrVector<PhysicalPage>&);
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index e2cc4e019b..4d0795a059 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -629,14 +629,14 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault, lock);
 }
 
-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, size_t physical_alignment, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = ContiguousVMObject::try_create_with_size(size, physical_alignment);
+    auto vmobject = ContiguousVMObject::try_create_with_size(size);
     if (!vmobject) {
         kernel_page_directory().range_allocator().deallocate(range.value());
         return {};
@@ -834,7 +834,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
     return page;
 }
 
-NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment)
+NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
@@ -842,7 +842,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
     NonnullRefPtrVector<PhysicalPage> physical_pages;
 
     for (auto& region : m_super_physical_regions) {
-        physical_pages = region.take_contiguous_free_pages(count, physical_alignment);
+        physical_pages = region.take_contiguous_free_pages(count);
         if (!physical_pages.is_empty())
             continue;
     }
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index a57767bae6..d52f665aae 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -145,10 +145,10 @@ public:
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
-    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
+    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
 
-    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
index ae9d5f792f..da2f1ea8d3 100644
--- a/Kernel/VM/PhysicalRegion.cpp
+++ b/Kernel/VM/PhysicalRegion.cpp
@@ -91,11 +91,8 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
     return taken_region;
 }
 
-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, size_t physical_alignment)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
-    // FIXME: Care about alignment.
-    (void)physical_alignment;
-
     auto rounded_page_count = next_power_of_two(count);
     auto order = __builtin_ctz(rounded_page_count);
 
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
index 94daaac1d0..51bd5cb818 100644
--- a/Kernel/VM/PhysicalRegion.h
+++ b/Kernel/VM/PhysicalRegion.h
@@ -39,7 +39,7 @@ public:
    OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);
 
    RefPtr<PhysicalPage> take_free_page();
-    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
     void return_page(PhysicalAddress);
 
 private:
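Not part of the patch: below is a minimal sketch of what a caller looks like after this change, for illustration only. Only the allocate_contiguous_kernel_region signature comes from the diff above; the helper name, buffer size handling, region name, and access flags are assumptions based on common usage elsewhere in the kernel.

// Illustrative caller, assuming SerenityOS kernel headers; not part of the patch.
#include <Kernel/VM/MemoryManager.h>

namespace Kernel {

// Hypothetical helper: allocate a physically contiguous, writable kernel buffer.
// After this patch, callers no longer pass a physical_alignment argument;
// the allocation is page-granular and the alignment parameter is gone entirely.
static OwnPtr<Region> allocate_example_contiguous_buffer(size_t buffer_size)
{
    auto region = MM.allocate_contiguous_kernel_region(
        page_round_up(buffer_size), "ExampleBuffer", Region::Access::Read | Region::Access::Write);
    if (!region)
        return {}; // Allocation can fail; callers must check for a null OwnPtr<Region>.
    return region;
}

}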