diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 9374a3dee7..2923d9bd9f 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -157,7 +157,7 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
     m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
 }
 
-bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, const Range& range) const
+bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, Range const& range) const
 {
     VERIFY(!m_reserved_memory_ranges.is_empty());
     for (auto& current_range : m_reserved_memory_ranges) {
@@ -493,7 +493,7 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
 
     auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
-    const PageDirectoryEntry& pde = pd[page_directory_index];
+    PageDirectoryEntry const& pde = pd[page_directory_index];
     if (!pde.is_present())
         return nullptr;
 
@@ -616,7 +616,7 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
     return find_user_region_from_vaddr(*page_directory->space(), vaddr);
 }
 
-PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
+PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 {
     VERIFY_INTERRUPTS_DISABLED();
     ScopedSpinLock lock(s_mm_lock);
@@ -689,7 +689,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(Range const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
     auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
@@ -915,7 +915,7 @@ void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
     Processor::flush_tlb_local(vaddr, page_count);
 }
 
-void MemoryManager::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
+void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
 {
     Processor::flush_tlb(page_directory, vaddr, page_count);
 }
@@ -1008,7 +1008,7 @@ void MemoryManager::unquickmap_page()
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }
 
-bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack(Process const& process, VirtualAddress vaddr) const
 {
     if (!is_user_address(vaddr))
         return false;
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index b6df2b15a5..7d378702ae 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -121,7 +121,7 @@ public:
         return Processor::current().get_mm_data();
     }
 
-    PageFaultResponse handle_page_fault(const PageFault&);
+    PageFaultResponse handle_page_fault(PageFault const&);
 
     void set_page_writable_direct(VirtualAddress, bool);
 
@@ -131,7 +131,7 @@ public:
     static void enter_process_paging_scope(Process&);
     static void enter_space(Space&);
 
-    bool validate_user_stack(const Process&, VirtualAddress) const;
+    bool validate_user_stack(Process const&, VirtualAddress) const;
 
     enum class ShouldZeroFill {
         No,
@@ -151,7 +151,7 @@ public:
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };
@@ -193,8 +193,8 @@ public:
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
-    const Vector<UsedMemoryRange>& used_memory_ranges() { return m_used_memory_ranges; }
-    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, const Range&) const;
+    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
+    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;
 
     PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
     PhysicalAddress get_physical_address(PhysicalPage const&);
@@ -214,7 +214,7 @@ private:
     void protect_kernel_image();
     void parse_memory_map();
     static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
-    static void flush_tlb(const PageDirectory*, VirtualAddress, size_t page_count = 1);
+    static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);
 
     static Region* kernel_region_from_vaddr(VirtualAddress);
 
@@ -286,7 +286,7 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
     return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
 }
 
-inline bool is_user_range(const Range& range)
+inline bool is_user_range(Range const& range)
 {
     return is_user_range(range.base(), range.size());
 }