From 2c72d495a300bb24dc8b9cf5dd61f09dac7a7736 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Wed, 24 Aug 2022 15:56:26 +0200
Subject: [PATCH] Kernel: Use RefPtr instead of LockRefPtr for PhysicalPage

I believe this to be safe, as the main thing that LockRefPtr provides
over RefPtr is safe copying from a shared LockRefPtr instance. I've
inspected the uses of RefPtr<PhysicalPage> and it seems they're all
guarded by external locking. Some of it is less obvious, but this is
an area where we're making continuous headway.
---
 Kernel/Memory/AnonymousVMObject.cpp         | 23 +++++++++---------
 Kernel/Memory/AnonymousVMObject.h           | 16 ++++++-------
 Kernel/Memory/InodeVMObject.cpp             |  4 ++--
 Kernel/Memory/InodeVMObject.h               |  4 ++--
 Kernel/Memory/MemoryManager.cpp             | 21 +++++++++--------
 Kernel/Memory/MemoryManager.h               | 18 +++++++-------
 Kernel/Memory/PageDirectory.h               | 10 ++++----
 Kernel/Memory/PhysicalPage.cpp              |  4 ++--
 Kernel/Memory/PhysicalPage.h                |  6 ++---
 Kernel/Memory/PhysicalRegion.cpp            |  9 ++++---
 Kernel/Memory/PhysicalRegion.h              |  4 ++--
 Kernel/Memory/PrivateInodeVMObject.cpp      |  4 ++--
 Kernel/Memory/PrivateInodeVMObject.h        |  4 ++--
 Kernel/Memory/Region.cpp                    | 12 +++++-----
 Kernel/Memory/Region.h                      |  8 +++----
 Kernel/Memory/ScatterGatherList.cpp         |  2 +-
 Kernel/Memory/ScatterGatherList.h           |  2 +-
 Kernel/Memory/SharedFramebufferVMObject.cpp | 14 +++++------
 Kernel/Memory/SharedFramebufferVMObject.h   | 26 ++++++++++-----------
 Kernel/Memory/SharedInodeVMObject.cpp       |  4 ++--
 Kernel/Memory/SharedInodeVMObject.h         |  4 ++--
 Kernel/Memory/VMObject.cpp                  |  8 +++----
 Kernel/Memory/VMObject.h                    | 14 +++++------
 Kernel/Storage/ATA/AHCI/Port.cpp            |  4 ++--
 Kernel/Storage/ATA/AHCI/Port.h              | 15 ++++++------
 Kernel/Storage/ATA/ATAPort.h                |  4 ++--
 Kernel/Storage/NVMe/NVMeController.cpp      | 10 ++++----
 Kernel/Storage/NVMe/NVMeInterruptQueue.cpp  |  2 +-
 Kernel/Storage/NVMe/NVMeInterruptQueue.h    |  2 +-
 Kernel/Storage/NVMe/NVMePollQueue.cpp       |  2 +-
 Kernel/Storage/NVMe/NVMePollQueue.h         |  2 +-
 Kernel/Storage/NVMe/NVMeQueue.cpp           |  6 ++---
 Kernel/Storage/NVMe/NVMeQueue.h             | 11 +++++----
 33 files changed, 141 insertions(+), 138 deletions(-)

diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp
index faf48271d1..291951324d 100644
--- a/Kernel/Memory/AnonymousVMObject.cpp
+++ b/Kernel/Memory/AnonymousVMObject.cpp
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include
 #include
 #include
 #include
@@ -91,7 +92,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_phys
 {
     auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
 
-    auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
+    auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
 
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }
@@ -110,9 +111,9 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purg
     return vmobject;
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>> physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
-    auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(physical_pages));
+    auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(physical_pages));
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }
 
@@ -129,7 +130,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
 {
     auto weak_parent = TRY(other.try_make_weak_ptr<AnonymousVMObject>());
     auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));
@@ -139,7 +140,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with
     return vmobject;
 }
 
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
     : VMObject(move(new_physical_pages))
     , m_unused_committed_pages(move(committed_pages))
 {
@@ -154,7 +155,7 @@ AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_
     }
 }
 
-AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
 {
     VERIFY(paddr.page_base() == paddr);
@@ -162,12 +163,12 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPt
 }
 
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
 {
 }
 
-AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
     , m_cow_parent(move(other))
     , m_shared_committed_cow_pages(move(shared_committed_cow_pages))
@@ -270,7 +271,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
     return {};
 }
 
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
 {
     return m_unused_committed_pages->take_one();
 }
@@ -344,7 +345,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         return PageFaultResponse::Continue;
     }
 
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     if (m_shared_committed_cow_pages) {
         dbgln_if(PAGE_FAULT_DEBUG, "    >> It's a committed COW page and it's time to COW!");
         page = m_shared_committed_cow_pages->take_one();
@@ -387,7 +388,7 @@ AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhy
 
 AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() = default;
 
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
     SpinlockLocker locker(m_lock);
     return m_committed_pages.take_one();
diff --git a/Kernel/Memory/AnonymousVMObject.h b/Kernel/Memory/AnonymousVMObject.h
index 40b31b6b7e..d880dd8b87 100644
--- a/Kernel/Memory/AnonymousVMObject.h
+++ b/Kernel/Memory/AnonymousVMObject.h
@@ -20,12 +20,12 @@ public:
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>>);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
     virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
 
-    [[nodiscard]] NonnullLockRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
     size_t cow_pages() const;
     bool should_cow(size_t page_index, bool) const;
@@ -41,12 +41,12 @@ public:
 private:
     class SharedCommittedCowPages;
 
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
 
-    explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
-    explicit AnonymousVMObject(PhysicalAddress, FixedArray<LockRefPtr<PhysicalPage>>&&);
-    explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
-    explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
+    explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
 
     virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
@@ -74,7 +74,7 @@ private:
 
         [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
 
-        [[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+        [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
         void uncommit_one();
 
     private:
diff --git a/Kernel/Memory/InodeVMObject.cpp b/Kernel/Memory/InodeVMObject.cpp
index 25a7541814..d0fbd558b1 100644
--- a/Kernel/Memory/InodeVMObject.cpp
+++ b/Kernel/Memory/InodeVMObject.cpp
@@ -9,14 +9,14 @@
 
 namespace Kernel::Memory {
 
-InodeVMObject::InodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : VMObject(move(new_physical_pages))
     , m_inode(inode)
     , m_dirty_pages(move(dirty_pages))
 {
 }
 
-InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : VMObject(move(new_physical_pages))
     , m_inode(other.m_inode)
     , m_dirty_pages(move(dirty_pages))
diff --git a/Kernel/Memory/InodeVMObject.h b/Kernel/Memory/InodeVMObject.h
index 0a7edd2adb..2b439b32fd 100644
--- a/Kernel/Memory/InodeVMObject.h
+++ b/Kernel/Memory/InodeVMObject.h
@@ -28,8 +28,8 @@ public:
     u32 writable_mappings() const;
 
 protected:
-    explicit InodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit InodeVMObject(InodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     InodeVMObject& operator=(InodeVMObject const&) = delete;
     InodeVMObject& operator=(InodeVMObject&&) = delete;
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 39701e7b9c..ac8fe14f0f 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -6,6 +6,7 @@
 
 #include
 #include
+#include
 #include
 #include
 #include
@@ -746,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_contiguous_kernel_region(
     return region;
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
 {
     dma_buffer_page = TRY(allocate_physical_page());
     // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
@@ -755,12 +756,12 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
 
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
 {
-    LockRefPtr<Memory::PhysicalPage> dma_buffer_page;
+    RefPtr<Memory::PhysicalPage> dma_buffer_page;
 
     return allocate_dma_buffer_page(name, access, dma_buffer_page);
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
 {
     VERIFY(!(size % PAGE_SIZE));
     dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
@@ -771,7 +772,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
 {
     VERIFY(!(size % PAGE_SIZE));
-    NonnullLockRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
 
     return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
 }
@@ -881,10 +882,10 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
     PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
 }
 
-LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
+RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 {
     SpinlockLocker mm_locker(s_mm_lock);
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
         VERIFY(m_system_memory_info.physical_pages_committed > 0);
@@ -906,7 +907,7 @@ LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
     return page;
 }
 
-NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
+NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
     auto page = find_free_physical_page(true);
     if (should_zero_fill == ShouldZeroFill::Yes) {
@@ -918,7 +919,7 @@ NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(
     return page.release_nonnull();
 }
 
-ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
     SpinlockLocker lock(s_mm_lock);
     auto page = find_free_physical_page(false);
@@ -974,7 +975,7 @@ ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(S
     return page.release_nonnull();
 }
 
-ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker mm_lock(s_mm_lock);
@@ -1160,7 +1161,7 @@ CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
         MM.uncommit_physical_pages({}, m_page_count);
 }
 
-NonnullLockRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
 {
     VERIFY(m_page_count > 0);
     --m_page_count;
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 4b62ed4e45..9b733a8d31 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -119,7 +119,7 @@ public:
     bool is_empty() const { return m_page_count == 0; }
     size_t page_count() const { return m_page_count; }
 
-    [[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
     void uncommit_one();
 
     void operator=(CommittedPhysicalPageSet&&) = delete;
@@ -169,15 +169,15 @@ public:
     ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
     void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
 
-    NonnullLockRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-    ErrorOr<NonnullLockRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-    ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+    NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
+    ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+    ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
 
     ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -258,7 +258,7 @@ private:
 
     static Region* find_region_from_vaddr(VirtualAddress);
 
-    LockRefPtr<PhysicalPage> find_free_physical_page(bool);
+    RefPtr<PhysicalPage> find_free_physical_page(bool);
 
     ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
     {
@@ -286,8 +286,8 @@ private:
 
     LockRefPtr<PageDirectory> m_kernel_page_directory;
 
-    LockRefPtr<PhysicalPage> m_shared_zero_page;
-    LockRefPtr<PhysicalPage> m_lazy_committed_page;
+    RefPtr<PhysicalPage> m_shared_zero_page;
+    RefPtr<PhysicalPage> m_lazy_committed_page;
 
     SystemMemoryInfo m_system_memory_info;
diff --git a/Kernel/Memory/PageDirectory.h b/Kernel/Memory/PageDirectory.h
index 20b4a059f3..e5e3f8d043 100644
--- a/Kernel/Memory/PageDirectory.h
+++ b/Kernel/Memory/PageDirectory.h
@@ -10,8 +10,8 @@
 #include
 #include
 #include
+#include
 #include
-#include
 #include
 #include
@@ -64,13 +64,13 @@ private:
     AddressSpace* m_space { nullptr };
 #if ARCH(X86_64)
-    LockRefPtr<PhysicalPage> m_pml4t;
+    RefPtr<PhysicalPage> m_pml4t;
 #endif
-    LockRefPtr<PhysicalPage> m_directory_table;
+    RefPtr<PhysicalPage> m_directory_table;
 #if ARCH(X86_64)
-    LockRefPtr<PhysicalPage> m_directory_pages[512];
+    RefPtr<PhysicalPage> m_directory_pages[512];
 #else
-    LockRefPtr<PhysicalPage> m_directory_pages[4];
+    RefPtr<PhysicalPage> m_directory_pages[4];
 #endif
     RecursiveSpinlock m_lock { LockRank::None };
 };
diff --git a/Kernel/Memory/PhysicalPage.cpp b/Kernel/Memory/PhysicalPage.cpp
index 15474b99c6..b4890b5c4a 100644
--- a/Kernel/Memory/PhysicalPage.cpp
+++ b/Kernel/Memory/PhysicalPage.cpp
@@ -10,10 +10,10 @@
 
 namespace Kernel::Memory {
 
-NonnullLockRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
+NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
 {
     auto& physical_page_entry = MM.get_physical_page_entry(paddr);
-    return adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
+    return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
 }
 
 PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)
diff --git a/Kernel/Memory/PhysicalPage.h b/Kernel/Memory/PhysicalPage.h
index e4f31c9863..724eb6d5c4 100644
--- a/Kernel/Memory/PhysicalPage.h
+++ b/Kernel/Memory/PhysicalPage.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
 #pragma once
 
-#include
+#include
 #include
 
 namespace Kernel::Memory {
@@ -36,7 +36,7 @@ public:
             free_this();
     }
 
-    static NonnullLockRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
+    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
 
     u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
diff --git a/Kernel/Memory/PhysicalRegion.cpp b/Kernel/Memory/PhysicalRegion.cpp
index c245a604e4..3e30f0f3c7 100644
--- a/Kernel/Memory/PhysicalRegion.cpp
+++ b/Kernel/Memory/PhysicalRegion.cpp
@@ -5,9 +5,8 @@
  */
 
 #include
+#include
 #include
-#include
-#include
 #include
 #include
 #include
@@ -76,7 +75,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
     return try_create(taken_lower, taken_upper);
 }
 
-NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
     auto rounded_page_count = next_power_of_two(count);
     auto order = count_trailing_zeroes(rounded_page_count);
@@ -96,7 +95,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
     if (!page_base.has_value())
         return {};
 
-    NonnullLockRefPtrVector<PhysicalPage> physical_pages;
+    NonnullRefPtrVector<PhysicalPage> physical_pages;
     physical_pages.ensure_capacity(count);
 
     for (size_t i = 0; i < count; ++i)
@@ -104,7 +103,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
     return physical_pages;
 }
 
-LockRefPtr<PhysicalPage> PhysicalRegion::take_free_page()
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
 {
     if (m_usable_zones.is_empty())
         return nullptr;
diff --git a/Kernel/Memory/PhysicalRegion.h b/Kernel/Memory/PhysicalRegion.h
index 9310ae9d44..19c527f7f1 100644
--- a/Kernel/Memory/PhysicalRegion.h
+++ b/Kernel/Memory/PhysicalRegion.h
@@ -33,8 +33,8 @@ public:
 
     OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);
 
-    LockRefPtr<PhysicalPage> take_free_page();
-    NonnullLockRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+    RefPtr<PhysicalPage> take_free_page();
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
     void return_page(PhysicalAddress);
 
 private:
diff --git a/Kernel/Memory/PrivateInodeVMObject.cpp b/Kernel/Memory/PrivateInodeVMObject.cpp
index 65418b34db..314ee5351b 100644
--- a/Kernel/Memory/PrivateInodeVMObject.cpp
+++ b/Kernel/Memory/PrivateInodeVMObject.cpp
@@ -23,12 +23,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }
diff --git a/Kernel/Memory/PrivateInodeVMObject.h b/Kernel/Memory/PrivateInodeVMObject.h
index 31b2df4757..c40b6d45c8 100644
--- a/Kernel/Memory/PrivateInodeVMObject.h
+++ b/Kernel/Memory/PrivateInodeVMObject.h
@@ -23,8 +23,8 @@ public:
 private:
     virtual bool is_private_inode() const override { return true; }
 
-    explicit PrivateInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 9ea6f1b7ad..9abed92284 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -202,7 +202,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
     return {};
 }
 
-bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage> page)
+bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
 {
     VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
 
@@ -240,7 +240,7 @@ bool Region::map_individual_page_impl(size_t page_index, LockRefPtr
 
 bool Region::map_individual_page_impl(size_t page_index)
 {
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     {
         SpinlockLocker vmobject_locker(vmobject().m_lock);
         page = physical_page(page_index);
@@ -249,7 +249,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     return map_individual_page_impl(page_index, page);
 }
 
-bool Region::remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage> physical_page)
+bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
 {
     SpinlockLocker page_lock(m_page_directory->get_lock());
 
@@ -408,7 +408,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
     if (current_thread != nullptr)
         current_thread->did_zero_fault();
 
-    LockRefPtr<PhysicalPage> new_physical_page;
+    RefPtr<PhysicalPage> new_physical_page;
 
     if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
         VERIFY(m_vmobject->is_anonymous());
@@ -543,14 +543,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     return PageFaultResponse::Continue;
 }
 
-LockRefPtr<PhysicalPage> Region::physical_page(size_t index) const
+RefPtr<PhysicalPage> Region::physical_page(size_t index) const
 {
     SpinlockLocker vmobject_locker(vmobject().m_lock);
     VERIFY(index < page_count());
     return vmobject().physical_pages()[first_page_index() + index];
 }
 
-LockRefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
+RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
 {
     VERIFY(vmobject().m_lock.is_locked_by_current_processor());
     VERIFY(index < page_count());
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index 3f26509261..aabceef63d 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -158,8 +158,8 @@ public:
         return size() / PAGE_SIZE;
     }
 
-    LockRefPtr<PhysicalPage> physical_page(size_t index) const;
-    LockRefPtr<PhysicalPage>& physical_page_slot(size_t index);
+    RefPtr<PhysicalPage> physical_page(size_t index) const;
+    RefPtr<PhysicalPage>& physical_page_slot(size_t index);
 
     [[nodiscard]] size_t offset_in_vmobject() const
     {
@@ -208,7 +208,7 @@ private:
     Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
     Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
-    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage>);
+    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);
 
     void set_access_bit(Access access, bool b)
     {
@@ -223,7 +223,7 @@ private:
     [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
 
     [[nodiscard]] bool map_individual_page_impl(size_t page_index);
-    [[nodiscard]] bool map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage>);
+    [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
 
     LockRefPtr<PageDirectory> m_page_directory;
     VirtualRange m_range;
diff --git a/Kernel/Memory/ScatterGatherList.cpp b/Kernel/Memory/ScatterGatherList.cpp
index 8149a886b7..1eb9b6e53b 100644
--- a/Kernel/Memory/ScatterGatherList.cpp
+++ b/Kernel/Memory/ScatterGatherList.cpp
@@ -8,7 +8,7 @@
 
 namespace Kernel::Memory {
 
-LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
+LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
     auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
     if (maybe_vm_object.is_error()) {
diff --git a/Kernel/Memory/ScatterGatherList.h b/Kernel/Memory/ScatterGatherList.h
index 1fc78bab70..bcd2e496ce 100644
--- a/Kernel/Memory/ScatterGatherList.h
+++ b/Kernel/Memory/ScatterGatherList.h
@@ -19,7 +19,7 @@ namespace Kernel::Memory {
 
 class ScatterGatherList final : public AtomicRefCounted<ScatterGatherList> {
 public:
-    static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
+    static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
     VMObject const& vmobject() const { return m_vm_object; }
     VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
     size_t scatters_count() const { return m_vm_object->physical_pages().size(); }
diff --git a/Kernel/Memory/SharedFramebufferVMObject.cpp b/Kernel/Memory/SharedFramebufferVMObject.cpp
index 7b4c605314..d9401aa48e 100644
--- a/Kernel/Memory/SharedFramebufferVMObject.cpp
+++ b/Kernel/Memory/SharedFramebufferVMObject.cpp
@@ -56,21 +56,21 @@ ErrorOr<void> SharedFramebufferVMObject::create_real_writes_framebuffer_vm_objec
     return {};
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
 {
     return m_physical_pages.span();
 }
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
 {
     return m_physical_pages.span();
 }
@@ -92,14 +92,14 @@ void SharedFramebufferVMObject::switch_to_real_framebuffer_writes(Badge
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
         return VMObject::physical_pages();
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
@@ -107,7 +107,7 @@ Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
+SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
     : VMObject(move(new_physical_pages))
     , m_real_framebuffer_vmobject(real_framebuffer_vmobject)
     , m_committed_pages(move(committed_pages))
diff --git a/Kernel/Memory/SharedFramebufferVMObject.h b/Kernel/Memory/SharedFramebufferVMObject.h
index 0823f92583..2c95d4eb6d 100644
--- a/Kernel/Memory/SharedFramebufferVMObject.h
+++ b/Kernel/Memory/SharedFramebufferVMObject.h
@@ -22,15 +22,15 @@ public:
         static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
 
     private:
-        FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+        FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
             : VMObject(move(new_physical_pages))
             , m_parent_object(parent_object)
         {
         }
         virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; }
         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
-        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };
@@ -39,15 +39,15 @@ public:
         static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
 
     private:
-        RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+        RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
            : VMObject(move(new_physical_pages))
            , m_parent_object(parent_object)
         {
         }
         virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; }
         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
-        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };
@@ -60,14 +60,14 @@ public:
     void switch_to_fake_sink_framebuffer_writes(Badge<Kernel::DisplayConnector>);
     void switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>);
 
-    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override;
-    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override;
+    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override;
+    virtual Span<RefPtr<PhysicalPage>> physical_pages() override;
 
-    Span<LockRefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
-    Span<LockRefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
+    Span<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
+    Span<RefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
 
-    Span<LockRefPtr<PhysicalPage>> real_framebuffer_physical_pages();
-    Span<LockRefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
+    Span<RefPtr<PhysicalPage>> real_framebuffer_physical_pages();
+    Span<RefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
 
     FakeWritesFramebufferVMObject const& fake_writes_framebuffer_vmobject() const { return *m_fake_writes_framebuffer_vmobject; }
     FakeWritesFramebufferVMObject& fake_writes_framebuffer_vmobject() { return *m_fake_writes_framebuffer_vmobject; }
@@ -76,7 +76,7 @@ public:
     RealWritesFramebufferVMObject& real_writes_framebuffer_vmobject() { return *m_real_writes_framebuffer_vmobject; }
 
 private:
-    SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
+    SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
 
     virtual StringView class_name() const override { return "SharedFramebufferVMObject"sv; }
diff --git a/Kernel/Memory/SharedInodeVMObject.cpp b/Kernel/Memory/SharedInodeVMObject.cpp
index 74853c8600..aa78cd2aac 100644
--- a/Kernel/Memory/SharedInodeVMObject.cpp
+++ b/Kernel/Memory/SharedInodeVMObject.cpp
@@ -29,12 +29,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }
diff --git a/Kernel/Memory/SharedInodeVMObject.h b/Kernel/Memory/SharedInodeVMObject.h
index 6f3c554a1d..8a9ee21c46 100644
--- a/Kernel/Memory/SharedInodeVMObject.h
+++ b/Kernel/Memory/SharedInodeVMObject.h
@@ -23,8 +23,8 @@ public:
 private:
     virtual bool is_shared_inode() const override { return true; }
 
-    explicit SharedInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }
diff --git a/Kernel/Memory/VMObject.cpp b/Kernel/Memory/VMObject.cpp
index abe5fb5cc6..d6d09c888b 100644
--- a/Kernel/Memory/VMObject.cpp
+++ b/Kernel/Memory/VMObject.cpp
@@ -17,17 +17,17 @@ SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
     return s_all_instances;
 }
 
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
 {
     return m_physical_pages.try_clone();
 }
 
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
 {
-    return FixedArray<LockRefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+    return FixedArray<RefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
 }
 
-VMObject::VMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : m_physical_pages(move(new_physical_pages))
 {
     all_instances().with([&](auto& list) { list.append(*this); });
diff --git a/Kernel/Memory/VMObject.h b/Kernel/Memory/VMObject.h
index b5801cddb2..d6cc4ecce4 100644
--- a/Kernel/Memory/VMObject.h
+++ b/Kernel/Memory/VMObject.h
@@ -8,9 +8,9 @@
 
 #include
 #include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -35,8 +35,8 @@ public:
 
     size_t page_count() const { return m_physical_pages.size(); }
 
-    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
-    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
+    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
+    virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
 
     size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
 
@@ -55,15 +55,15 @@ public:
     }
 
 protected:
-    static ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
-    ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_clone_physical_pages() const;
-    explicit VMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
+    static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
+    ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
+    explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);
 
     template<typename Callback>
     void for_each_region(Callback);
 
     IntrusiveListNode<VMObject> m_list_node;
-    FixedArray<LockRefPtr<PhysicalPage>> m_physical_pages;
+    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
 
     mutable RecursiveSpinlock m_lock { LockRank::None };
diff --git a/Kernel/Storage/ATA/AHCI/Port.cpp b/Kernel/Storage/ATA/AHCI/Port.cpp
index e21f89127a..aff473adc8 100644
--- a/Kernel/Storage/ATA/AHCI/Port.cpp
+++ b/Kernel/Storage/ATA/AHCI/Port.cpp
@@ -54,7 +54,7 @@ ErrorOr<void> AHCIPort::allocate_resources_and_initialize_ports()
     return {};
 }
 
-UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullLockRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
+UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
     : m_port_index(port_index)
     , m_hba_capabilities(hba_capabilities)
     , m_identify_buffer_page(move(identify_buffer_page))
@@ -496,7 +496,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
     VERIFY(m_lock.is_locked());
     VERIFY(request.block_count() > 0);
 
-    NonnullLockRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
+    NonnullRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
     for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
         allocated_dma_regions.append(m_dma_buffers.at(index));
     }
diff --git a/Kernel/Storage/ATA/AHCI/Port.h b/Kernel/Storage/ATA/AHCI/Port.h
index bc5c81d68c..5c3f91df39 100644
--- a/Kernel/Storage/ATA/AHCI/Port.h
+++ b/Kernel/Storage/ATA/AHCI/Port.h
@@ -6,10 +6,11 @@
 
 #pragma once
 
+#include
 #include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -56,7 +57,7 @@ private:
     bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
     bool initialize();
 
-    AHCIPort(AHCIController const&, NonnullLockRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
+    AHCIPort(AHCIController const&, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
 
     ALWAYS_INLINE void clear_sata_error_register() const;
 
@@ -111,11 +112,11 @@ private:
 
     mutable bool m_wait_for_completion { false };
 
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
-    LockRefPtr<Memory::PhysicalPage> m_command_list_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
+    RefPtr<Memory::PhysicalPage> m_command_list_page;
     OwnPtr<Memory::Region> m_command_list_region;
-    LockRefPtr<Memory::PhysicalPage> m_fis_receive_page;
+    RefPtr<Memory::PhysicalPage> m_fis_receive_page;
     LockRefPtr<ATADevice> m_connected_device;
 
     u32 m_port_index;
@@ -125,7 +126,7 @@ private:
     // it's probably better to just "cache" this here instead.
     AHCI::HBADefinedCapabilities const m_hba_capabilities;
 
-    NonnullLockRefPtr<Memory::PhysicalPage> m_identify_buffer_page;
+    NonnullRefPtr<Memory::PhysicalPage> m_identify_buffer_page;
 
     volatile AHCI::PortRegisters& m_port_registers;
     LockWeakPtr<AHCIController> m_parent_controller;
diff --git a/Kernel/Storage/ATA/ATAPort.h b/Kernel/Storage/ATA/ATAPort.h
index 0ae444012b..0b576f403d 100644
--- a/Kernel/Storage/ATA/ATAPort.h
+++ b/Kernel/Storage/ATA/ATAPort.h
@@ -145,8 +145,8 @@ protected:
 
     OwnPtr<Memory::Region> m_prdt_region;
     OwnPtr<Memory::Region> m_dma_buffer_region;
-    LockRefPtr<Memory::PhysicalPage> m_prdt_page;
-    LockRefPtr<Memory::PhysicalPage> m_dma_buffer_page;
+    RefPtr<Memory::PhysicalPage> m_prdt_page;
+    RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
 
     const u8 m_port_index;
     NonnullLockRefPtrVector<ATADevice> m_ata_devices;
diff --git a/Kernel/Storage/NVMe/NVMeController.cpp b/Kernel/Storage/NVMe/NVMeController.cpp
index 92cc8b9f65..3bd8fd53cf 100644
--- a/Kernel/Storage/NVMe/NVMeController.cpp
+++ b/Kernel/Storage/NVMe/NVMeController.cpp
@@ -152,7 +152,7 @@ UNMAP_AFTER_INIT u32 NVMeController::get_admin_q_dept()
 
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
 {
-    LockRefPtr<Memory::PhysicalPage> prp_dma_buffer;
+    RefPtr<Memory::PhysicalPage> prp_dma_buffer;
     OwnPtr<Memory::Region> prp_dma_region;
     auto namespace_data_struct = TRY(ByteBuffer::create_zeroed(NVMe_IDENTIFY_SIZE));
     u32 active_namespace_list[NVMe_IDENTIFY_SIZE / sizeof(u32)];
@@ -259,9 +259,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 {
     auto qdepth = get_admin_q_dept();
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(qdepth), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(qdepth), 4096);
     if (!reset_controller()) {
@@ -300,9 +300,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
 {
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);
diff --git a/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp b/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
index 310a491da6..105c4a6251 100644
--- a/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
+++ b/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
@@ -11,7 +11,7 @@
 
 namespace Kernel {
 
-UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
     , IRQHandler(irq)
 {
diff --git a/Kernel/Storage/NVMe/NVMeInterruptQueue.h b/Kernel/Storage/NVMe/NVMeInterruptQueue.h
index b69370cc66..9b562efb58 100644
--- a/Kernel/Storage/NVMe/NVMeInterruptQueue.h
+++ b/Kernel/Storage/NVMe/NVMeInterruptQueue.h
@@ -13,7 +13,7 @@ namespace Kernel {
 class NVMeInterruptQueue : public NVMeQueue
     , public IRQHandler {
 public:
-    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMeInterruptQueue() override {};
diff --git a/Kernel/Storage/NVMe/NVMePollQueue.cpp b/Kernel/Storage/NVMe/NVMePollQueue.cpp
index fa9aec2496..7219034666 100644
--- a/Kernel/Storage/NVMe/NVMePollQueue.cpp
+++ b/Kernel/Storage/NVMe/NVMePollQueue.cpp
@@ -10,7 +10,7 @@
 #include "NVMeDefinitions.h"
 
 namespace Kernel {
-UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
 {
 }
diff --git a/Kernel/Storage/NVMe/NVMePollQueue.h b/Kernel/Storage/NVMe/NVMePollQueue.h
index 8a68881309..458f11c14f 100644
--- a/Kernel/Storage/NVMe/NVMePollQueue.h
+++ b/Kernel/Storage/NVMe/NVMePollQueue.h
@@ -12,7 +12,7 @@ namespace Kernel {
 
 class NVMePollQueue : public NVMeQueue {
 public:
-    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMePollQueue() override {};
diff --git a/Kernel/Storage/NVMe/NVMeQueue.cpp b/Kernel/Storage/NVMe/NVMeQueue.cpp
index b160e86d57..8e206e07aa 100644
--- a/Kernel/Storage/NVMe/NVMeQueue.cpp
+++ b/Kernel/Storage/NVMe/NVMeQueue.cpp
@@ -13,10 +13,10 @@
 #include
 
 namespace Kernel {
-ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
 {
     // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
-    LockRefPtr<Memory::PhysicalPage> rw_dma_page;
+    RefPtr<Memory::PhysicalPage> rw_dma_page;
     auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
     if (!irq.has_value()) {
         auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), *rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
@@ -26,7 +26,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional
 
-UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
     : m_current_request(nullptr)
     , m_rw_dma_region(move(rw_dma_region))
     , m_qid(qid)
diff --git a/Kernel/Storage/NVMe/NVMeQueue.h b/Kernel/Storage/NVMe/NVMeQueue.h
index 7f1a792079..e58a06765e 100644
--- a/Kernel/Storage/NVMe/NVMeQueue.h
+++ b/Kernel/Storage/NVMe/NVMeQueue.h
@@ -7,6 +7,7 @@
 #pragma once
 
 #include
+#include
 #include
 #include
 #include
@@ -29,7 +30,7 @@ struct DoorbellRegister {
 class AsyncBlockDeviceRequest;
 
 class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     bool is_admin_queue() { return m_admin_queue; };
     u16 submit_sync_sqe(NVMeSubmission&);
     void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
@@ -43,7 +44,7 @@ protected:
     {
         m_db_regs->sq_tail = m_sq_tail;
     }
-    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
 
 private:
     bool cqe_available();
@@ -70,12 +71,12 @@ private:
     u32 m_qdepth {};
     Spinlock m_sq_lock { LockRank::Interrupts };
    OwnPtr<Memory::Region> m_cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
     Span<NVMeSubmission> m_sqe_array;
     OwnPtr<Memory::Region> m_sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
     Span<NVMeCompletion> m_cqe_array;
     Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
-    NonnullLockRefPtr<Memory::PhysicalPage> m_rw_dma_page;
+    NonnullRefPtr<Memory::PhysicalPage> m_rw_dma_page;
 };
 }
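
Note for reviewers: the distinction the commit message relies on, sketched as a
hypothetical Example class (not code from this patch). A plain RefPtr is only
safe to copy between threads while some external lock is held; LockRefPtr
serialized such copies internally. This is the same shape as
Region::physical_page() in the diff above, which copies the slot under
vmobject().m_lock.

// Hypothetical sketch, not part of this patch: the locking pattern that makes
// plain RefPtr<PhysicalPage> members safe. RefPtr's ref-count is atomic, but
// assigning or copying the pointer slot itself is not, so any slot shared
// between threads must be guarded externally.

#include <AK/RefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalPage.h>

namespace Kernel::Memory {

class Example {
public:
    RefPtr<PhysicalPage> page() const
    {
        // Copy out while holding the lock; the returned RefPtr is then
        // privately owned by the caller and safe to use without the lock.
        SpinlockLocker locker(m_lock);
        return m_page;
    }

    void set_page(RefPtr<PhysicalPage> page)
    {
        SpinlockLocker locker(m_lock);
        m_page = move(page);
    }

private:
    mutable Spinlock m_lock { LockRank::None };
    RefPtr<PhysicalPage> m_page; // Shared slot, guarded by m_lock.
};

}

Under that invariant, the atomic ref-count inside PhysicalPage itself is all
that RefPtr needs, and the heavier LockRefPtr machinery buys nothing.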