	Kernel: Use RefPtr instead of LockRefPtr for PhysicalPage
I believe this to be safe, as the main thing that LockRefPtr provides over RefPtr is safe copying from a shared LockRefPtr instance. I've inspected the uses of RefPtr<PhysicalPage> and it seems they're all guarded by external locking. Some of it is less obvious, but this is an area where we're making continuous headway.
parent 5a804b9a1d
commit 2c72d495a3

33 changed files with 141 additions and 138 deletions
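The guarantee being given up here can be illustrated with a minimal standalone sketch. What LockRefPtr adds over RefPtr is that copying out of a pointer slot reachable from several CPUs is internally serialized; with plain RefPtr, loading the pointer and bumping its refcount are separate steps, so every copy from a shared slot must happen under an external lock (the diff below shows this pattern, e.g. Region::physical_page() takes the VMObject's spinlock before copying out of physical_pages()). The sketch below is an analogue, not SerenityOS code: std::shared_ptr and std::mutex stand in for the kernel's RefPtr and Spinlock, and PageSlot/copy_page are illustrative names.

#include <memory>
#include <mutex>

struct PhysicalPage { };

// A slot that several threads may read and overwrite concurrently.
// Copying `page` from two threads at once without the lock would race:
// the pointer load and the refcount increment are not one atomic step.
struct PageSlot {
    std::mutex lock;                    // the "external locking" the commit relies on
    std::shared_ptr<PhysicalPage> page; // stands in for RefPtr<PhysicalPage>
};

std::shared_ptr<PhysicalPage> copy_page(PageSlot& slot)
{
    std::lock_guard guard(slot.lock); // serialize against writers of this slot
    return slot.page;                 // refcount is bumped while the slot is stable
}

int main()
{
    PageSlot slot;
    slot.page = std::make_shared<PhysicalPage>();
    auto copy = copy_page(slot); // safe as long as writers also take slot.lock
}

In other words, the change removes the safety net, not the requirement: it stays correct only while every mutable PhysicalPage slot really is guarded by such a lock, which is what the commit message says was inspected.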
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include <AK/NonnullRefPtrVector.h>
 #include <Kernel/Arch/SafeMem.h>
 #include <Kernel/Arch/SmapDisabler.h>
 #include <Kernel/Debug.h>
@@ -91,7 +92,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_phys
 {
     auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
 
-    auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
+    auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
 
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }
@@ -110,9 +111,9 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purg
     return vmobject;
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>> physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
-    auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(physical_pages));
+    auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(physical_pages));
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }
 
@@ -129,7 +130,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
 {
     auto weak_parent = TRY(other.try_make_weak_ptr<AnonymousVMObject>());
     auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));
@@ -139,7 +140,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with
     return vmobject;
 }
 
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
     : VMObject(move(new_physical_pages))
     , m_unused_committed_pages(move(committed_pages))
 {
@@ -154,7 +155,7 @@ AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_
     }
 }
 
-AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
 {
     VERIFY(paddr.page_base() == paddr);
@@ -162,12 +163,12 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPt
         physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
 }
 
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
 {
 }
 
-AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : VMObject(move(new_physical_pages))
     , m_cow_parent(move(other))
     , m_shared_committed_cow_pages(move(shared_committed_cow_pages))
@@ -270,7 +271,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
     return {};
 }
 
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
 {
     return m_unused_committed_pages->take_one();
 }
@@ -344,7 +345,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         return PageFaultResponse::Continue;
     }
 
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     if (m_shared_committed_cow_pages) {
         dbgln_if(PAGE_FAULT_DEBUG, "    >> It's a committed COW page and it's time to COW!");
         page = m_shared_committed_cow_pages->take_one();
@@ -387,7 +388,7 @@ AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhy
 
 AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() = default;
 
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
     SpinlockLocker locker(m_lock);
     return m_committed_pages.take_one();

@@ -20,12 +20,12 @@ public:
 
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>>);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
     virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
 
-    [[nodiscard]] NonnullLockRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
     size_t cow_pages() const;
     bool should_cow(size_t page_index, bool) const;
@@ -41,12 +41,12 @@ public:
 private:
     class SharedCommittedCowPages;
 
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
 
-    explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
-    explicit AnonymousVMObject(PhysicalAddress, FixedArray<LockRefPtr<PhysicalPage>>&&);
-    explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
-    explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
+    explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
+    explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
 
     virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
 
@@ -74,7 +74,7 @@ private:
 
         [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
 
-        [[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+        [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
         void uncommit_one();
 
     private:

@@ -9,14 +9,14 @@
 
 namespace Kernel::Memory {
 
-InodeVMObject::InodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : VMObject(move(new_physical_pages))
     , m_inode(inode)
     , m_dirty_pages(move(dirty_pages))
 {
 }
 
-InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : VMObject(move(new_physical_pages))
     , m_inode(other.m_inode)
     , m_dirty_pages(move(dirty_pages))

@@ -28,8 +28,8 @@ public:
     u32 writable_mappings() const;
 
 protected:
-    explicit InodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit InodeVMObject(InodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     InodeVMObject& operator=(InodeVMObject const&) = delete;
     InodeVMObject& operator=(InodeVMObject&&) = delete;

@@ -6,6 +6,7 @@
 
 #include <AK/Assertions.h>
 #include <AK/Memory.h>
+#include <AK/NonnullRefPtrVector.h>
 #include <AK/StringView.h>
 #include <Kernel/Arch/CPU.h>
 #include <Kernel/Arch/InterruptDisabler.h>
@@ -746,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
     return region;
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
 {
     dma_buffer_page = TRY(allocate_physical_page());
     // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
@@ -755,12 +756,12 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
 
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
 {
-    LockRefPtr<Memory::PhysicalPage> dma_buffer_page;
+    RefPtr<Memory::PhysicalPage> dma_buffer_page;
 
     return allocate_dma_buffer_page(name, access, dma_buffer_page);
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
 {
     VERIFY(!(size % PAGE_SIZE));
     dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
@@ -771,7 +772,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
 {
     VERIFY(!(size % PAGE_SIZE));
-    NonnullLockRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
 
     return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
 }
@@ -881,10 +882,10 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
     PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
 }
 
-LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
+RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 {
     SpinlockLocker mm_locker(s_mm_lock);
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
         VERIFY(m_system_memory_info.physical_pages_committed > 0);
@@ -906,7 +907,7 @@ LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
     return page;
 }
 
-NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
+NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
     auto page = find_free_physical_page(true);
     if (should_zero_fill == ShouldZeroFill::Yes) {
@@ -918,7 +919,7 @@ NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(
     return page.release_nonnull();
 }
 
-ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
     SpinlockLocker lock(s_mm_lock);
     auto page = find_free_physical_page(false);
@@ -974,7 +975,7 @@ ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(S
     return page.release_nonnull();
 }
 
-ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker mm_lock(s_mm_lock);
@@ -1160,7 +1161,7 @@ CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
         MM.uncommit_physical_pages({}, m_page_count);
 }
 
-NonnullLockRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
 {
     VERIFY(m_page_count > 0);
     --m_page_count;

@@ -119,7 +119,7 @@ public:
     bool is_empty() const { return m_page_count == 0; }
     size_t page_count() const { return m_page_count; }
 
-    [[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
     void uncommit_one();
 
     void operator=(CommittedPhysicalPageSet&&) = delete;
@@ -169,15 +169,15 @@ public:
     ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
     void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
 
-    NonnullLockRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-    ErrorOr<NonnullLockRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-    ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+    NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
+    ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+    ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
 
     ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -258,7 +258,7 @@ private:
 
     static Region* find_region_from_vaddr(VirtualAddress);
 
-    LockRefPtr<PhysicalPage> find_free_physical_page(bool);
+    RefPtr<PhysicalPage> find_free_physical_page(bool);
 
     ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
     {
@@ -286,8 +286,8 @@ private:
 
     LockRefPtr<PageDirectory> m_kernel_page_directory;
 
-    LockRefPtr<PhysicalPage> m_shared_zero_page;
-    LockRefPtr<PhysicalPage> m_lazy_committed_page;
+    RefPtr<PhysicalPage> m_shared_zero_page;
+    RefPtr<PhysicalPage> m_lazy_committed_page;
 
     SystemMemoryInfo m_system_memory_info;
 

@@ -10,8 +10,8 @@
 #include <AK/Badge.h>
 #include <AK/HashMap.h>
 #include <AK/IntrusiveRedBlackTree.h>
+#include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
-#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/PhysicalPage.h>
 
@@ -64,13 +64,13 @@ private:
 
     AddressSpace* m_space { nullptr };
 #if ARCH(X86_64)
-    LockRefPtr<PhysicalPage> m_pml4t;
+    RefPtr<PhysicalPage> m_pml4t;
 #endif
-    LockRefPtr<PhysicalPage> m_directory_table;
+    RefPtr<PhysicalPage> m_directory_table;
 #if ARCH(X86_64)
-    LockRefPtr<PhysicalPage> m_directory_pages[512];
+    RefPtr<PhysicalPage> m_directory_pages[512];
 #else
-    LockRefPtr<PhysicalPage> m_directory_pages[4];
+    RefPtr<PhysicalPage> m_directory_pages[4];
 #endif
     RecursiveSpinlock m_lock { LockRank::None };
 };

@@ -10,10 +10,10 @@
 
 namespace Kernel::Memory {
 
-NonnullLockRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
+NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
 {
     auto& physical_page_entry = MM.get_physical_page_entry(paddr);
-    return adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
+    return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
 }
 
 PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)

@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
 #pragma once
 
-#include <Kernel/Library/NonnullLockRefPtr.h>
+#include <AK/NonnullRefPtr.h>
 #include <Kernel/PhysicalAddress.h>
 
 namespace Kernel::Memory {
@@ -36,7 +36,7 @@ public:
             free_this();
     }
 
-    static NonnullLockRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
+    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
 
     u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
 

@@ -5,9 +5,8 @@
  */
 
 #include <AK/BuiltinWrappers.h>
+#include <AK/NonnullRefPtrVector.h>
 #include <Kernel/Assertions.h>
-#include <Kernel/Library/LockRefPtr.h>
-#include <Kernel/Library/NonnullLockRefPtr.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PhysicalRegion.h>
 #include <Kernel/Memory/PhysicalZone.h>
@@ -76,7 +75,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
     return try_create(taken_lower, taken_upper);
 }
 
-NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
     auto rounded_page_count = next_power_of_two(count);
     auto order = count_trailing_zeroes(rounded_page_count);
@@ -96,7 +95,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
     if (!page_base.has_value())
         return {};
 
-    NonnullLockRefPtrVector<PhysicalPage> physical_pages;
+    NonnullRefPtrVector<PhysicalPage> physical_pages;
     physical_pages.ensure_capacity(count);
 
     for (size_t i = 0; i < count; ++i)
@@ -104,7 +103,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
     return physical_pages;
 }
 
-LockRefPtr<PhysicalPage> PhysicalRegion::take_free_page()
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
 {
     if (m_usable_zones.is_empty())
         return nullptr;

@@ -33,8 +33,8 @@ public:
 
     OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);
 
-    LockRefPtr<PhysicalPage> take_free_page();
-    NonnullLockRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+    RefPtr<PhysicalPage> take_free_page();
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
     void return_page(PhysicalAddress);
 
 private:

@@ -23,12 +23,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
     return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }

@@ -23,8 +23,8 @@ public:
 private:
     virtual bool is_private_inode() const override { return true; }
 
-    explicit PrivateInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }
 

@@ -202,7 +202,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
     return {};
 }
 
-bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage> page)
+bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
 {
     VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
 
@@ -240,7 +240,7 @@ bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage
 
 bool Region::map_individual_page_impl(size_t page_index)
 {
-    LockRefPtr<PhysicalPage> page;
+    RefPtr<PhysicalPage> page;
     {
         SpinlockLocker vmobject_locker(vmobject().m_lock);
         page = physical_page(page_index);
@@ -249,7 +249,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     return map_individual_page_impl(page_index, page);
 }
 
-bool Region::remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage> physical_page)
+bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
 {
     SpinlockLocker page_lock(m_page_directory->get_lock());
 
@@ -408,7 +408,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
     if (current_thread != nullptr)
         current_thread->did_zero_fault();
 
-    LockRefPtr<PhysicalPage> new_physical_page;
+    RefPtr<PhysicalPage> new_physical_page;
 
     if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
         VERIFY(m_vmobject->is_anonymous());
@@ -543,14 +543,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     return PageFaultResponse::Continue;
 }
 
-LockRefPtr<PhysicalPage> Region::physical_page(size_t index) const
+RefPtr<PhysicalPage> Region::physical_page(size_t index) const
 {
     SpinlockLocker vmobject_locker(vmobject().m_lock);
     VERIFY(index < page_count());
     return vmobject().physical_pages()[first_page_index() + index];
 }
 
-LockRefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
+RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
 {
     VERIFY(vmobject().m_lock.is_locked_by_current_processor());
     VERIFY(index < page_count());

@@ -158,8 +158,8 @@ public:
         return size() / PAGE_SIZE;
     }
 
-    LockRefPtr<PhysicalPage> physical_page(size_t index) const;
-    LockRefPtr<PhysicalPage>& physical_page_slot(size_t index);
+    RefPtr<PhysicalPage> physical_page(size_t index) const;
+    RefPtr<PhysicalPage>& physical_page_slot(size_t index);
 
     [[nodiscard]] size_t offset_in_vmobject() const
     {
@@ -208,7 +208,7 @@ private:
     Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
     Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
-    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage>);
+    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);
 
     void set_access_bit(Access access, bool b)
     {
@@ -223,7 +223,7 @@ private:
     [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
 
     [[nodiscard]] bool map_individual_page_impl(size_t page_index);
-    [[nodiscard]] bool map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage>);
+    [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
 
     LockRefPtr<PageDirectory> m_page_directory;
     VirtualRange m_range;

@@ -8,7 +8,7 @@
 
 namespace Kernel::Memory {
 
-LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
+LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
     auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
     if (maybe_vm_object.is_error()) {

@@ -19,7 +19,7 @@ namespace Kernel::Memory {
 
 class ScatterGatherList final : public AtomicRefCounted<ScatterGatherList> {
 public:
-    static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
+    static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
     VMObject const& vmobject() const { return m_vm_object; }
     VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
     size_t scatters_count() const { return m_vm_object->physical_pages().size(); }

@@ -56,21 +56,21 @@ ErrorOr<void> SharedFramebufferVMObject::create_real_writes_framebuffer_vm_objec
     return {};
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
 {
     return m_physical_pages.span();
 }
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
 {
     return m_physical_pages.span();
 }
@@ -92,14 +92,14 @@ void SharedFramebufferVMObject::switch_to_real_framebuffer_writes(Badge<Kernel::
     });
 }
 
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
         return VMObject::physical_pages();
     return m_real_framebuffer_vmobject->physical_pages();
 }
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
@@ -107,7 +107,7 @@ Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
+SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
     : VMObject(move(new_physical_pages))
     , m_real_framebuffer_vmobject(real_framebuffer_vmobject)
     , m_committed_pages(move(committed_pages))

|  | @ -22,15 +22,15 @@ public: | ||||||
|         static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object); |         static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object); | ||||||
| 
 | 
 | ||||||
|     private: |     private: | ||||||
|         FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages) |         FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages) | ||||||
|             : VMObject(move(new_physical_pages)) |             : VMObject(move(new_physical_pages)) | ||||||
|             , m_parent_object(parent_object) |             , m_parent_object(parent_object) | ||||||
|         { |         { | ||||||
|         } |         } | ||||||
|         virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; } |         virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; } | ||||||
|         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); } |         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); } | ||||||
|         virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); } |         virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); } | ||||||
|         virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); } |         virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); } | ||||||
|         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object; |         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object; | ||||||
|     }; |     }; | ||||||
| 
 | 
 | ||||||
|  | @ -39,15 +39,15 @@ public: | ||||||
|         static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object); |         static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object); | ||||||
| 
 | 
 | ||||||
|     private: |     private: | ||||||
|         RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages) |         RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages) | ||||||
|             : VMObject(move(new_physical_pages)) |             : VMObject(move(new_physical_pages)) | ||||||
|             , m_parent_object(parent_object) |             , m_parent_object(parent_object) | ||||||
|         { |         { | ||||||
|         } |         } | ||||||
|         virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; } |         virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; } | ||||||
         virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
-        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
         NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };

@@ -60,14 +60,14 @@ public:
     void switch_to_fake_sink_framebuffer_writes(Badge<Kernel::DisplayConnector>);
     void switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>);

-    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override;
-    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override;
+    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override;
+    virtual Span<RefPtr<PhysicalPage>> physical_pages() override;

-    Span<LockRefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
-    Span<LockRefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
+    Span<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
+    Span<RefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;

-    Span<LockRefPtr<PhysicalPage>> real_framebuffer_physical_pages();
-    Span<LockRefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
+    Span<RefPtr<PhysicalPage>> real_framebuffer_physical_pages();
+    Span<RefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;

     FakeWritesFramebufferVMObject const& fake_writes_framebuffer_vmobject() const { return *m_fake_writes_framebuffer_vmobject; }
     FakeWritesFramebufferVMObject& fake_writes_framebuffer_vmobject() { return *m_fake_writes_framebuffer_vmobject; }
@@ -76,7 +76,7 @@ public:
     RealWritesFramebufferVMObject& real_writes_framebuffer_vmobject() { return *m_real_writes_framebuffer_vmobject; }

 private:
-    SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
+    SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);

     virtual StringView class_name() const override { return "SharedFramebufferVMObject"sv; }
SharedInodeVMObject.cpp:

@@ -29,12 +29,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
     return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }

-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }

-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }
SharedInodeVMObject.h:

@@ -23,8 +23,8 @@ public:
 private:
     virtual bool is_shared_inode() const override { return true; }

-    explicit SharedInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);

     virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }
VMObject.cpp:

@@ -17,17 +17,17 @@ SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
     return s_all_instances;
 }

-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
 {
     return m_physical_pages.try_clone();
 }

-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
 {
-    return FixedArray<LockRefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+    return FixedArray<RefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
 }

-VMObject::VMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
     : m_physical_pages(move(new_physical_pages))
 {
     all_instances().with([&](auto& list) { list.append(*this); });
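The clone path above is the subtlest part of the change: FixedArray::try_clone() copies every RefPtr slot one by one, which is only sound while no other thread can swap those slots. A minimal sketch of the discipline the commit message appeals to, assuming the caller serializes on the VMObject's spinlock; the lock() accessor and the free function are hypothetical, and member access control is ignored for brevity:

// Sketch only, as if within namespace Kernel::Memory: copy RefPtr slots out
// of a shared VMObject under its own spinlock. While the lock is held, each
// copy is a plain refcount increment on a slot that cannot change under us.
ErrorOr<FixedArray<RefPtr<PhysicalPage>>> clone_pages_locked(VMObject& vmobject)
{
    SpinlockLocker locker(vmobject.lock()); // hypothetical accessor for m_lock
    return vmobject.try_clone_physical_pages(); // protected in the real class
}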
VMObject.h:

@@ -8,9 +8,9 @@

 #include <AK/FixedArray.h>
 #include <AK/IntrusiveList.h>
+#include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Library/ListedRefCounted.h>
-#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Library/LockWeakable.h>
 #include <Kernel/Locking/Mutex.h>
 #include <Kernel/Memory/Region.h>
@@ -35,8 +35,8 @@ public:

     size_t page_count() const { return m_physical_pages.size(); }

-    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
-    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
+    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
+    virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }

     size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }

@@ -55,15 +55,15 @@ public:
     }

 protected:
-    static ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
-    ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_clone_physical_pages() const;
-    explicit VMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
+    static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
+    ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
+    explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);

     template<typename Callback>
     void for_each_region(Callback);

     IntrusiveListNode<VMObject> m_list_node;
-    FixedArray<LockRefPtr<PhysicalPage>> m_physical_pages;
+    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;

     mutable RecursiveSpinlock m_lock { LockRank::None };
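Seen from the header, the trade is explicit: LockRefPtr made each individual pointer copy safe on its own, while a plain RefPtr copy out of m_physical_pages is only safe because m_lock guards the whole array. A sketch of that pattern, with illustrative names rather than code from the tree:

// As if within namespace Kernel::Memory. The spinlock guards the slots, so
// handing out a page is a locked lookup plus an ordinary refcount bump; no
// per-pointer locking remains.
class GuardedPagesSketch {
public:
    RefPtr<PhysicalPage> page_at(size_t index) const
    {
        SpinlockLocker locker(m_lock); // external locking replaces LockRefPtr
        return m_pages[index];         // safe copy: the slot is stable while locked
    }

private:
    mutable RecursiveSpinlock m_lock { LockRank::None };
    FixedArray<RefPtr<PhysicalPage>> m_pages;
};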
AHCIPort.cpp:

@@ -54,7 +54,7 @@ ErrorOr<void> AHCIPort::allocate_resources_and_initialize_ports()
     return {};
 }

-UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullLockRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
+UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
     : m_port_index(port_index)
     , m_hba_capabilities(hba_capabilities)
     , m_identify_buffer_page(move(identify_buffer_page))
@@ -496,7 +496,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
     VERIFY(m_lock.is_locked());
     VERIFY(request.block_count() > 0);

-    NonnullLockRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
+    NonnullRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
     for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
         allocated_dma_regions.append(m_dma_buffers.at(index));
     }
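The AHCI hunk shows the guarding most explicitly: prepare_and_set_scatter_list() asserts that m_lock is already held before copying cached pages into the scatter list, so each NonnullRefPtrVector append is just a refcount bump. Roughly, as a hypothetical helper that ignores member access control:

// The port lock, asserted up front, is what makes these RefPtr copies
// race-free; the loop mirrors the one in the hunk above.
void collect_dma_pages(AHCIPort& port, NonnullRefPtrVector<Memory::PhysicalPage>& out, size_t count)
{
    VERIFY(port.m_lock.is_locked()); // same precondition the real function checks
    for (size_t index = 0; index < count; index++)
        out.append(port.m_dma_buffers.at(index)); // plain refcount bump under the lock
}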
AHCIPort.h:

@@ -6,10 +6,11 @@

 #pragma once

+#include <AK/NonnullRefPtrVector.h>
 #include <AK/OwnPtr.h>
+#include <AK/RefPtr.h>
 #include <Kernel/Devices/Device.h>
 #include <Kernel/Interrupts/IRQHandler.h>
-#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Library/LockWeakPtr.h>
 #include <Kernel/Library/LockWeakable.h>
 #include <Kernel/Locking/Mutex.h>
@@ -56,7 +57,7 @@ private:
     bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
     bool initialize();

-    AHCIPort(AHCIController const&, NonnullLockRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
+    AHCIPort(AHCIController const&, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);

     ALWAYS_INLINE void clear_sata_error_register() const;

@@ -111,11 +112,11 @@ private:

     mutable bool m_wait_for_completion { false };

-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
-    LockRefPtr<Memory::PhysicalPage> m_command_list_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
+    RefPtr<Memory::PhysicalPage> m_command_list_page;
     OwnPtr<Memory::Region> m_command_list_region;
-    LockRefPtr<Memory::PhysicalPage> m_fis_receive_page;
+    RefPtr<Memory::PhysicalPage> m_fis_receive_page;
     LockRefPtr<ATADevice> m_connected_device;

     u32 m_port_index;
@@ -125,7 +126,7 @@ private:
     // it's probably better to just "cache" this here instead.
     AHCI::HBADefinedCapabilities const m_hba_capabilities;

-    NonnullLockRefPtr<Memory::PhysicalPage> m_identify_buffer_page;
+    NonnullRefPtr<Memory::PhysicalPage> m_identify_buffer_page;

     volatile AHCI::PortRegisters& m_port_registers;
     LockWeakPtr<AHCIController> m_parent_controller;
ATAPort.h:

@@ -145,8 +145,8 @@ protected:

     OwnPtr<Memory::Region> m_prdt_region;
     OwnPtr<Memory::Region> m_dma_buffer_region;
-    LockRefPtr<Memory::PhysicalPage> m_prdt_page;
-    LockRefPtr<Memory::PhysicalPage> m_dma_buffer_page;
+    RefPtr<Memory::PhysicalPage> m_prdt_page;
+    RefPtr<Memory::PhysicalPage> m_dma_buffer_page;

     const u8 m_port_index;
     NonnullLockRefPtrVector<ATADevice> m_ata_devices;
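Members like m_prdt_page and m_dma_buffer_page are written exactly once, during single-threaded driver bring-up, which is why a bare RefPtr is enough for them. A sketch of how such a member typically gets populated, using the MM.allocate_dma_buffer_page() out-parameter style visible in the NVMe hunks below; the helper and the region name are made up:

#include <Kernel/Memory/MemoryManager.h>

// Runs during initialization, before the port is published anywhere, so the
// unsynchronized store into the RefPtr out-parameter is fine.
ErrorOr<void> allocate_prdt(OwnPtr<Memory::Region>& prdt_region, RefPtr<Memory::PhysicalPage>& prdt_page)
{
    prdt_region = TRY(MM.allocate_dma_buffer_page("ATA PRDT"sv, Memory::Region::Access::ReadWrite, prdt_page));
    return {};
}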
NVMeController.cpp:

@@ -152,7 +152,7 @@ UNMAP_AFTER_INIT u32 NVMeController::get_admin_q_dept()
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
 {

-    LockRefPtr<Memory::PhysicalPage> prp_dma_buffer;
+    RefPtr<Memory::PhysicalPage> prp_dma_buffer;
     OwnPtr<Memory::Region> prp_dma_region;
     auto namespace_data_struct = TRY(ByteBuffer::create_zeroed(NVMe_IDENTIFY_SIZE));
     u32 active_namespace_list[NVMe_IDENTIFY_SIZE / sizeof(u32)];
@@ -259,9 +259,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 {
     auto qdepth = get_admin_q_dept();
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(qdepth), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(qdepth), 4096);
     if (!reset_controller()) {
@@ -300,9 +300,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
 {
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);
NVMeInterruptQueue.cpp:

@@ -11,7 +11,7 @@

 namespace Kernel {

-UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
     , IRQHandler(irq)
 {
NVMeInterruptQueue.h:

@@ -13,7 +13,7 @@ namespace Kernel {
 class NVMeInterruptQueue : public NVMeQueue
     , public IRQHandler {
 public:
-    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
+    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMeInterruptQueue() override {};
NVMePollQueue.cpp:

@@ -10,7 +10,7 @@
 #include "NVMeDefinitions.h"

 namespace Kernel {
-UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
 {
 }
NVMePollQueue.h:

@@ -12,7 +12,7 @@ namespace Kernel {

 class NVMePollQueue : public NVMeQueue {
 public:
-    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
+    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMePollQueue() override {};
NVMeQueue.cpp:

@@ -13,10 +13,10 @@
 #include <Kernel/Storage/NVMe/NVMePollQueue.h>

 namespace Kernel {
-ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
 {
     // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
-    LockRefPtr<Memory::PhysicalPage> rw_dma_page;
+    RefPtr<Memory::PhysicalPage> rw_dma_page;
     auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
     if (!irq.has_value()) {
         auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), *rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
@@ -26,7 +26,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8
     return queue;
 }

-UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
+UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
     : m_current_request(nullptr)
     , m_rw_dma_region(move(rw_dma_region))
     , m_qid(qid)
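Note how try_create() takes the CQ/SQ page vectors by value: the queue stores its own copies, so every PhysicalPage is kept alive by both the controller side and the queue, purely through refcounts. A condensed sketch of a call site; the wrapper is hypothetical, but the parameter list matches the signature above:

// Construction happens during init, before any concurrent access to the
// queue exists, so plain NonnullRefPtr counting is all the lifetime
// management these pages need.
static ErrorOr<NonnullLockRefPtr<NVMeQueue>> make_queue(u16 qid, Optional<u8> irq, u32 q_depth,
    OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages,
    OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages,
    Memory::TypedMapping<DoorbellRegister volatile> db_regs)
{
    return NVMeQueue::try_create(qid, irq, q_depth, move(cq_dma_region), cq_dma_pages,
        move(sq_dma_region), sq_dma_pages, move(db_regs));
}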
NVMeQueue.h:

@@ -7,6 +7,7 @@
 #pragma once

 #include <AK/AtomicRefCounted.h>
+#include <AK/NonnullRefPtrVector.h>
 #include <AK/OwnPtr.h>
 #include <AK/Types.h>
 #include <Kernel/Bus/PCI/Device.h>
@@ -29,7 +30,7 @@ struct DoorbellRegister {
 class AsyncBlockDeviceRequest;
 class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     bool is_admin_queue() { return m_admin_queue; };
     u16 submit_sync_sqe(NVMeSubmission&);
     void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
@@ -43,7 +44,7 @@ protected:
     {
         m_db_regs->sq_tail = m_sq_tail;
     }
-    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullLockRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);

 private:
     bool cqe_available();
@@ -70,12 +71,12 @@ private:
     u32 m_qdepth {};
     Spinlock m_sq_lock { LockRank::Interrupts };
     OwnPtr<Memory::Region> m_cq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
     Span<NVMeSubmission> m_sqe_array;
     OwnPtr<Memory::Region> m_sq_dma_region;
-    NonnullLockRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
+    NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
     Span<NVMeCompletion> m_cqe_array;
     Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
-    NonnullLockRefPtr<Memory::PhysicalPage> m_rw_dma_page;
+    NonnullRefPtr<Memory::PhysicalPage> m_rw_dma_page;
 };
 }