
Kernel: Use RefPtr instead of LockRefPtr for PhysicalPage

I believe this to be safe, as the main thing that LockRefPtr provides
over RefPtr is safe copying from a shared LockRefPtr instance. I've
inspected the uses of RefPtr<PhysicalPage> and it seems they're all
guarded by external locking. Some of it is less obvious, but this is
an area where we're making continuous headway.
Andreas Kling 2022-08-24 15:56:26 +02:00
parent 5a804b9a1d
commit 2c72d495a3
33 changed files with 141 additions and 138 deletions
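
For context on the safety argument above: PhysicalPage's reference count is atomic (see the m_ref_count.load in PhysicalPage.h below), so taking and dropping references is always thread-safe. What LockRefPtr additionally protects is the pointer slot itself, so it can be copied while another thread reassigns it. Once every copy of the slot happens under an external lock, as Region::physical_page() does below with the VMObject spinlock, plain RefPtr suffices. The following is a minimal userspace sketch of that discipline, with std::shared_ptr and std::mutex as hypothetical stand-ins for the kernel's RefPtr and Spinlock; it is not code from this commit.

#include <memory>
#include <mutex>

struct PhysicalPage { };

class VMObjectLike {
public:
    // Copying the pointer is only safe because every reader and writer
    // takes m_lock first -- the external locking the commit relies on.
    std::shared_ptr<PhysicalPage> page() const
    {
        std::lock_guard locker(m_lock);
        return m_page; // ref-count bump is atomic; the slot is lock-protected
    }

    void set_page(std::shared_ptr<PhysicalPage> new_page)
    {
        std::lock_guard locker(m_lock);
        m_page = std::move(new_page); // writers take the same lock
    }

private:
    mutable std::mutex m_lock;
    std::shared_ptr<PhysicalPage> m_page { std::make_shared<PhysicalPage>() };
};

int main()
{
    VMObjectLike object;
    auto page = object.page(); // copy made under the external lock
    object.set_page(std::make_shared<PhysicalPage>());
}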

Kernel/Memory/AnonymousVMObject.cpp

@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
+#include <AK/NonnullRefPtrVector.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Debug.h>
@@ -91,7 +92,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_phys
{
auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
-auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
+auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
}
@@ -110,9 +111,9 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purg
return vmobject;
}
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>> physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
{
-auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(physical_pages));
+auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(physical_pages));
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
}
@@ -129,7 +130,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_
return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
}
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
{
auto weak_parent = TRY(other.try_make_weak_ptr<AnonymousVMObject>());
auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));
@@ -139,7 +140,7 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with
return vmobject;
}
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
: VMObject(move(new_physical_pages))
, m_unused_committed_pages(move(committed_pages))
{
@@ -154,7 +155,7 @@ AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_
}
}
-AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
{
VERIFY(paddr.page_base() == paddr);
@@ -162,12 +163,12 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPt
physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
}
-AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
{
}
-AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
, m_cow_parent(move(other))
, m_shared_committed_cow_pages(move(shared_committed_cow_pages))
@@ -270,7 +271,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
return {};
}
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
{
return m_unused_committed_pages->take_one();
}
@@ -344,7 +345,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
return PageFaultResponse::Continue;
}
-LockRefPtr<PhysicalPage> page;
+RefPtr<PhysicalPage> page;
if (m_shared_committed_cow_pages) {
dbgln_if(PAGE_FAULT_DEBUG, " >> It's a committed COW page and it's time to COW!");
page = m_shared_committed_cow_pages->take_one();
@@ -387,7 +388,7 @@ AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhy
AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() = default;
-NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
+NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
{
SpinlockLocker locker(m_lock);
return m_committed_pages.take_one();

Kernel/Memory/AnonymousVMObject.h

@@ -20,12 +20,12 @@ public:
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>>);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
-[[nodiscard]] NonnullLockRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
+[[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
size_t cow_pages() const;
bool should_cow(size_t page_index, bool) const;
@@ -41,12 +41,12 @@ public:
private:
class SharedCommittedCowPages;
-static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
-explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
-explicit AnonymousVMObject(PhysicalAddress, FixedArray<LockRefPtr<PhysicalPage>>&&);
-explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
-explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);
+explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
+explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
+explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
+explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
@@ -74,7 +74,7 @@ private:
[[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
-[[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+[[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
void uncommit_one();
private:

Kernel/Memory/InodeVMObject.cpp

@@ -9,14 +9,14 @@
namespace Kernel::Memory {
-InodeVMObject::InodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
: VMObject(move(new_physical_pages))
, m_inode(inode)
, m_dirty_pages(move(dirty_pages))
{
}
-InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
: VMObject(move(new_physical_pages))
, m_inode(other.m_inode)
, m_dirty_pages(move(dirty_pages))

Kernel/Memory/InodeVMObject.h

@@ -28,8 +28,8 @@ public:
u32 writable_mappings() const;
protected:
-explicit InodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-explicit InodeVMObject(InodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
InodeVMObject& operator=(InodeVMObject const&) = delete;
InodeVMObject& operator=(InodeVMObject&&) = delete;

Kernel/Memory/MemoryManager.cpp

@@ -6,6 +6,7 @@
#include <AK/Assertions.h>
#include <AK/Memory.h>
+#include <AK/NonnullRefPtrVector.h>
#include <AK/StringView.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/InterruptDisabler.h>
@@ -746,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
return region;
}
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
{
dma_buffer_page = TRY(allocate_physical_page());
// Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
@@ -755,12 +756,12 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
{
-LockRefPtr<Memory::PhysicalPage> dma_buffer_page;
+RefPtr<Memory::PhysicalPage> dma_buffer_page;
return allocate_dma_buffer_page(name, access, dma_buffer_page);
}
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
{
VERIFY(!(size % PAGE_SIZE));
dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
@@ -771,7 +772,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
{
VERIFY(!(size % PAGE_SIZE));
-NonnullLockRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
}
@@ -881,10 +882,10 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
}
-LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
+RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
{
SpinlockLocker mm_locker(s_mm_lock);
-LockRefPtr<PhysicalPage> page;
+RefPtr<PhysicalPage> page;
if (committed) {
// Draw from the committed pages pool. We should always have these pages available
VERIFY(m_system_memory_info.physical_pages_committed > 0);
@@ -906,7 +907,7 @@ LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
return page;
}
-NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
+NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
auto page = find_free_physical_page(true);
if (should_zero_fill == ShouldZeroFill::Yes) {
@@ -918,7 +919,7 @@ NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(
return page.release_nonnull();
}
-ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
SpinlockLocker lock(s_mm_lock);
auto page = find_free_physical_page(false);
@@ -974,7 +975,7 @@ ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(S
return page.release_nonnull();
}
-ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
{
VERIFY(!(size % PAGE_SIZE));
SpinlockLocker mm_lock(s_mm_lock);
@@ -1160,7 +1161,7 @@ CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
MM.uncommit_physical_pages({}, m_page_count);
}
-NonnullLockRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
{
VERIFY(m_page_count > 0);
--m_page_count;

Kernel/Memory/MemoryManager.h

@@ -119,7 +119,7 @@ public:
bool is_empty() const { return m_page_count == 0; }
size_t page_count() const { return m_page_count; }
-[[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
+[[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
void uncommit_one();
void operator=(CommittedPhysicalPageSet&&) = delete;
@@ -169,15 +169,15 @@ public:
ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
-NonnullLockRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-ErrorOr<NonnullLockRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
+ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
void deallocate_physical_page(PhysicalAddress);
ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page);
+ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
-ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -258,7 +258,7 @@ private:
static Region* find_region_from_vaddr(VirtualAddress);
-LockRefPtr<PhysicalPage> find_free_physical_page(bool);
+RefPtr<PhysicalPage> find_free_physical_page(bool);
ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
{
@@ -286,8 +286,8 @@ private:
LockRefPtr<PageDirectory> m_kernel_page_directory;
-LockRefPtr<PhysicalPage> m_shared_zero_page;
-LockRefPtr<PhysicalPage> m_lazy_committed_page;
+RefPtr<PhysicalPage> m_shared_zero_page;
+RefPtr<PhysicalPage> m_lazy_committed_page;
SystemMemoryInfo m_system_memory_info;

Kernel/Memory/PageDirectory.h

@@ -10,8 +10,8 @@
#include <AK/Badge.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveRedBlackTree.h>
+#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
-#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalPage.h>
@@ -64,13 +64,13 @@ private:
AddressSpace* m_space { nullptr };
#if ARCH(X86_64)
-LockRefPtr<PhysicalPage> m_pml4t;
+RefPtr<PhysicalPage> m_pml4t;
#endif
-LockRefPtr<PhysicalPage> m_directory_table;
+RefPtr<PhysicalPage> m_directory_table;
#if ARCH(X86_64)
-LockRefPtr<PhysicalPage> m_directory_pages[512];
+RefPtr<PhysicalPage> m_directory_pages[512];
#else
-LockRefPtr<PhysicalPage> m_directory_pages[4];
+RefPtr<PhysicalPage> m_directory_pages[4];
#endif
RecursiveSpinlock m_lock { LockRank::None };
};

Kernel/Memory/PhysicalPage.cpp

@@ -10,10 +10,10 @@
namespace Kernel::Memory {
-NonnullLockRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
+NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
{
auto& physical_page_entry = MM.get_physical_page_entry(paddr);
-return adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
+return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
}
PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)

Kernel/Memory/PhysicalPage.h

@@ -1,12 +1,12 @@
/*
-* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+* Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
-#include <Kernel/Library/NonnullLockRefPtr.h>
+#include <AK/NonnullRefPtr.h>
#include <Kernel/PhysicalAddress.h>
namespace Kernel::Memory {
@@ -36,7 +36,7 @@ public:
free_this();
}
-static NonnullLockRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
+static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }

Kernel/Memory/PhysicalRegion.cpp

@@ -5,9 +5,8 @@
*/
#include <AK/BuiltinWrappers.h>
+#include <AK/NonnullRefPtrVector.h>
#include <Kernel/Assertions.h>
-#include <Kernel/Library/LockRefPtr.h>
-#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/PhysicalZone.h>
@@ -76,7 +75,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
return try_create(taken_lower, taken_upper);
}
-NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
{
auto rounded_page_count = next_power_of_two(count);
auto order = count_trailing_zeroes(rounded_page_count);
@@ -96,7 +95,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
if (!page_base.has_value())
return {};
-NonnullLockRefPtrVector<PhysicalPage> physical_pages;
+NonnullRefPtrVector<PhysicalPage> physical_pages;
physical_pages.ensure_capacity(count);
for (size_t i = 0; i < count; ++i)
@@ -104,7 +103,7 @@ NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages
return physical_pages;
}
-LockRefPtr<PhysicalPage> PhysicalRegion::take_free_page()
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
{
if (m_usable_zones.is_empty())
return nullptr;

Kernel/Memory/PhysicalRegion.h

@@ -33,8 +33,8 @@ public:
OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);
-LockRefPtr<PhysicalPage> take_free_page();
-NonnullLockRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+RefPtr<PhysicalPage> take_free_page();
+NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
void return_page(PhysicalAddress);
private:

Kernel/Memory/PrivateInodeVMObject.cpp

@@ -23,12 +23,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
}
-PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
{
}
-PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
{
}

Kernel/Memory/PrivateInodeVMObject.h

@@ -23,8 +23,8 @@ public:
private:
virtual bool is_private_inode() const override { return true; }
-explicit PrivateInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }

Kernel/Memory/Region.cpp

@@ -202,7 +202,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
return {};
}
-bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage> page)
+bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
{
VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
@@ -240,7 +240,7 @@ bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage
bool Region::map_individual_page_impl(size_t page_index)
{
-LockRefPtr<PhysicalPage> page;
+RefPtr<PhysicalPage> page;
{
SpinlockLocker vmobject_locker(vmobject().m_lock);
page = physical_page(page_index);
@@ -249,7 +249,7 @@ bool Region::map_individual_page_impl(size_t page_index)
return map_individual_page_impl(page_index, page);
}
-bool Region::remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage> physical_page)
+bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
{
SpinlockLocker page_lock(m_page_directory->get_lock());
@@ -408,7 +408,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
if (current_thread != nullptr)
current_thread->did_zero_fault();
-LockRefPtr<PhysicalPage> new_physical_page;
+RefPtr<PhysicalPage> new_physical_page;
if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
VERIFY(m_vmobject->is_anonymous());
@@ -543,14 +543,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
return PageFaultResponse::Continue;
}
-LockRefPtr<PhysicalPage> Region::physical_page(size_t index) const
+RefPtr<PhysicalPage> Region::physical_page(size_t index) const
{
SpinlockLocker vmobject_locker(vmobject().m_lock);
VERIFY(index < page_count());
return vmobject().physical_pages()[first_page_index() + index];
}
-LockRefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
+RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
{
VERIFY(vmobject().m_lock.is_locked_by_current_processor());
VERIFY(index < page_count());

Kernel/Memory/Region.h

@@ -158,8 +158,8 @@ public:
return size() / PAGE_SIZE;
}
-LockRefPtr<PhysicalPage> physical_page(size_t index) const;
-LockRefPtr<PhysicalPage>& physical_page_slot(size_t index);
+RefPtr<PhysicalPage> physical_page(size_t index) const;
+RefPtr<PhysicalPage>& physical_page_slot(size_t index);
[[nodiscard]] size_t offset_in_vmobject() const
{
@@ -208,7 +208,7 @@ private:
Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
-[[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage>);
+[[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);
void set_access_bit(Access access, bool b)
{
@@ -223,7 +223,7 @@ private:
[[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);
[[nodiscard]] bool map_individual_page_impl(size_t page_index);
-[[nodiscard]] bool map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage>);
+[[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
LockRefPtr<PageDirectory> m_page_directory;
VirtualRange m_range;

Kernel/Memory/ScatterGatherList.cpp

@@ -8,7 +8,7 @@
namespace Kernel::Memory {
-LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
+LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
{
auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
if (maybe_vm_object.is_error()) {

Kernel/Memory/ScatterGatherList.h

@@ -19,7 +19,7 @@ namespace Kernel::Memory {
class ScatterGatherList final : public AtomicRefCounted<ScatterGatherList> {
public:
-static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
+static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
VMObject const& vmobject() const { return m_vm_object; }
VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
size_t scatters_count() const { return m_vm_object->physical_pages().size(); }

Kernel/Memory/SharedFramebufferVMObject.cpp

@@ -56,21 +56,21 @@ ErrorOr<void> SharedFramebufferVMObject::create_real_writes_framebuffer_vm_objec
return {};
}
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
{
return m_real_framebuffer_vmobject->physical_pages();
}
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
{
return m_real_framebuffer_vmobject->physical_pages();
}
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
{
return m_physical_pages.span();
}
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
{
return m_physical_pages.span();
}
@@ -92,14 +92,14 @@ void SharedFramebufferVMObject::switch_to_real_framebuffer_writes(Badge<Kernel::
});
}
-Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
+Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
{
SpinlockLocker locker(m_writes_state_lock);
if (m_writes_are_faked)
return VMObject::physical_pages();
return m_real_framebuffer_vmobject->physical_pages();
}
-Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
+Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
{
SpinlockLocker locker(m_writes_state_lock);
if (m_writes_are_faked)
@@ -107,7 +107,7 @@ Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
return m_real_framebuffer_vmobject->physical_pages();
}
-SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
+SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
: VMObject(move(new_physical_pages))
, m_real_framebuffer_vmobject(real_framebuffer_vmobject)
, m_committed_pages(move(committed_pages))

Kernel/Memory/SharedFramebufferVMObject.h

@@ -22,15 +22,15 @@ public:
static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
private:
-FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
, m_parent_object(parent_object)
{
}
virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; }
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
-virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
};
@@ -39,15 +39,15 @@ public:
static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
private:
-RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
, m_parent_object(parent_object)
{
}
virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; }
virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
-virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
+virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
+virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
};
@@ -60,14 +60,14 @@ public:
void switch_to_fake_sink_framebuffer_writes(Badge<Kernel::DisplayConnector>);
void switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>);
-virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override;
-virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override;
+virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override;
+virtual Span<RefPtr<PhysicalPage>> physical_pages() override;
-Span<LockRefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
-Span<LockRefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
+Span<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
+Span<RefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
-Span<LockRefPtr<PhysicalPage>> real_framebuffer_physical_pages();
-Span<LockRefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
+Span<RefPtr<PhysicalPage>> real_framebuffer_physical_pages();
+Span<RefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
FakeWritesFramebufferVMObject const& fake_writes_framebuffer_vmobject() const { return *m_fake_writes_framebuffer_vmobject; }
FakeWritesFramebufferVMObject& fake_writes_framebuffer_vmobject() { return *m_fake_writes_framebuffer_vmobject; }
@@ -76,7 +76,7 @@ public:
RealWritesFramebufferVMObject& real_writes_framebuffer_vmobject() { return *m_real_writes_framebuffer_vmobject; }
private:
-SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
+SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
virtual StringView class_name() const override { return "SharedFramebufferVMObject"sv; }

Kernel/Memory/SharedInodeVMObject.cpp

@@ -29,12 +29,12 @@ ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
}
-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
{
}
-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
: InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
{
}

Kernel/Memory/SharedInodeVMObject.h

@@ -23,8 +23,8 @@ public:
private:
virtual bool is_shared_inode() const override { return true; }
-explicit SharedInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }

Kernel/Memory/VMObject.cpp

@@ -17,17 +17,17 @@ SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
return s_all_instances;
}
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
{
return m_physical_pages.try_clone();
}
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
{
-return FixedArray<LockRefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+return FixedArray<RefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
}
-VMObject::VMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
+VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: m_physical_pages(move(new_physical_pages))
{
all_instances().with([&](auto& list) { list.append(*this); });
Kernel/Memory/VMObject.h

@@ -8,9 +8,9 @@
#include <AK/FixedArray.h>
#include <AK/IntrusiveList.h>
+#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/ListedRefCounted.h>
-#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/Region.h>
@@ -35,8 +35,8 @@ public:
size_t page_count() const { return m_physical_pages.size(); }
-virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
-virtual Span<LockRefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
+virtual Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
+virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
@@ -55,15 +55,15 @@ public:
}
protected:
-static ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
-ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_clone_physical_pages() const;
-explicit VMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
+static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
+ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
+explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);
template<typename Callback>
void for_each_region(Callback);
IntrusiveListNode<VMObject> m_list_node;
-FixedArray<LockRefPtr<PhysicalPage>> m_physical_pages;
+FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
mutable RecursiveSpinlock m_lock { LockRank::None };