Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 04:07:45 +00:00
Kernel: Make self-contained locking smart pointers their own classes
Until now, our kernel has reimplemented a number of AK classes to provide automatic internal locking:

- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable

This patch renames the Kernel classes so that they can coexist with the original AK classes:

- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable

The goal here is to eventually get rid of the Lock* classes in favor of using external locking.
parent e475263113
commit 11eee67b85

360 changed files with 1703 additions and 1672 deletions
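To make the rename concrete, here is a minimal illustrative sketch (not code from this commit; the Thing, Before, and After types are made up): a member that kernel code previously spelled with the internally locking RefPtr is now spelled LockRefPtr, while the eventual external-locking style described in the commit message would pair a plain AK::RefPtr with an explicit lock such as the kernel's Spinlock.

// Illustrative sketch only; "Thing", "Before" and "After" are hypothetical names.
#include <AK/AtomicRefCounted.h>
#include <AK/RefPtr.h>                   // plain AK smart pointer, externally locked
#include <Kernel/Library/LockRefPtr.h>   // the renamed, internally locking variant
#include <Kernel/Locking/Spinlock.h>

namespace Kernel {

class Thing : public AtomicRefCounted<Thing> {
};

class Before {
    // What kernel code used to spell "RefPtr" is now LockRefPtr:
    // every access goes through the pointer's own internal lock.
    LockRefPtr<Thing> m_thing;
};

class After {
    // The stated end goal: a plain AK::RefPtr guarded by an explicit,
    // external lock owned by the containing object.
    RefPtr<Thing> thing() const
    {
        SpinlockLocker locker(m_lock);
        return m_thing;
    }

    mutable Spinlock m_lock { LockRank::None };
    RefPtr<Thing> m_thing;
};

}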
@@ -38,7 +38,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
 return space;
 }

-AddressSpace::AddressSpace(NonnullRefPtr<PageDirectory> page_directory, VirtualRange total_range)
+AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
 : m_page_directory(move(page_directory))
 , m_region_tree(total_range)
 {
@@ -173,12 +173,12 @@ ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize
 return region.leak_ptr();
 }

-ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange requested_range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
 return allocate_region_with_vmobject(RandomizeVirtualAddress::Yes, requested_range.base(), requested_range.size(), PAGE_SIZE, move(vmobject), offset_in_vmobject, name, prot, shared);
 }

-ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
 if (!requested_address.is_page_aligned())
 return EINVAL;
@@ -9,7 +9,7 @@

 #include <AK/RedBlackTree.h>
 #include <AK/Vector.h>
-#include <AK/WeakPtr.h>
+#include <Kernel/Library/LockWeakPtr.h>
 #include <Kernel/Memory/AllocationStrategy.h>
 #include <Kernel/Memory/PageDirectory.h>
 #include <Kernel/Memory/Region.h>
@@ -33,8 +33,8 @@ public:

 ErrorOr<void> unmap_mmap_range(VirtualAddress, size_t);

-ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
-ErrorOr<Region*> allocate_region_with_vmobject(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+ErrorOr<Region*> allocate_region_with_vmobject(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
 ErrorOr<Region*> allocate_region(RandomizeVirtualAddress, VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
 void deallocate_region(Region& region);
 NonnullOwnPtr<Region> take_region(Region& region);
@@ -65,11 +65,11 @@ public:
 auto& region_tree() { return m_region_tree; }

 private:
-AddressSpace(NonnullRefPtr<PageDirectory>, VirtualRange total_range);
+AddressSpace(NonnullLockRefPtr<PageDirectory>, VirtualRange total_range);

 mutable RecursiveSpinlock m_lock { LockRank::None };

-RefPtr<PageDirectory> m_page_directory;
+LockRefPtr<PageDirectory> m_page_directory;

 RegionTree m_region_tree;
@@ -14,7 +14,7 @@

 namespace Kernel::Memory {

-ErrorOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
+ErrorOr<NonnullLockRefPtr<VMObject>> AnonymousVMObject::try_clone()
 {
 // We need to acquire our lock so we copy a sane state
 SpinlockLocker lock(m_lock);
@@ -50,7 +50,7 @@ ErrorOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 // one would keep the one it still has. This ensures that the original
 // one and this one, as well as the clone have sufficient resources
 // to cow all pages as needed
-auto new_shared_committed_cow_pages = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedCommittedCowPages(move(committed_pages))));
+auto new_shared_committed_cow_pages = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedCommittedCowPages(move(committed_pages))));
 auto new_physical_pages = TRY(this->try_clone_physical_pages());
 auto clone = TRY(try_create_with_shared_cow(*this, *new_shared_committed_cow_pages, move(new_physical_pages)));

@@ -75,7 +75,7 @@ ErrorOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 return clone;
 }

-ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
 Optional<CommittedPhysicalPageSet> committed_pages;
 if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
@@ -84,19 +84,19 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_siz

 auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));

-return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages), strategy, move(committed_pages)));
+return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages), strategy, move(committed_pages)));
 }

-ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
 {
 auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));

-auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));
+auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(contiguous_physical_pages.span()));

-return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
+return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }

-ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
 Optional<CommittedPhysicalPageSet> committed_pages;
 if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
@@ -105,18 +105,18 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeabl

 auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));

-auto vmobject = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages), strategy, move(committed_pages))));
+auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages), strategy, move(committed_pages))));
 vmobject->m_purgeable = true;
 return vmobject;
 }

-ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>> physical_pages)
 {
-auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalPage>>::try_create(physical_pages));
-return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
+auto new_physical_pages = TRY(FixedArray<LockRefPtr<PhysicalPage>>::try_create(physical_pages));
+return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages)));
 }

-ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
 if (paddr.offset(size) < paddr) {
 dbgln("Shenanigans! try_create_for_physical_range({}, {}) would wrap around", paddr, size);
@@ -126,20 +126,20 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_phys

 auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));

-return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
+return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
 }

-ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
 {
 auto weak_parent = TRY(other.try_make_weak_ptr<AnonymousVMObject>());
-auto vmobject = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));
+auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(weak_parent), move(shared_committed_cow_pages), move(new_physical_pages))));

 TRY(vmobject->ensure_cow_map());

 return vmobject;
 }

-AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
 : VMObject(move(new_physical_pages))
 , m_unused_committed_pages(move(committed_pages))
 {
@@ -154,7 +154,7 @@ AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_phys
 }
 }

-AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
 : VMObject(move(new_physical_pages))
 {
 VERIFY(paddr.page_base() == paddr);
@@ -162,12 +162,12 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, FixedArray<RefPtr<Ph
 physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
 }

-AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
 : VMObject(move(new_physical_pages))
 {
 }

-AnonymousVMObject::AnonymousVMObject(WeakPtr<AnonymousVMObject> other, NonnullRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+AnonymousVMObject::AnonymousVMObject(LockWeakPtr<AnonymousVMObject> other, NonnullLockRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
 : VMObject(move(new_physical_pages))
 , m_cow_parent(move(other))
 , m_shared_committed_cow_pages(move(shared_committed_cow_pages))
@@ -270,7 +270,7 @@ ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged
 return {};
 }

-NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
+NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
 {
 return m_unused_committed_pages->take_one();
 }
@@ -344,7 +344,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
 return PageFaultResponse::Continue;
 }

-RefPtr<PhysicalPage> page;
+LockRefPtr<PhysicalPage> page;
 if (m_shared_committed_cow_pages) {
 dbgln_if(PAGE_FAULT_DEBUG, " >> It's a committed COW page and it's time to COW!");
 page = m_shared_committed_cow_pages->take_one();
@@ -388,7 +388,7 @@ AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhy

 AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() = default;

-NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
+NonnullLockRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
 SpinlockLocker locker(m_lock);
 return m_committed_pages.take_one();
@@ -18,14 +18,14 @@ class AnonymousVMObject final : public VMObject {
 public:
 virtual ~AnonymousVMObject() override;

-static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
-static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
-static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
-static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
-virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override;
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullLockRefPtr<PhysicalPage>>);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
+virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;

-[[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
+[[nodiscard]] NonnullLockRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
 PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
 size_t cow_pages() const;
 bool should_cow(size_t page_index, bool) const;
@@ -41,12 +41,12 @@ public:
 private:
 class SharedCommittedCowPages;

-static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
+static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);

-explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
-explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
-explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
-explicit AnonymousVMObject(WeakPtr<AnonymousVMObject>, NonnullRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
+explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
+explicit AnonymousVMObject(PhysicalAddress, FixedArray<LockRefPtr<PhysicalPage>>&&);
+explicit AnonymousVMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
+explicit AnonymousVMObject(LockWeakPtr<AnonymousVMObject>, NonnullLockRefPtr<SharedCommittedCowPages>, FixedArray<LockRefPtr<PhysicalPage>>&&);

 virtual StringView class_name() const override { return "AnonymousVMObject"sv; }

@@ -74,7 +74,7 @@ private:

 [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }

-[[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
+[[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
 void uncommit_one();

 private:
@@ -82,8 +82,8 @@ private:
 CommittedPhysicalPageSet m_committed_pages;
 };

-WeakPtr<AnonymousVMObject> m_cow_parent;
-RefPtr<SharedCommittedCowPages> m_shared_committed_cow_pages;
+LockWeakPtr<AnonymousVMObject> m_cow_parent;
+LockRefPtr<SharedCommittedCowPages> m_shared_committed_cow_pages;

 bool m_purgeable { false };
 bool m_volatile { false };
@@ -9,14 +9,14 @@

 namespace Kernel::Memory {

-InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
 : VMObject(move(new_physical_pages))
 , m_inode(inode)
 , m_dirty_pages(move(dirty_pages))
 {
 }

-InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
 : VMObject(move(new_physical_pages))
 , m_inode(other.m_inode)
 , m_dirty_pages(move(dirty_pages))
@@ -29,8 +29,8 @@ public:
 u32 executable_mappings() const;

 protected:
-explicit InodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-explicit InodeVMObject(InodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit InodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit InodeVMObject(InodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);

 InodeVMObject& operator=(InodeVMObject const&) = delete;
 InodeVMObject& operator=(InodeVMObject&&) = delete;
@@ -38,7 +38,7 @@ protected:

 virtual bool is_inode() const final { return true; }

-NonnullRefPtr<Inode> m_inode;
+NonnullLockRefPtr<Inode> m_inode;
 Bitmap m_dirty_pages;
 };
@@ -512,7 +512,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
 auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
 auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
 auto& physical_page_entry = m_physical_page_entries[physical_page_index];
-auto physical_page = adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(MayReturnToFreeList::No));
+auto physical_page = adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(MayReturnToFreeList::No));

 // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
 (void)physical_page.leak_ref();
@@ -757,7 +757,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
 return region;
 }

-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page)
 {
 dma_buffer_page = TRY(allocate_physical_page());
 // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
@@ -766,12 +766,12 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S

 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
 {
-RefPtr<Memory::PhysicalPage> dma_buffer_page;
+LockRefPtr<Memory::PhysicalPage> dma_buffer_page;

 return allocate_dma_buffer_page(name, access, dma_buffer_page);
 }

-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
 {
 VERIFY(!(size % PAGE_SIZE));
 dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
@@ -782,7 +782,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
 {
 VERIFY(!(size % PAGE_SIZE));
-NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+NonnullLockRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;

 return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
 }
@@ -884,10 +884,10 @@ void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
 PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
 }

-RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
+LockRefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 {
 VERIFY(s_mm_lock.is_locked());
-RefPtr<PhysicalPage> page;
+LockRefPtr<PhysicalPage> page;
 if (committed) {
 // Draw from the committed pages pool. We should always have these pages available
 VERIFY(m_system_memory_info.physical_pages_committed > 0);
@@ -909,7 +909,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
 return page;
 }

-NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
+NonnullLockRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
 SpinlockLocker lock(s_mm_lock);
 auto page = find_free_physical_page(true);
@@ -921,7 +921,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badg
 return page.release_nonnull();
 }

-ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullLockRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
 SpinlockLocker lock(s_mm_lock);
 auto page = find_free_physical_page(false);
@@ -977,7 +977,7 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(Shoul
 return page.release_nonnull();
 }

-ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
 {
 VERIFY(!(size % PAGE_SIZE));
 SpinlockLocker mm_lock(s_mm_lock);
@@ -1189,7 +1189,7 @@ CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
 MM.uncommit_physical_pages({}, m_page_count);
 }

-NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
+NonnullLockRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
 {
 VERIFY(m_page_count > 0);
 --m_page_count;
@@ -11,8 +11,8 @@
 #include <AK/HashTable.h>
 #include <AK/IntrusiveRedBlackTree.h>
 #include <AK/NonnullOwnPtrVector.h>
-#include <AK/NonnullRefPtrVector.h>
 #include <Kernel/Forward.h>
+#include <Kernel/Library/NonnullLockRefPtrVector.h>
 #include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AllocationStrategy.h>
 #include <Kernel/Memory/PhysicalPage.h>
@@ -122,7 +122,7 @@ public:
 bool is_empty() const { return m_page_count == 0; }
 size_t page_count() const { return m_page_count; }

-[[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
+[[nodiscard]] NonnullLockRefPtr<PhysicalPage> take_one();
 void uncommit_one();

 void operator=(CommittedPhysicalPageSet&&) = delete;
@@ -173,15 +173,15 @@ public:
 ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
 void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);

-NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+NonnullLockRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
+ErrorOr<NonnullLockRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+ErrorOr<NonnullLockRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
 void deallocate_physical_page(PhysicalAddress);

 ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
+ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, LockRefPtr<Memory::PhysicalPage>& dma_buffer_page);
 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
-ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullLockRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -263,7 +263,7 @@ private:

 static Region* find_region_from_vaddr(VirtualAddress);

-RefPtr<PhysicalPage> find_free_physical_page(bool);
+LockRefPtr<PhysicalPage> find_free_physical_page(bool);

 ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
 {
@@ -289,10 +289,10 @@ private:
 VERIFY(m_system_memory_info.physical_pages == (m_system_memory_info.physical_pages_used + physical_pages_unused));
 }

-RefPtr<PageDirectory> m_kernel_page_directory;
+LockRefPtr<PageDirectory> m_kernel_page_directory;

-RefPtr<PhysicalPage> m_shared_zero_page;
-RefPtr<PhysicalPage> m_lazy_committed_page;
+LockRefPtr<PhysicalPage> m_shared_zero_page;
+LockRefPtr<PhysicalPage> m_lazy_committed_page;

 SystemMemoryInfo m_system_memory_info;
@@ -20,14 +20,14 @@ extern u8 end_of_kernel_image[];

 namespace Kernel::Memory {

-UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
+UNMAP_AFTER_INIT NonnullLockRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
 {
-return adopt_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
+return adopt_lock_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
 }

-ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace()
+ErrorOr<NonnullLockRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace()
 {
-auto directory = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PageDirectory));
+auto directory = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) PageDirectory));

 // NOTE: Take the MM lock since we need it for quickmap.
 SpinlockLocker lock(s_mm_lock);
@@ -10,8 +10,8 @@
 #include <AK/Badge.h>
 #include <AK/HashMap.h>
 #include <AK/IntrusiveRedBlackTree.h>
-#include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
+#include <Kernel/Library/LockRefPtr.h>
 #include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/PhysicalPage.h>

@@ -21,9 +21,9 @@ class PageDirectory final : public AtomicRefCounted<PageDirectory> {
 friend class MemoryManager;

 public:
-static ErrorOr<NonnullRefPtr<PageDirectory>> try_create_for_userspace();
-static NonnullRefPtr<PageDirectory> must_create_kernel_page_directory();
-static RefPtr<PageDirectory> find_current();
+static ErrorOr<NonnullLockRefPtr<PageDirectory>> try_create_for_userspace();
+static NonnullLockRefPtr<PageDirectory> must_create_kernel_page_directory();
+static LockRefPtr<PageDirectory> find_current();

 ~PageDirectory();

@@ -64,13 +64,13 @@ private:

 AddressSpace* m_space { nullptr };
 #if ARCH(X86_64)
-RefPtr<PhysicalPage> m_pml4t;
+LockRefPtr<PhysicalPage> m_pml4t;
 #endif
-RefPtr<PhysicalPage> m_directory_table;
+LockRefPtr<PhysicalPage> m_directory_table;
 #if ARCH(X86_64)
-RefPtr<PhysicalPage> m_directory_pages[512];
+LockRefPtr<PhysicalPage> m_directory_pages[512];
 #else
-RefPtr<PhysicalPage> m_directory_pages[4];
+LockRefPtr<PhysicalPage> m_directory_pages[4];
 #endif
 RecursiveSpinlock m_lock { LockRank::None };
 };
@@ -10,10 +10,10 @@

 namespace Kernel::Memory {

-NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
+NonnullLockRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
 {
 auto& physical_page_entry = MM.get_physical_page_entry(paddr);
-return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
+return adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
 }

 PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)
@@ -6,7 +6,7 @@

 #pragma once

-#include <AK/NonnullRefPtr.h>
+#include <Kernel/Library/NonnullLockRefPtr.h>
 #include <Kernel/PhysicalAddress.h>

 namespace Kernel::Memory {
@@ -36,7 +36,7 @@ public:
 free_this();
 }

-static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
+static NonnullLockRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);

 u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
@@ -5,9 +5,9 @@
 */

 #include <AK/BuiltinWrappers.h>
-#include <AK/NonnullRefPtr.h>
-#include <AK/RefPtr.h>
 #include <Kernel/Assertions.h>
+#include <Kernel/Library/LockRefPtr.h>
+#include <Kernel/Library/NonnullLockRefPtr.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PhysicalRegion.h>
 #include <Kernel/Memory/PhysicalZone.h>
@@ -76,7 +76,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
 return try_create(taken_lower, taken_upper);
 }

-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+NonnullLockRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
 auto rounded_page_count = next_power_of_two(count);
 auto order = count_trailing_zeroes(rounded_page_count);
@@ -96,7 +96,7 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
 if (!page_base.has_value())
 return {};

-NonnullRefPtrVector<PhysicalPage> physical_pages;
+NonnullLockRefPtrVector<PhysicalPage> physical_pages;
 physical_pages.ensure_capacity(count);

 for (size_t i = 0; i < count; ++i)
@@ -104,7 +104,7 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
 return physical_pages;
 }

-RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
+LockRefPtr<PhysicalPage> PhysicalRegion::take_free_page()
 {
 if (m_usable_zones.is_empty())
 return nullptr;
@@ -33,8 +33,8 @@ public:

 OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);

-RefPtr<PhysicalPage> take_free_page();
-NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+LockRefPtr<PhysicalPage> take_free_page();
+NonnullLockRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
 void return_page(PhysicalAddress);

 private:
@@ -9,26 +9,26 @@

 namespace Kernel::Memory {

-ErrorOr<NonnullRefPtr<PrivateInodeVMObject>> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
+ErrorOr<NonnullLockRefPtr<PrivateInodeVMObject>> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
 {
 auto new_physical_pages = TRY(VMObject::try_create_physical_pages(inode.size()));
 auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
-return adopt_nonnull_ref_or_enomem(new (nothrow) PrivateInodeVMObject(inode, move(new_physical_pages), move(dirty_pages)));
+return adopt_nonnull_lock_ref_or_enomem(new (nothrow) PrivateInodeVMObject(inode, move(new_physical_pages), move(dirty_pages)));
 }

-ErrorOr<NonnullRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
+ErrorOr<NonnullLockRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
 {
 auto new_physical_pages = TRY(this->try_clone_physical_pages());
 auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
-return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
+return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }

-PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
 : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }

-PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
 : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }
@@ -17,14 +17,14 @@ class PrivateInodeVMObject final : public InodeVMObject {
 public:
 virtual ~PrivateInodeVMObject() override;

-static ErrorOr<NonnullRefPtr<PrivateInodeVMObject>> try_create_with_inode(Inode&);
-virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override;
+static ErrorOr<NonnullLockRefPtr<PrivateInodeVMObject>> try_create_with_inode(Inode&);
+virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;

 private:
 virtual bool is_private_inode() const override { return true; }

-explicit PrivateInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit PrivateInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+explicit PrivateInodeVMObject(PrivateInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);

 virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }
@@ -27,7 +27,7 @@ Region::Region()
 {
 }

-Region::Region(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+Region::Region(NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 : m_range(VirtualRange({}, 0))
 , m_offset_in_vmobject(offset_in_vmobject)
 , m_vmobject(move(vmobject))
@@ -39,7 +39,7 @@ Region::Region(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnP
 m_vmobject->add_region(*this);
 }

-Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+Region::Region(VirtualRange const& range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 : m_range(range)
 , m_offset_in_vmobject(offset_in_vmobject)
 , m_vmobject(move(vmobject))
@@ -84,7 +84,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
 return adopt_nonnull_own_or_enomem(new (nothrow) Region);
 }

-ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
 return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 }
@@ -137,7 +137,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
 return clone_region;
 }

-void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
+void Region::set_vmobject(NonnullLockRefPtr<VMObject>&& obj)
 {
 if (m_vmobject.ptr() == obj.ptr())
 return;
@@ -182,7 +182,7 @@ size_t Region::amount_shared() const
 return bytes;
 }

-ErrorOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+ErrorOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
 return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 }
@@ -202,7 +202,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
 return {};
 }

-bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
+bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage> page)
 {
 VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());

@@ -241,7 +241,7 @@ bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> pa

 bool Region::map_individual_page_impl(size_t page_index)
 {
-RefPtr<PhysicalPage> page;
+LockRefPtr<PhysicalPage> page;
 {
 SpinlockLocker vmobject_locker(vmobject().m_lock);
 page = physical_page(page_index);
@@ -250,7 +250,7 @@ bool Region::map_individual_page_impl(size_t page_index)
 return map_individual_page_impl(page_index, page);
 }

-bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
+bool Region::remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage> physical_page)
 {
 SpinlockLocker page_lock(m_page_directory->get_lock());

@@ -410,7 +410,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
 if (current_thread != nullptr)
 current_thread->did_zero_fault();

-RefPtr<PhysicalPage> new_physical_page;
+LockRefPtr<PhysicalPage> new_physical_page;

 if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
 VERIFY(m_vmobject->is_anonymous());
@@ -546,14 +546,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 return PageFaultResponse::Continue;
 }

-RefPtr<PhysicalPage> Region::physical_page(size_t index) const
+LockRefPtr<PhysicalPage> Region::physical_page(size_t index) const
 {
 SpinlockLocker vmobject_locker(vmobject().m_lock);
 VERIFY(index < page_count());
 return vmobject().physical_pages()[first_page_index() + index];
 }

-RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
+LockRefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
 {
 VERIFY(vmobject().m_lock.is_locked_by_current_processor());
 VERIFY(index < page_count());
@@ -10,9 +10,9 @@
 #include <AK/EnumBits.h>
 #include <AK/IntrusiveList.h>
 #include <AK/IntrusiveRedBlackTree.h>
-#include <AK/Weakable.h>
 #include <Kernel/Forward.h>
 #include <Kernel/KString.h>
+#include <Kernel/Library/LockWeakable.h>
 #include <Kernel/Memory/PageFaultResponse.h>
 #include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Sections.h>
@@ -30,7 +30,7 @@ enum class ShouldFlushTLB {
 };

 class Region final
-: public Weakable<Region> {
+: public LockWeakable<Region> {
 friend class AddressSpace;
 friend class MemoryManager;
 friend class RegionTree;
@@ -54,9 +54,9 @@ public:
 Yes,
 };

-static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
+static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
 static ErrorOr<NonnullOwnPtr<Region>> create_unbacked();
-static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes, bool shared = false);
+static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes, bool shared = false);

 ~Region();

@@ -80,7 +80,7 @@ public:

 [[nodiscard]] VMObject const& vmobject() const { return *m_vmobject; }
 [[nodiscard]] VMObject& vmobject() { return *m_vmobject; }
-void set_vmobject(NonnullRefPtr<VMObject>&&);
+void set_vmobject(NonnullLockRefPtr<VMObject>&&);

 [[nodiscard]] bool is_shared() const { return m_shared; }
 void set_shared(bool shared) { m_shared = shared; }
@@ -152,8 +152,8 @@ public:
 return size() / PAGE_SIZE;
 }

-RefPtr<PhysicalPage> physical_page(size_t index) const;
-RefPtr<PhysicalPage>& physical_page_slot(size_t index);
+LockRefPtr<PhysicalPage> physical_page(size_t index) const;
+LockRefPtr<PhysicalPage>& physical_page_slot(size_t index);

 [[nodiscard]] size_t offset_in_vmobject() const
 {
@@ -196,10 +196,10 @@ public:

 private:
 Region();
-Region(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
-Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
+Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
+Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);

-[[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);
+[[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage>);

 void set_access_bit(Access access, bool b)
 {
@@ -214,12 +214,12 @@ private:
 [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);

 [[nodiscard]] bool map_individual_page_impl(size_t page_index);
-[[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
+[[nodiscard]] bool map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage>);

-RefPtr<PageDirectory> m_page_directory;
+LockRefPtr<PageDirectory> m_page_directory;
 VirtualRange m_range;
 size_t m_offset_in_vmobject { 0 };
-RefPtr<VMObject> m_vmobject;
+LockRefPtr<VMObject> m_vmobject;
 OwnPtr<KString> m_name;
 u8 m_access { Region::None };
 bool m_shared : 1 { false };
@@ -8,17 +8,17 @@

 namespace Kernel::Memory {

-RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
+LockRefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
 auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
 if (maybe_vm_object.is_error()) {
 // FIXME: Would be nice to be able to return a ErrorOr here.
 return {};
 }
-return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(maybe_vm_object.release_value(), request, device_block_size));
+return adopt_lock_ref_if_nonnull(new (nothrow) ScatterGatherList(maybe_vm_object.release_value(), request, device_block_size));
 }

-ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
+ScatterGatherList::ScatterGatherList(NonnullLockRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
 : m_vm_object(move(vm_object))
 {
 auto region_or_error = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)).release_value_but_fixme_should_propagate_errors(), "AHCI Scattered DMA"sv, Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
@@ -19,14 +19,14 @@ namespace Kernel::Memory {

 class ScatterGatherList final : public AtomicRefCounted<ScatterGatherList> {
 public:
-static RefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
+static LockRefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullLockRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
 VMObject const& vmobject() const { return m_vm_object; }
 VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
 size_t scatters_count() const { return m_vm_object->physical_pages().size(); }

 private:
-ScatterGatherList(NonnullRefPtr<AnonymousVMObject>, AsyncBlockDeviceRequest&, size_t device_block_size);
-NonnullRefPtr<AnonymousVMObject> m_vm_object;
+ScatterGatherList(NonnullLockRefPtr<AnonymousVMObject>, AsyncBlockDeviceRequest&, size_t device_block_size);
+NonnullLockRefPtr<AnonymousVMObject> m_vm_object;
 OwnPtr<Region> m_dma_region;
 };
@ -10,38 +10,38 @@
|
|||
|
||||
namespace Kernel::Memory {
|
||||
|
||||
ErrorOr<NonnullRefPtr<SharedFramebufferVMObject>> SharedFramebufferVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
|
||||
ErrorOr<NonnullLockRefPtr<SharedFramebufferVMObject>> SharedFramebufferVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
|
||||
{
|
||||
auto real_framebuffer_vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
|
||||
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
|
||||
auto committed_pages = TRY(MM.commit_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
|
||||
auto vm_object = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedFramebufferVMObject(move(new_physical_pages), move(committed_pages), real_framebuffer_vmobject)));
|
||||
auto vm_object = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedFramebufferVMObject(move(new_physical_pages), move(committed_pages), real_framebuffer_vmobject)));
|
||||
TRY(vm_object->create_fake_writes_framebuffer_vm_object());
|
||||
TRY(vm_object->create_real_writes_framebuffer_vm_object());
|
||||
return vm_object;
|
||||
}
|
-ErrorOr<NonnullRefPtr<SharedFramebufferVMObject>> SharedFramebufferVMObject::try_create_at_arbitrary_physical_range(size_t size)
+ErrorOr<NonnullLockRefPtr<SharedFramebufferVMObject>> SharedFramebufferVMObject::try_create_at_arbitrary_physical_range(size_t size)
 {
     auto real_framebuffer_vmobject = TRY(AnonymousVMObject::try_create_with_size(size, AllocationStrategy::AllocateNow));
     auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
     auto committed_pages = TRY(MM.commit_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
-    auto vm_object = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedFramebufferVMObject(move(new_physical_pages), move(committed_pages), real_framebuffer_vmobject)));
+    auto vm_object = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedFramebufferVMObject(move(new_physical_pages), move(committed_pages), real_framebuffer_vmobject)));
     TRY(vm_object->create_fake_writes_framebuffer_vm_object());
     TRY(vm_object->create_real_writes_framebuffer_vm_object());
     return vm_object;
 }
 
-ErrorOr<NonnullRefPtr<SharedFramebufferVMObject::FakeWritesFramebufferVMObject>> SharedFramebufferVMObject::FakeWritesFramebufferVMObject::try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object)
+ErrorOr<NonnullLockRefPtr<SharedFramebufferVMObject::FakeWritesFramebufferVMObject>> SharedFramebufferVMObject::FakeWritesFramebufferVMObject::try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object)
 {
     auto new_physical_pages = TRY(VMObject::try_create_physical_pages(0));
-    return adopt_nonnull_ref_or_enomem(new (nothrow) FakeWritesFramebufferVMObject(parent_object, move(new_physical_pages)));
+    return adopt_nonnull_lock_ref_or_enomem(new (nothrow) FakeWritesFramebufferVMObject(parent_object, move(new_physical_pages)));
 }
 
-ErrorOr<NonnullRefPtr<SharedFramebufferVMObject::RealWritesFramebufferVMObject>> SharedFramebufferVMObject::RealWritesFramebufferVMObject::try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object)
+ErrorOr<NonnullLockRefPtr<SharedFramebufferVMObject::RealWritesFramebufferVMObject>> SharedFramebufferVMObject::RealWritesFramebufferVMObject::try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object)
 {
     auto new_physical_pages = TRY(VMObject::try_create_physical_pages(0));
-    return adopt_nonnull_ref_or_enomem(new (nothrow) RealWritesFramebufferVMObject(parent_object, move(new_physical_pages)));
+    return adopt_nonnull_lock_ref_or_enomem(new (nothrow) RealWritesFramebufferVMObject(parent_object, move(new_physical_pages)));
 }
 
 ErrorOr<void> SharedFramebufferVMObject::create_fake_writes_framebuffer_vm_object()
@@ -56,21 +56,21 @@ ErrorOr<void> SharedFramebufferVMObject::create_real_writes_framebuffer_vm_object()
     return {};
 }
 
-Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
+Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::real_framebuffer_physical_pages()
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
-Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
+Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::real_framebuffer_physical_pages() const
 {
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
+Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages()
 {
     return m_physical_pages.span();
 }
 
-Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
+Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::fake_sink_framebuffer_physical_pages() const
 {
     return m_physical_pages.span();
 }
@@ -92,14 +92,14 @@ void SharedFramebufferVMObject::switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>)
     });
 }
 
-Span<RefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
+Span<LockRefPtr<PhysicalPage> const> SharedFramebufferVMObject::physical_pages() const
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
         return VMObject::physical_pages();
     return m_real_framebuffer_vmobject->physical_pages();
 }
-Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
+Span<LockRefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
 {
     SpinlockLocker locker(m_writes_state_lock);
     if (m_writes_are_faked)
@@ -107,7 +107,7 @@ Span<RefPtr<PhysicalPage>> SharedFramebufferVMObject::physical_pages()
     return m_real_framebuffer_vmobject->physical_pages();
 }
 
-SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
+SharedFramebufferVMObject::SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet committed_pages, AnonymousVMObject& real_framebuffer_vmobject)
     : VMObject(move(new_physical_pages))
     , m_real_framebuffer_vmobject(real_framebuffer_vmobject)
     , m_committed_pages(move(committed_pages))
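
The factory functions above all follow the kernel's fallible-construction idiom: do the fallible setup first, allocate the object with new (nothrow), and let adopt_nonnull_lock_ref_or_enomem turn an allocation failure into ENOMEM instead of a crash. A minimal sketch of that idiom, assuming a hypothetical ExampleVMObject class that is not part of this change:

// Hypothetical illustration of the factory idiom; ExampleVMObject is not a real class.
ErrorOr<NonnullLockRefPtr<ExampleVMObject>> ExampleVMObject::try_create(size_t size)
{
    // Fallible setup happens before construction, so a failure leaves nothing half-built.
    auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
    // new (nothrow) returns nullptr on OOM; the adopt helper converts that into ErrorOr's ENOMEM.
    return adopt_nonnull_lock_ref_or_enomem(new (nothrow) ExampleVMObject(move(new_physical_pages)));
}
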
@@ -19,55 +19,55 @@ class SharedFramebufferVMObject final : public VMObject {
 public:
     class FakeWritesFramebufferVMObject final : public VMObject {
     public:
-        static ErrorOr<NonnullRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
+        static ErrorOr<NonnullLockRefPtr<FakeWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
 
     private:
-        FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+        FakeWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
             : VMObject(move(new_physical_pages))
             , m_parent_object(parent_object)
         {
         }
         virtual StringView class_name() const override { return "FakeWritesFramebufferVMObject"sv; }
-        virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
-        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
-        NonnullRefPtr<SharedFramebufferVMObject> m_parent_object;
+        virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
+        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->fake_sink_framebuffer_physical_pages(); }
+        NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };
 
     class RealWritesFramebufferVMObject final : public VMObject {
     public:
-        static ErrorOr<NonnullRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
+        static ErrorOr<NonnullLockRefPtr<RealWritesFramebufferVMObject>> try_create(Badge<SharedFramebufferVMObject>, SharedFramebufferVMObject const& parent_object);
 
     private:
-        RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+        RealWritesFramebufferVMObject(SharedFramebufferVMObject const& parent_object, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
            : VMObject(move(new_physical_pages))
            , m_parent_object(parent_object)
         {
         }
         virtual StringView class_name() const override { return "RealWritesFramebufferVMObject"sv; }
-        virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
-        virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
-        virtual Span<RefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
-        NonnullRefPtr<SharedFramebufferVMObject> m_parent_object;
+        virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
+        virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override { return m_parent_object->real_framebuffer_physical_pages(); }
+        virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override { return m_parent_object->real_framebuffer_physical_pages(); }
+        NonnullLockRefPtr<SharedFramebufferVMObject> m_parent_object;
     };
 
     virtual ~SharedFramebufferVMObject() override = default;
 
-    static ErrorOr<NonnullRefPtr<SharedFramebufferVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static ErrorOr<NonnullRefPtr<SharedFramebufferVMObject>> try_create_at_arbitrary_physical_range(size_t size);
-    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
+    static ErrorOr<NonnullLockRefPtr<SharedFramebufferVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+    static ErrorOr<NonnullLockRefPtr<SharedFramebufferVMObject>> try_create_at_arbitrary_physical_range(size_t size);
+    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return Error::from_errno(ENOTIMPL); }
 
     void switch_to_fake_sink_framebuffer_writes(Badge<Kernel::DisplayConnector>);
     void switch_to_real_framebuffer_writes(Badge<Kernel::DisplayConnector>);
 
-    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const override;
-    virtual Span<RefPtr<PhysicalPage>> physical_pages() override;
+    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const override;
+    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() override;
 
-    Span<RefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
-    Span<RefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
+    Span<LockRefPtr<PhysicalPage>> fake_sink_framebuffer_physical_pages();
+    Span<LockRefPtr<PhysicalPage> const> fake_sink_framebuffer_physical_pages() const;
 
-    Span<RefPtr<PhysicalPage>> real_framebuffer_physical_pages();
-    Span<RefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
+    Span<LockRefPtr<PhysicalPage>> real_framebuffer_physical_pages();
+    Span<LockRefPtr<PhysicalPage> const> real_framebuffer_physical_pages() const;
 
     FakeWritesFramebufferVMObject const& fake_writes_framebuffer_vmobject() const { return *m_fake_writes_framebuffer_vmobject; }
     FakeWritesFramebufferVMObject& fake_writes_framebuffer_vmobject() { return *m_fake_writes_framebuffer_vmobject; }
@@ -76,16 +76,16 @@ public:
     RealWritesFramebufferVMObject& real_writes_framebuffer_vmobject() { return *m_real_writes_framebuffer_vmobject; }
 
 private:
-    SharedFramebufferVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
+    SharedFramebufferVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, CommittedPhysicalPageSet, AnonymousVMObject& real_framebuffer_vmobject);
 
     virtual StringView class_name() const override { return "SharedFramebufferVMObject"sv; }
 
     ErrorOr<void> create_fake_writes_framebuffer_vm_object();
     ErrorOr<void> create_real_writes_framebuffer_vm_object();
 
-    NonnullRefPtr<AnonymousVMObject> m_real_framebuffer_vmobject;
-    RefPtr<FakeWritesFramebufferVMObject> m_fake_writes_framebuffer_vmobject;
-    RefPtr<RealWritesFramebufferVMObject> m_real_writes_framebuffer_vmobject;
+    NonnullLockRefPtr<AnonymousVMObject> m_real_framebuffer_vmobject;
+    LockRefPtr<FakeWritesFramebufferVMObject> m_fake_writes_framebuffer_vmobject;
+    LockRefPtr<RealWritesFramebufferVMObject> m_real_writes_framebuffer_vmobject;
     bool m_writes_are_faked { false };
     mutable RecursiveSpinlock m_writes_state_lock { LockRank::None };
     CommittedPhysicalPageSet m_committed_pages;

@@ -10,31 +10,31 @@
 
 namespace Kernel::Memory {
 
-ErrorOr<NonnullRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
+ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
 {
     size_t size = inode.size();
     if (auto shared_vmobject = inode.shared_vmobject())
         return shared_vmobject.release_nonnull();
     auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
     auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
-    auto vmobject = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedInodeVMObject(inode, move(new_physical_pages), move(dirty_pages))));
+    auto vmobject = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) SharedInodeVMObject(inode, move(new_physical_pages), move(dirty_pages))));
     TRY(vmobject->inode().set_shared_vmobject(*vmobject));
     return vmobject;
 }
 
-ErrorOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
+ErrorOr<NonnullLockRefPtr<VMObject>> SharedInodeVMObject::try_clone()
 {
     auto new_physical_pages = TRY(this->try_clone_physical_pages());
     auto dirty_pages = TRY(Bitmap::try_create(new_physical_pages.size(), false));
-    return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
+    return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this, move(new_physical_pages), move(dirty_pages)));
 }
 
-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(Inode& inode, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(inode, move(new_physical_pages), move(dirty_pages))
 {
 }
 
-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
+SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other, FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
     : InodeVMObject(other, move(new_physical_pages), move(dirty_pages))
 {
 }
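
Note that try_create_with_inode is effectively get-or-create: it returns the vmobject already registered on the inode when one exists, and otherwise builds a new one and registers it via set_shared_vmobject. A hedged caller-side sketch (the helper function and the VERIFY check are illustrative, not code from this change):

// Hypothetical usage sketch; only the SharedInodeVMObject API shown above is assumed.
ErrorOr<void> example_share_inode_pages(Inode& inode)
{
    auto first = TRY(Memory::SharedInodeVMObject::try_create_with_inode(inode));
    auto second = TRY(Memory::SharedInodeVMObject::try_create_with_inode(inode));
    // While the first reference is still alive, the second call hands back the
    // vmobject cached on the inode, so the physical pages are shared rather than duplicated.
    VERIFY(first.ptr() == second.ptr());
    return {};
}
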
@@ -15,16 +15,16 @@ class SharedInodeVMObject final : public InodeVMObject {
     AK_MAKE_NONMOVABLE(SharedInodeVMObject);
 
 public:
-    static ErrorOr<NonnullRefPtr<SharedInodeVMObject>> try_create_with_inode(Inode&);
-    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override;
+    static ErrorOr<NonnullLockRefPtr<SharedInodeVMObject>> try_create_with_inode(Inode&);
+    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
 
     ErrorOr<void> sync(off_t offset_in_pages = 0, size_t pages = -1);
 
 private:
     virtual bool is_shared_inode() const override { return true; }
 
-    explicit SharedInodeVMObject(Inode&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
-    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<RefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
+    explicit SharedInodeVMObject(SharedInodeVMObject const&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);
 
     virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }
 

@@ -17,17 +17,17 @@ SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
     return s_all_instances;
 }
 
-ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
+ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_clone_physical_pages() const
 {
     return m_physical_pages.try_clone();
 }
 
-ErrorOr<FixedArray<RefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
+ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> VMObject::try_create_physical_pages(size_t size)
 {
-    return FixedArray<RefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
+    return FixedArray<LockRefPtr<PhysicalPage>>::try_create(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
 }
 
-VMObject::VMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+VMObject::VMObject(FixedArray<LockRefPtr<PhysicalPage>>&& new_physical_pages)
     : m_physical_pages(move(new_physical_pages))
 {
     all_instances().with([&](auto& list) { list.append(*this); });
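
try_create_physical_pages sizes the backing array by rounding the byte size up to whole pages; each slot starts out as a null LockRefPtr until a physical page is actually committed to it. A small worked sketch of that rounding, assuming the usual 4 KiB PAGE_SIZE (the helper below is illustrative, not kernel code):

// Illustrative only: the ceil_div rounding used above, spelled out.
constexpr size_t pages_needed(size_t byte_size)
{
    return (byte_size + PAGE_SIZE - 1) / PAGE_SIZE; // same result as ceil_div(byte_size, PAGE_SIZE)
}
static_assert(pages_needed(1) == 1);
static_assert(pages_needed(3 * PAGE_SIZE) == 3);
static_assert(pages_needed(3 * PAGE_SIZE + 1) == 4);
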
@@ -8,10 +8,10 @@
 
 #include <AK/FixedArray.h>
 #include <AK/IntrusiveList.h>
-#include <AK/RefPtr.h>
-#include <AK/Weakable.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Library/ListedRefCounted.h>
+#include <Kernel/Library/LockRefPtr.h>
+#include <Kernel/Library/LockWeakable.h>
 #include <Kernel/Locking/Mutex.h>
 #include <Kernel/Memory/Region.h>
 
@@ -19,14 +19,14 @@ namespace Kernel::Memory {
 
 class VMObject
     : public ListedRefCounted<VMObject, LockType::Spinlock>
-    , public Weakable<VMObject> {
+    , public LockWeakable<VMObject> {
     friend class MemoryManager;
     friend class Region;
 
 public:
     virtual ~VMObject();
 
-    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() = 0;
+    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() = 0;
 
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }
@@ -35,8 +35,8 @@ public:
 
     size_t page_count() const { return m_physical_pages.size(); }
 
-    virtual Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
-    virtual Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
+    virtual Span<LockRefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
+    virtual Span<LockRefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
 
     size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
 
@@ -55,15 +55,15 @@ public:
     }
 
 protected:
-    static ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
-    ErrorOr<FixedArray<RefPtr<PhysicalPage>>> try_clone_physical_pages() const;
-    explicit VMObject(FixedArray<RefPtr<PhysicalPage>>&&);
+    static ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_create_physical_pages(size_t);
+    ErrorOr<FixedArray<LockRefPtr<PhysicalPage>>> try_clone_physical_pages() const;
+    explicit VMObject(FixedArray<LockRefPtr<PhysicalPage>>&&);
 
     template<typename Callback>
     void for_each_region(Callback);
 
     IntrusiveListNode<VMObject> m_list_node;
-    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
+    FixedArray<LockRefPtr<PhysicalPage>> m_physical_pages;
 
     mutable RecursiveSpinlock m_lock { LockRank::None };
 
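
Taken together, the renamed base-class API means a VMObject subclass now expresses try_clone() in terms of NonnullLockRefPtr and hands its page array up as FixedArray<LockRefPtr<PhysicalPage>>&&. A minimal sketch of such a subclass, assuming a hypothetical TrivialVMObject that does not exist in the tree:

// Hypothetical subclass, shown only to illustrate the renamed signatures.
class TrivialVMObject final : public VMObject {
public:
    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override
    {
        // Copy the page array via the protected helper on VMObject, then wrap the clone.
        auto cloned_pages = TRY(try_clone_physical_pages());
        return adopt_nonnull_lock_ref_or_enomem<VMObject>(new (nothrow) TrivialVMObject(move(cloned_pages)));
    }

private:
    explicit TrivialVMObject(FixedArray<LockRefPtr<PhysicalPage>>&& pages)
        : VMObject(move(pages))
    {
    }

    virtual StringView class_name() const override { return "TrivialVMObject"sv; }
};
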