Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 15:07:45 +00:00
AK: Rename RetainPtr => RefPtr and Retained => NonnullRefPtr.
This commit is contained in:
parent 77b9fa89dd
commit 90b1354688
188 changed files with 562 additions and 562 deletions
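Mechanically this is a one-for-one, tree-wide rename (562 additions, 562 deletions), but the new names spell out a real contract: RefPtr<T> may be null and has to be checked, while NonnullRefPtr<T> can never be null. A minimal sketch of that contract, with assumed header paths and an invented Thing class (this is not code from the commit):

// Sketch only: illustrates the contract the renamed types express.
// Header paths are assumptions; 'Thing' is a made-up class.
#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>

class Thing : public RefCounted<Thing> {
};

static void example()
{
    RefPtr<Thing> maybe_thing;                       // nullable: starts out null, check before use
    NonnullRefPtr<Thing> thing = adopt(*new Thing);  // never null, so no check is ever needed
    maybe_thing = thing;                             // a NonnullRefPtr converts to the nullable RefPtr
    if (!maybe_thing)
        return;                                      // the nullable pointer must be handled
}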
@@ -81,7 +81,7 @@ void MemoryManager::initialize_paging()
 #endif
     m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);

-    RetainPtr<PhysicalRegion> region = nullptr;
+    RefPtr<PhysicalRegion> region = nullptr;
     bool region_is_super = false;

     for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
@@ -151,7 +151,7 @@ void MemoryManager::initialize_paging()
 #endif
 }

-RetainPtr<PhysicalPage> MemoryManager::allocate_page_table(PageDirectory& page_directory, unsigned index)
+RefPtr<PhysicalPage> MemoryManager::allocate_page_table(PageDirectory& page_directory, unsigned index)
 {
     ASSERT(!page_directory.m_physical_pages.contains(index));
     auto physical_page = allocate_supervisor_physical_page();
@@ -444,7 +444,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return PageFaultResponse::ShouldCrash;
 }

-RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
+RefPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
 {
     InterruptDisabler disabler;

@@ -478,11 +478,11 @@ void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
     ASSERT_NOT_REACHED();
 }

-RetainPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
+RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
 {
     InterruptDisabler disabler;

-    RetainPtr<PhysicalPage> page = nullptr;
+    RefPtr<PhysicalPage> page = nullptr;

     for (auto& region : m_user_physical_regions) {
         page = region->take_free_page(false);
@@ -535,11 +535,11 @@ void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
     ASSERT_NOT_REACHED();
 }

-RetainPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
+RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
 {
     InterruptDisabler disabler;

-    RetainPtr<PhysicalPage> page = nullptr;
+    RefPtr<PhysicalPage> page = nullptr;

     for (auto& region : m_super_physical_regions) {
         page = region->take_free_page(true);
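Both allocators stay nullable after the rename: allocate_user_physical_page() and allocate_supervisor_physical_page() return RefPtr<PhysicalPage> precisely because every physical region may be exhausted, so callers have to check the result. A hypothetical caller sketch under that assumption (MM and ShouldZeroFill appear in the hunks; the helper itself is invented, and ShouldZeroFill is assumed to be a scoped enum inside MemoryManager):

// Hypothetical helper -- not code from this commit.
static RefPtr<PhysicalPage> try_grab_zeroed_user_page()
{
    auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (!page) {
        // The RefPtr return type makes the failure case explicit:
        // no physical region had a free page left.
        kprintf("MM: no user physical pages available\n");
        return nullptr;
    }
    // From here on 'page' is known to be non-null and keeps the PhysicalPage alive.
    return page;
}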
@@ -61,8 +61,8 @@ public:
         Yes
     };

-    RetainPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill);
-    RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
+    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill);
+    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
     void deallocate_user_physical_page(PhysicalPage&&);
     void deallocate_supervisor_physical_page(PhysicalPage&&);

@@ -70,7 +70,7 @@ public:

     void map_for_kernel(VirtualAddress, PhysicalAddress);

-    RetainPtr<Region> allocate_kernel_region(size_t, String&& name);
+    RefPtr<Region> allocate_kernel_region(size_t, String&& name);
     void map_region_at_address(PageDirectory&, Region&, VirtualAddress, bool user_accessible);

     unsigned user_physical_pages() const { return m_user_physical_pages; }
@@ -93,7 +93,7 @@ private:
     void flush_entire_tlb();
     void flush_tlb(VirtualAddress);

-    RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);
+    RefPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index);

     void map_protected(VirtualAddress, size_t length);

@@ -214,7 +214,7 @@ private:

     PageTableEntry ensure_pte(PageDirectory&, VirtualAddress);

-    RetainPtr<PageDirectory> m_kernel_page_directory;
+    RefPtr<PageDirectory> m_kernel_page_directory;
     dword* m_page_table_zero { nullptr };
     dword* m_page_table_one { nullptr };

@@ -225,8 +225,8 @@ private:
     unsigned m_super_physical_pages { 0 };
     unsigned m_super_physical_pages_used { 0 };

-    Vector<Retained<PhysicalRegion>> m_user_physical_regions {};
-    Vector<Retained<PhysicalRegion>> m_super_physical_regions {};
+    Vector<NonnullRefPtr<PhysicalRegion>> m_user_physical_regions {};
+    Vector<NonnullRefPtr<PhysicalRegion>> m_super_physical_regions {};

     HashTable<VMObject*> m_vmos;
     HashTable<Region*> m_user_regions;
@@ -10,8 +10,8 @@ class PageDirectory : public RefCounted<PageDirectory> {
     friend class MemoryManager;

 public:
-    static Retained<PageDirectory> create_for_userspace(const RangeAllocator* parent_range_allocator = nullptr) { return adopt(*new PageDirectory(parent_range_allocator)); }
-    static Retained<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
+    static NonnullRefPtr<PageDirectory> create_for_userspace(const RangeAllocator* parent_range_allocator = nullptr) { return adopt(*new PageDirectory(parent_range_allocator)); }
+    static NonnullRefPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
     ~PageDirectory();

     dword cr3() const { return m_directory_page->paddr().get(); }
@@ -26,6 +26,6 @@ private:
     explicit PageDirectory(PhysicalAddress);

     RangeAllocator m_range_allocator;
-    RetainPtr<PhysicalPage> m_directory_page;
-    HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
+    RefPtr<PhysicalPage> m_directory_page;
+    HashMap<unsigned, RefPtr<PhysicalPage>> m_physical_pages;
 };
@@ -2,7 +2,7 @@
 #include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/kmalloc.h>

-Retained<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
+NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
 {
     void* slot = kmalloc(sizeof(PhysicalPage));
     new (slot) PhysicalPage(paddr, supervisor, may_return_to_freelist);
@@ -28,7 +28,7 @@ public:
         }
     }

-    static Retained<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
+    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);

     word ref_count() const { return m_retain_count; }
@@ -6,7 +6,7 @@
 #include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/VM/PhysicalRegion.h>

-Retained<PhysicalRegion> PhysicalRegion::create(PhysicalAddress lower, PhysicalAddress upper)
+NonnullRefPtr<PhysicalRegion> PhysicalRegion::create(PhysicalAddress lower, PhysicalAddress upper)
 {
     return adopt(*new PhysicalRegion(lower, upper));
 }
@@ -36,7 +36,7 @@ unsigned PhysicalRegion::finalize_capacity()
     return size();
 }

-RetainPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 {
     ASSERT(m_pages);
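The two return types here line up with the intent of the rename: create() wraps a freshly constructed object with adopt() and so returns NonnullRefPtr, while take_free_page() can come up empty and returns RefPtr. A minimal sketch of the adopt() factory idiom (the 'Gadget' class is invented; only the shape of the pattern is taken from the diff):

// Sketch only: the adopt() factory idiom as used by PhysicalRegion::create() above.
// 'Gadget' is a made-up stand-in class.
class Gadget : public RefCounted<Gadget> {
public:
    static NonnullRefPtr<Gadget> create()
    {
        // adopt() takes ownership of the initial reference and wraps the object
        // in a NonnullRefPtr, so the factory can never hand out a null pointer.
        return adopt(*new Gadget);
    }

private:
    Gadget() {}
};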
@@ -10,7 +10,7 @@ class PhysicalRegion : public RefCounted<PhysicalRegion> {
     AK_MAKE_ETERNAL

 public:
-    static Retained<PhysicalRegion> create(PhysicalAddress lower, PhysicalAddress upper);
+    static NonnullRefPtr<PhysicalRegion> create(PhysicalAddress lower, PhysicalAddress upper);
     ~PhysicalRegion() {}

     void expand(PhysicalAddress lower, PhysicalAddress upper);
@@ -23,7 +23,7 @@ public:
     unsigned free() const { return m_pages - m_used; }
     bool contains(PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }

-    RetainPtr<PhysicalPage> take_free_page(bool supervisor);
+    RefPtr<PhysicalPage> take_free_page(bool supervisor);
     void return_page_at(PhysicalAddress addr);
     void return_page(PhysicalPage&& page) { return_page_at(page.paddr()); }
@@ -15,7 +15,7 @@ Region::Region(const Range& range, const String& name, byte access, bool cow)
     MM.register_region(*this);
 }

-Region::Region(const Range& range, RetainPtr<Inode>&& inode, const String& name, byte access)
+Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, byte access)
     : m_range(range)
     , m_vmo(VMObject::create_file_backed(move(inode)))
     , m_name(name)
@@ -25,7 +25,7 @@ Region::Region(const Range& range, RetainPtr<Inode>&& inode, const String& name, byte access)
     MM.register_region(*this);
 }

-Region::Region(const Range& range, Retained<VMObject>&& vmo, size_t offset_in_vmo, const String& name, byte access, bool cow)
+Region::Region(const Range& range, NonnullRefPtr<VMObject>&& vmo, size_t offset_in_vmo, const String& name, byte access, bool cow)
     : m_range(range)
     , m_offset_in_vmo(offset_in_vmo)
     , m_vmo(move(vmo))
@@ -66,7 +66,7 @@ bool Region::page_in()
     return true;
 }

-Retained<Region> Region::clone()
+NonnullRefPtr<Region> Region::clone()
 {
     ASSERT(current);
     if (m_shared || (is_readable() && !is_writable())) {
@@ -19,8 +19,8 @@ public:
     };

     Region(const Range&, const String&, byte access, bool cow = false);
-    Region(const Range&, Retained<VMObject>&&, size_t offset_in_vmo, const String&, byte access, bool cow = false);
-    Region(const Range&, RetainPtr<Inode>&&, const String&, byte access);
+    Region(const Range&, NonnullRefPtr<VMObject>&&, size_t offset_in_vmo, const String&, byte access, bool cow = false);
+    Region(const Range&, RefPtr<Inode>&&, const String&, byte access);
     ~Region();

     VirtualAddress vaddr() const { return m_range.base(); }
@@ -38,7 +38,7 @@ public:
     bool is_shared() const { return m_shared; }
     void set_shared(bool shared) { m_shared = shared; }

-    Retained<Region> clone();
+    NonnullRefPtr<Region> clone();

     bool contains(VirtualAddress vaddr) const
     {
@@ -97,10 +97,10 @@ public:
     }

 private:
-    RetainPtr<PageDirectory> m_page_directory;
+    RefPtr<PageDirectory> m_page_directory;
     Range m_range;
     size_t m_offset_in_vmo { 0 };
-    Retained<VMObject> m_vmo;
+    NonnullRefPtr<VMObject> m_vmo;
     String m_name;
     byte m_access { 0 };
     bool m_shared { false };
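The member types above show the practical difference between the two renamed pointers: m_vmo becomes a NonnullRefPtr<VMObject>, so every Region is guaranteed a backing VMObject, while m_page_directory stays a RefPtr<PageDirectory> and may remain null until the region is mapped. A minimal sketch of that constraint (the class below is invented for illustration; only the member types are taken from the diff):

// Sketch only: a NonnullRefPtr member has no null state, so it must be initialized
// in every constructor -- which is why the Region constructors shown above either
// take a VMObject directly or build one (e.g. VMObject::create_file_backed(move(inode))).
class OwnerSketch {
public:
    explicit OwnerSketch(NonnullRefPtr<VMObject>&& vmo)
        : m_vmo(move(vmo)) // mandatory: cannot be left unset
    {
    }

private:
    NonnullRefPtr<VMObject> m_vmo;
    RefPtr<PageDirectory> m_page_directory; // may legitimately stay null for now
};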
@@ -3,7 +3,7 @@
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/VMObject.h>

-Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode)
+NonnullRefPtr<VMObject> VMObject::create_file_backed(RefPtr<Inode>&& inode)
 {
     InterruptDisabler disabler;
     if (inode->vmo())
@@ -13,13 +13,13 @@ Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode)
     return vmo;
 }

-Retained<VMObject> VMObject::create_anonymous(size_t size)
+NonnullRefPtr<VMObject> VMObject::create_anonymous(size_t size)
 {
     size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
     return adopt(*new VMObject(size));
 }

-Retained<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
+NonnullRefPtr<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
     size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
     auto vmo = adopt(*new VMObject(paddr, size));
@@ -27,7 +27,7 @@ Retained<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
     return vmo;
 }

-Retained<VMObject> VMObject::clone()
+NonnullRefPtr<VMObject> VMObject::clone()
 {
     return adopt(*new VMObject(*this));
 }
@@ -59,7 +59,7 @@ VMObject::VMObject(PhysicalAddress paddr, size_t size)
     ASSERT(m_physical_pages.size() == page_count());
 }

-VMObject::VMObject(RetainPtr<Inode>&& inode)
+VMObject::VMObject(RefPtr<Inode>&& inode)
     : m_inode(move(inode))
 {
     ASSERT(m_inode);
@@ -18,10 +18,10 @@ class VMObject : public RefCounted<VMObject>
     friend class MemoryManager;

 public:
-    static Retained<VMObject> create_file_backed(RetainPtr<Inode>&&);
-    static Retained<VMObject> create_anonymous(size_t);
-    static Retained<VMObject> create_for_physical_range(PhysicalAddress, size_t);
-    Retained<VMObject> clone();
+    static NonnullRefPtr<VMObject> create_file_backed(RefPtr<Inode>&&);
+    static NonnullRefPtr<VMObject> create_anonymous(size_t);
+    static NonnullRefPtr<VMObject> create_for_physical_range(PhysicalAddress, size_t);
+    NonnullRefPtr<VMObject> clone();

     ~VMObject();
     bool is_anonymous() const { return !m_inode; }
@@ -34,8 +34,8 @@ public:
     void set_name(const String& name) { m_name = name; }

     size_t page_count() const { return m_size / PAGE_SIZE; }
-    const Vector<RetainPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
-    Vector<RetainPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
+    const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
+    Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }

     void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const byte*);
     void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
@@ -43,7 +43,7 @@ public:
     size_t size() const { return m_size; }

 private:
-    VMObject(RetainPtr<Inode>&&);
+    VMObject(RefPtr<Inode>&&);
     explicit VMObject(VMObject&);
     explicit VMObject(size_t);
     VMObject(PhysicalAddress, size_t);
@@ -55,7 +55,7 @@ private:
     bool m_allow_cpu_caching { true };
     off_t m_inode_offset { 0 };
     size_t m_size { 0 };
-    RetainPtr<Inode> m_inode;
-    Vector<RetainPtr<PhysicalPage>> m_physical_pages;
+    RefPtr<Inode> m_inode;
+    Vector<RefPtr<PhysicalPage>> m_physical_pages;
     Lock m_paging_lock { "VMObject" };
 };
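One detail worth noting: m_physical_pages stays a Vector<RefPtr<PhysicalPage>> rather than becoming Vector<NonnullRefPtr<...>>, presumably because a slot may be empty until the corresponding page has been committed or paged in. A hypothetical demand-paging check under that assumption (page_index and the surrounding logic are invented; physical_pages(), MM and ShouldZeroFill come from the hunks above):

// Hypothetical sketch -- not code from this commit.
auto& slot = vmo.physical_pages()[page_index];
if (!slot) {
    // A null RefPtr slot is taken to mean "no physical page committed yet".
    slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (!slot)
        return false; // out of physical memory
}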