
Kernel: Make self-contained locking smart pointers their own classes

Until now, our kernel has reimplemented a number of AK classes to
provide automatic internal locking:

- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable

This patch renames the Kernel classes so that they can coexist with
the original AK classes:

- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable

The goal here is to eventually get rid of the Lock* classes in favor of
using external locking.
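
For readers who want the distinction spelled out, here is a minimal sketch in plain C++ (standard library types, not SerenityOS code; every name below is illustrative and not from this patch): the first class serializes access inside the pointer wrapper itself, which is roughly the Lock* model, while the second keeps a plain pointer member and relies on a lock owned by the surrounding object.

#include <memory>
#include <mutex>

struct VMObjectLike { };

// Internal locking, roughly the model of the Lock* classes: the pointer
// wrapper carries its own lock and takes it on every access.
class InternallyLockedPtr {
public:
    void assign(std::shared_ptr<VMObjectLike> ptr)
    {
        std::lock_guard guard { m_lock };
        m_ptr = std::move(ptr);
    }

    std::shared_ptr<VMObjectLike> get() const
    {
        std::lock_guard guard { m_lock };
        return m_ptr;
    }

private:
    mutable std::mutex m_lock;
    std::shared_ptr<VMObjectLike> m_ptr;
};

// External locking, the stated end goal: the pointer member is plain, and
// the object that owns it takes its own lock around every access.
class RegionLike {
public:
    void set_vmobject(std::shared_ptr<VMObjectLike> obj)
    {
        std::lock_guard guard { m_lock }; // lock lives in the owner, not in the pointer type
        m_vmobject = std::move(obj);
    }

    std::shared_ptr<VMObjectLike> vmobject() const
    {
        std::lock_guard guard { m_lock };
        return m_vmobject;
    }

private:
    mutable std::mutex m_lock;
    std::shared_ptr<VMObjectLike> m_vmobject;
};

The practical difference is that with external locking the lock protecting m_vmobject is visible at the call site, so callers can reason about lock ordering without looking inside the smart-pointer implementation.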
Andreas Kling 2022-08-19 20:53:40 +02:00
parent e475263113
commit 11eee67b85
360 changed files with 1703 additions and 1672 deletions


@@ -27,7 +27,7 @@ Region::Region()
 {
 }

-Region::Region(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+Region::Region(NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
     : m_range(VirtualRange({}, 0))
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
@@ -39,7 +39,7 @@ Region::Region(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnP
     m_vmobject->add_region(*this);
 }

-Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+Region::Region(VirtualRange const& range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
@@ -84,7 +84,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
     return adopt_nonnull_own_or_enomem(new (nothrow) Region);
 }

-ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
     return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 }
@@ -137,7 +137,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
     return clone_region;
 }

-void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
+void Region::set_vmobject(NonnullLockRefPtr<VMObject>&& obj)
 {
     if (m_vmobject.ptr() == obj.ptr())
         return;
@@ -182,7 +182,7 @@ size_t Region::amount_shared() const
     return bytes;
 }

-ErrorOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+ErrorOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
     return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 }
@@ -202,7 +202,7 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
     return {};
 }

-bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
+bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage> page)
 {
     VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
@@ -241,7 +241,7 @@ bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> pa
 bool Region::map_individual_page_impl(size_t page_index)
 {
-    RefPtr<PhysicalPage> page;
+    LockRefPtr<PhysicalPage> page;
     {
         SpinlockLocker vmobject_locker(vmobject().m_lock);
         page = physical_page(page_index);
@@ -250,7 +250,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     return map_individual_page_impl(page_index, page);
 }

-bool Region::remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage> physical_page)
+bool Region::remap_vmobject_page(size_t page_index, NonnullLockRefPtr<PhysicalPage> physical_page)
 {
     SpinlockLocker page_lock(m_page_directory->get_lock());
@@ -410,7 +410,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, Physica
     if (current_thread != nullptr)
         current_thread->did_zero_fault();

-    RefPtr<PhysicalPage> new_physical_page;
+    LockRefPtr<PhysicalPage> new_physical_page;

     if (page_in_slot_at_time_of_fault.is_lazy_committed_page()) {
         VERIFY(m_vmobject->is_anonymous());
@@ -546,14 +546,14 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     return PageFaultResponse::Continue;
 }

-RefPtr<PhysicalPage> Region::physical_page(size_t index) const
+LockRefPtr<PhysicalPage> Region::physical_page(size_t index) const
 {
     SpinlockLocker vmobject_locker(vmobject().m_lock);
     VERIFY(index < page_count());
     return vmobject().physical_pages()[first_page_index() + index];
 }

-RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
+LockRefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
 {
     VERIFY(vmobject().m_lock.is_locked_by_current_processor());
     VERIFY(index < page_count());
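
As a hypothetical sketch of the direction described in the commit message (not code from this commit, and not compilable on its own), the physical_page() accessor from the last hunk is what such a migration might eventually look like once its callers rely on external locking and a plain AK::RefPtr; the names are reused from the diff above purely for illustration.

// Hypothetical follow-up, not part of this patch: the same accessor as in the
// diff above, returning a plain RefPtr once the VMObject lock is taken
// explicitly here (externally) rather than inside a LockRefPtr.
RefPtr<PhysicalPage> Region::physical_page(size_t index) const
{
    SpinlockLocker vmobject_locker(vmobject().m_lock); // external lock, visible to the reader
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

Because the renamed Lock* classes coexist with the original AK classes, call sites like this one can be migrated to that form one at a time instead of in a single sweeping change.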