
AK+Kernel: Make automatically locking RefPtr & co a kernel-only thing

Some time ago, automatic locking was added to the AK smart pointers to
paper over various race conditions in the kernel. Until we've actually
solved the issues in the kernel, we're stuck with the locking.

However, we don't need to punish single-threaded userspace programs with
the high cost of locking. This patch moves the thread-safe variants of
RefPtr, NonnullRefPtr, WeakPtr and RefCounted into Kernel/Library/.
Andreas Kling 2021-10-07 19:12:37 +02:00
parent ca060d8566
commit 5b1f697460
8 changed files with 1418 additions and 470 deletions
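
For context, here is a minimal, self-contained sketch (illustrative names, not the AK sources) of what the userspace, non-locking NonnullRefPtr shown in the diff below boils down to: a single raw pointer, where copying bumps a plain reference count, moving steals the pointer, and swap is an ordinary pointer swap, with none of the lock-bit compare-exchange loops or ScopedCritical sections the old shared implementation needed.

// Sketch only -- RefCountedThing and NonnullRefPtrSketch are made-up names,
// not the AK classes. It mirrors the shape of the new userspace code below.
#include <utility>

class RefCountedThing {
public:
    void ref() { ++m_ref_count; }
    void unref()
    {
        if (--m_ref_count == 0)
            delete this;
    }

private:
    int m_ref_count { 0 }; // a plain int is enough for single-threaded userspace
};

template<typename T>
class NonnullRefPtrSketch {
public:
    explicit NonnullRefPtrSketch(T& object)
        : m_ptr(&object)
    {
        m_ptr->ref();
    }
    NonnullRefPtrSketch(NonnullRefPtrSketch const& other)
        : m_ptr(other.m_ptr)
    {
        m_ptr->ref(); // copy: just bump the count
    }
    NonnullRefPtrSketch(NonnullRefPtrSketch&& other) noexcept
        : m_ptr(std::exchange(other.m_ptr, nullptr)) // move: steal the pointer
    {
    }
    ~NonnullRefPtrSketch()
    {
        if (m_ptr)
            m_ptr->unref();
    }
    NonnullRefPtrSketch& operator=(NonnullRefPtrSketch const& other)
    {
        NonnullRefPtrSketch tmp { other }; // copy-and-swap, as in the diff below
        swap(tmp);
        return *this;
    }
    void swap(NonnullRefPtrSketch& other) { std::swap(m_ptr, other.m_ptr); }

private:
    T* m_ptr { nullptr };
};

int main()
{
    NonnullRefPtrSketch<RefCountedThing> a { *new RefCountedThing };
    auto b = a;            // reference count: 2
    auto c = std::move(a); // still 2; a no longer owns anything
}                          // c and b unref; the last unref deletes the object

The kernel keeps the original, thread-safe behaviour by including Kernel/Library/ThreadSafeNonnullRefPtr.h instead, as the first hunk below shows.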

AK/NonnullRefPtr.h

@@ -6,15 +6,14 @@
 #pragma once
-#include <AK/Assertions.h>
-#include <AK/Atomic.h>
-#include <AK/Format.h>
-#include <AK/Traits.h>
-#include <AK/Types.h>
 #ifdef KERNEL
-#    include <Kernel/Arch/x86/Processor.h>
-#    include <Kernel/Arch/x86/ScopedCritical.h>
-#endif
+#    include <Kernel/Library/ThreadSafeNonnullRefPtr.h>
+#else
+#    include <AK/Assertions.h>
+#    include <AK/Atomic.h>
+#    include <AK/Format.h>
+#    include <AK/Traits.h>
+#    include <AK/Types.h>
 
 namespace AK {
@@ -51,52 +50,55 @@ public:
     enum AdoptTag { Adopt };
-    ALWAYS_INLINE NonnullRefPtr(const T& object)
-        : m_bits((FlatPtr)&object)
+    ALWAYS_INLINE NonnullRefPtr(T const& object)
+        : m_ptr(const_cast<T*>(&object))
     {
-        VERIFY(!(m_bits & 1));
-        const_cast<T&>(object).ref();
+        m_ptr->ref();
     }
     template<typename U>
-    ALWAYS_INLINE NonnullRefPtr(const U& object) requires(IsConvertible<U*, T*>)
-        : m_bits((FlatPtr) static_cast<const T*>(&object))
+    ALWAYS_INLINE NonnullRefPtr(U const& object) requires(IsConvertible<U*, T*>)
+        : m_ptr(const_cast<T*>(static_cast<T const*>(&object)))
     {
-        VERIFY(!(m_bits & 1));
-        const_cast<T&>(static_cast<const T&>(object)).ref();
+        m_ptr->ref();
     }
     ALWAYS_INLINE NonnullRefPtr(AdoptTag, T& object)
-        : m_bits((FlatPtr)&object)
+        : m_ptr(&object)
     {
-        VERIFY(!(m_bits & 1));
     }
     ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr&& other)
-        : m_bits((FlatPtr)&other.leak_ref())
+        : m_ptr(&other.leak_ref())
     {
-        VERIFY(!(m_bits & 1));
     }
     template<typename U>
     ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr<U>&& other) requires(IsConvertible<U*, T*>)
-        : m_bits((FlatPtr)&other.leak_ref())
+        : m_ptr(static_cast<T*>(&other.leak_ref()))
     {
-        VERIFY(!(m_bits & 1));
     }
-    ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr& other)
-        : m_bits((FlatPtr)other.add_ref())
+    ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr const& other)
+        : m_ptr(const_cast<T*>(other.ptr()))
     {
-        VERIFY(!(m_bits & 1));
+        m_ptr->ref();
     }
     template<typename U>
-    ALWAYS_INLINE NonnullRefPtr(const NonnullRefPtr<U>& other) requires(IsConvertible<U*, T*>)
-        : m_bits((FlatPtr)other.add_ref())
+    ALWAYS_INLINE NonnullRefPtr(NonnullRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
+        : m_ptr(const_cast<T*>(static_cast<T const*>(other.ptr())))
     {
-        VERIFY(!(m_bits & 1));
+        m_ptr->ref();
     }
     ALWAYS_INLINE ~NonnullRefPtr()
     {
-        assign(nullptr);
-#ifdef SANITIZE_PTRS
-        m_bits.store(explode_byte(0xb0), AK::MemoryOrder::memory_order_relaxed);
-#endif
+        unref_if_not_null(m_ptr);
+        m_ptr = nullptr;
+#    ifdef SANITIZE_PTRS
+        m_ptr = reinterpret_cast<T*>(explode_byte(0xb0));
+#    endif
     }
 
     template<typename U>
@@ -111,44 +113,46 @@ public:
     NonnullRefPtr(const RefPtr<T>&) = delete;
     NonnullRefPtr& operator=(const RefPtr<T>&) = delete;
-    NonnullRefPtr& operator=(const NonnullRefPtr& other)
+    NonnullRefPtr& operator=(NonnullRefPtr const& other)
     {
-        if (this != &other)
-            assign(other.add_ref());
+        NonnullRefPtr tmp { other };
+        swap(tmp);
         return *this;
     }
     template<typename U>
-    NonnullRefPtr& operator=(const NonnullRefPtr<U>& other) requires(IsConvertible<U*, T*>)
+    NonnullRefPtr& operator=(NonnullRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
     {
-        assign(other.add_ref());
+        NonnullRefPtr tmp { other };
+        swap(tmp);
         return *this;
     }
     ALWAYS_INLINE NonnullRefPtr& operator=(NonnullRefPtr&& other)
     {
-        if (this != &other)
-            assign(&other.leak_ref());
+        NonnullRefPtr tmp { move(other) };
+        swap(tmp);
         return *this;
     }
     template<typename U>
     NonnullRefPtr& operator=(NonnullRefPtr<U>&& other) requires(IsConvertible<U*, T*>)
     {
-        assign(&other.leak_ref());
+        NonnullRefPtr tmp { move(other) };
+        swap(tmp);
         return *this;
     }
-    NonnullRefPtr& operator=(const T& object)
+    NonnullRefPtr& operator=(T const& object)
     {
-        const_cast<T&>(object).ref();
-        assign(const_cast<T*>(&object));
+        NonnullRefPtr tmp { object };
+        swap(tmp);
         return *this;
     }
     [[nodiscard]] ALWAYS_INLINE T& leak_ref()
     {
-        T* ptr = exchange(nullptr);
+        T* ptr = exchange(m_ptr, nullptr);
         VERIFY(ptr);
         return *ptr;
     }
@@ -203,113 +207,24 @@ public:
     void swap(NonnullRefPtr& other)
     {
-        if (this == &other)
-            return;
-        // NOTE: swap is not atomic!
-        T* other_ptr = other.exchange(nullptr);
-        T* ptr = exchange(other_ptr);
-        other.exchange(ptr);
+        AK::swap(m_ptr, other.m_ptr);
     }
     template<typename U>
     void swap(NonnullRefPtr<U>& other) requires(IsConvertible<U*, T*>)
     {
-        // NOTE: swap is not atomic!
-        U* other_ptr = other.exchange(nullptr);
-        T* ptr = exchange(other_ptr);
-        other.exchange(ptr);
+        AK::swap(m_ptr, other.m_ptr);
     }
 
 private:
     NonnullRefPtr() = delete;
-    ALWAYS_INLINE T* as_ptr() const
-    {
-        return (T*)(m_bits.load(AK::MemoryOrder::memory_order_relaxed) & ~(FlatPtr)1);
-    }
     ALWAYS_INLINE RETURNS_NONNULL T* as_nonnull_ptr() const
     {
-        T* ptr = (T*)(m_bits.load(AK::MemoryOrder::memory_order_relaxed) & ~(FlatPtr)1);
-        VERIFY(ptr);
-        return ptr;
+        VERIFY(m_ptr);
+        return m_ptr;
     }
-    template<typename F>
-    void do_while_locked(F f) const
-    {
-#ifdef KERNEL
-        // We don't want to be pre-empted while we have the lock bit set
-        Kernel::ScopedCritical critical;
-#endif
-        FlatPtr bits;
-        for (;;) {
-            bits = m_bits.fetch_or(1, AK::MemoryOrder::memory_order_acq_rel);
-            if (!(bits & 1))
-                break;
-#ifdef KERNEL
-            Kernel::Processor::wait_check();
-#endif
-        }
-        VERIFY(!(bits & 1));
-        f((T*)bits);
-        m_bits.store(bits, AK::MemoryOrder::memory_order_release);
-    }
-    ALWAYS_INLINE void assign(T* new_ptr)
-    {
-        T* prev_ptr = exchange(new_ptr);
-        unref_if_not_null(prev_ptr);
-    }
-    ALWAYS_INLINE T* exchange(T* new_ptr)
-    {
-        VERIFY(!((FlatPtr)new_ptr & 1));
-#ifdef KERNEL
-        // We don't want to be pre-empted while we have the lock bit set
-        Kernel::ScopedCritical critical;
-#endif
-        // Only exchange while not locked
-        FlatPtr expected = m_bits.load(AK::MemoryOrder::memory_order_relaxed);
-        for (;;) {
-            expected &= ~(FlatPtr)1; // only if lock bit is not set
-            if (m_bits.compare_exchange_strong(expected, (FlatPtr)new_ptr, AK::MemoryOrder::memory_order_acq_rel))
-                break;
-#ifdef KERNEL
-            Kernel::Processor::wait_check();
-#endif
-        }
-        VERIFY(!(expected & 1));
-        return (T*)expected;
-    }
-    T* add_ref() const
-    {
-#ifdef KERNEL
-        // We don't want to be pre-empted while we have the lock bit set
-        Kernel::ScopedCritical critical;
-#endif
-        // Lock the pointer
-        FlatPtr expected = m_bits.load(AK::MemoryOrder::memory_order_relaxed);
-        for (;;) {
-            expected &= ~(FlatPtr)1; // only if lock bit is not set
-            if (m_bits.compare_exchange_strong(expected, expected | 1, AK::MemoryOrder::memory_order_acq_rel))
-                break;
-#ifdef KERNEL
-            Kernel::Processor::wait_check();
-#endif
-        }
-        // Add a reference now that we locked the pointer
-        ref_if_not_null((T*)expected);
-        // Unlock the pointer again
-        m_bits.store(expected, AK::MemoryOrder::memory_order_release);
-        return (T*)expected;
-    }
-    mutable Atomic<FlatPtr> m_bits { 0 };
+    T* m_ptr { nullptr };
 };
 
 template<typename T>
@@ -357,3 +272,5 @@ struct Traits<NonnullRefPtr<T>> : public GenericTraits<NonnullRefPtr<T>> {
 using AK::adopt_ref;
 using AK::make_ref_counted;
 using AK::NonnullRefPtr;
+
+#endif
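
For reference, a hypothetical userspace use of the symbols the header re-exports above (adopt_ref, make_ref_counted, NonnullRefPtr); the Foo class is made up for illustration and is not part of the commit. Outside the kernel, these operations now compile down to plain, unsynchronized reference-count updates.

#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>

class Foo : public RefCounted<Foo> {
public:
    static NonnullRefPtr<Foo> create() { return adopt_ref(*new Foo); }
    // (equivalently: make_ref_counted<Foo>())
};

void example()
{
    auto foo = Foo::create(); // NonnullRefPtr<Foo>, reference count is 1
    auto other = foo;         // copy: bumps the count, no locking involved
}                             // both handles unref; the last one deletes the Foo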