Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 00:47:45 +00:00
AK: Make Weakable non-atomic
Let's not punish single-threaded workloads with the performance cost of atomic weakables. The kernel keeps using LockWeakable.
parent 159f9688dc
commit 53c0038d2c

1 changed file with 6 additions and 40 deletions
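
Before the diff, a minimal usage sketch (not part of this commit) of what the change affects: a single-threaded, ref-counted object that hands out weak pointers. The Widget class and the main() driver are invented for illustration; Weakable, WeakPtr, make_weak_ptr(), strong_ref() and adopt_ref() are used here as I understand the existing AK API, so treat the snippet as a sketch rather than repository code.

#include <AK/Assertions.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/WeakPtr.h>
#include <AK/Weakable.h>

// Hypothetical example class: ref-counted and weakable.
class Widget
    : public RefCounted<Widget>
    , public Weakable<Widget> {
};

int main()
{
    RefPtr<Widget> widget = adopt_ref(*new Widget);

    // make_weak_ptr() creates (or reuses) the object's WeakLink.
    WeakPtr<Widget> weak = widget->make_weak_ptr();

    // With this commit, upgrading a weak pointer is a plain pointer read
    // plus a ref-count bump; there is no consumer counting and no fence.
    if (auto strong = weak.strong_ref()) {
        // The object is still alive; `strong` keeps it alive in this scope.
    }

    // Dropping the last strong reference revokes the link, so a later
    // upgrade attempt simply yields a null RefPtr.
    widget = nullptr;
    VERIFY(!weak.strong_ref());
    return 0;
}
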
AK/Weakable.h
@@ -7,11 +7,9 @@
 #pragma once
 
 #include <AK/Assertions.h>
-#include <AK/Atomic.h>
 #include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
 #include <AK/StdLibExtras.h>
-#include <sched.h>
 
 namespace AK {
 
@@ -31,50 +29,18 @@ public:
     RefPtr<T> strong_ref() const
     requires(IsBaseOf<RefCountedBase, T>)
     {
-        RefPtr<T> ref;
-
-        {
-            if (!(m_consumers.fetch_add(1u << 1, AK::MemoryOrder::memory_order_acquire) & 1u)) {
-                T* ptr = (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
-                if (ptr && ptr->try_ref())
-                    ref = adopt_ref(*ptr);
-            }
-            m_consumers.fetch_sub(1u << 1, AK::MemoryOrder::memory_order_release);
-        }
-
-        return ref;
+        return static_cast<T*>(m_ptr);
     }
 
     template<typename T>
     T* unsafe_ptr() const
     {
-        if (m_consumers.load(AK::MemoryOrder::memory_order_relaxed) & 1u)
-            return nullptr;
-        // NOTE: This may return a non-null pointer even if revocation
-        // has been triggered as there is a possible race! But it's "unsafe"
-        // anyway because we return a raw pointer without ensuring a
-        // reference...
-        return (T*)m_ptr.load(AK::MemoryOrder::memory_order_acquire);
+        return static_cast<T*>(m_ptr);
     }
 
-    bool is_null() const
-    {
-        return unsafe_ptr<void>() == nullptr;
-    }
+    bool is_null() const { return m_ptr == nullptr; }
 
-    void revoke()
-    {
-        auto current_consumers = m_consumers.fetch_or(1u, AK::MemoryOrder::memory_order_relaxed);
-        VERIFY(!(current_consumers & 1u));
-        // We flagged revocation, now wait until everyone trying to obtain
-        // a strong reference is done
-        while (current_consumers > 0) {
-            sched_yield();
-            current_consumers = m_consumers.load(AK::MemoryOrder::memory_order_acquire) & ~1u;
-        }
-        // No one is trying to use it (anymore)
-        m_ptr.store(nullptr, AK::MemoryOrder::memory_order_release);
-    }
+    void revoke() { m_ptr = nullptr; }
 
 private:
     template<typename T>
@@ -82,8 +48,8 @@ private:
         : m_ptr(&weakable)
     {
     }
-    mutable Atomic<void*> m_ptr;
-    mutable Atomic<unsigned> m_consumers; // LSB indicates revocation in progress
+
+    mutable void* m_ptr { nullptr };
 };
 
 template<typename T>
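
For readability, here is the WeakLink core as it looks after this change, assembled from the hunks above. Declarations the diff does not touch (friend declarations for Weakable/WeakPtr and whatever code calls the private constructor) are assumed and omitted, so treat this as a sketch of the resulting header rather than a verbatim copy.

#include <AK/Assertions.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/StdLibExtras.h>

namespace AK {

class WeakLink : public RefCounted<WeakLink> {
public:
    template<typename T>
    RefPtr<T> strong_ref() const
    requires(IsBaseOf<RefCountedBase, T>)
    {
        // No consumer counting or atomic loads anymore: read the raw
        // pointer and let RefPtr take a reference.
        return static_cast<T*>(m_ptr);
    }

    template<typename T>
    T* unsafe_ptr() const
    {
        return static_cast<T*>(m_ptr);
    }

    bool is_null() const { return m_ptr == nullptr; }

    void revoke() { m_ptr = nullptr; }

private:
    template<typename T>
    explicit WeakLink(T& weakable)
        : m_ptr(&weakable)
    {
    }

    mutable void* m_ptr { nullptr };
};

}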