Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 00:47:45 +00:00
Kernel: Add AtomicEdgeAction class
This class acts as a combined ref-count and spin-lock (the lock is only taken when adding the first or removing the last reference), allowing a specific action to be run atomically when the first reference is added or the last reference is dropped.
This commit is contained in:
parent 116f1c5c56
commit 942bb976e2
1 changed file with 87 additions and 0 deletions
Kernel/AtomicEdgeAction.h (new file, +87)
@@ -0,0 +1,87 @@
/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Atomic.h>
#include <Kernel/Arch/x86/Processor.h>

namespace Kernel {

template<typename AtomicRefCountType>
class AtomicEdgeAction {
public:
    template<typename FirstRefAction>
    bool ref(FirstRefAction first_ref_action)
    {
        AtomicRefCountType expected = 0;
        AtomicRefCountType desired = (1 << 1) | 1;
        // Least significant bit indicates we're busy protecting/unprotecting
        for (;;) {
            if (m_atomic_ref_count.compare_exchange_strong(expected, desired, AK::memory_order_relaxed))
                break;

            Processor::wait_check();

            expected &= ~1;
            desired = expected + (1 << 1);
            VERIFY(desired > expected);
            if (expected == 0)
                desired |= 1;
        }

        atomic_thread_fence(AK::memory_order_acquire);

        if (expected == 0) {
            first_ref_action();

            // drop the busy flag
            m_atomic_ref_count.store(desired & ~1, AK::memory_order_release);
            return true;
        }
        return false;
    }

    template<typename LastRefAction>
    bool unref(LastRefAction last_ref_action)
    {
        AtomicRefCountType expected = 1 << 1;
        AtomicRefCountType desired = (1 << 1) | 1;
        // Least significant bit indicates we're busy protecting/unprotecting
        for (;;) {
            if (m_atomic_ref_count.compare_exchange_strong(expected, desired, AK::memory_order_relaxed))
                break;

            Processor::wait_check();

            expected &= ~1;
            VERIFY(expected != 0); // Someone should always have at least one reference

            if (expected == 1 << 1) {
                desired = (1 << 1) | 1;
            } else {
                desired = expected - (1 << 1);
                VERIFY(desired < expected);
            }
        }

        AK::atomic_thread_fence(AK::memory_order_release);

        if (expected == 1 << 1) {
            last_ref_action();

            // drop the busy flag and release reference
            m_atomic_ref_count.store(0, AK::memory_order_release);
            return true;
        }
        return false;
    }

private:
    Atomic<AtomicRefCountType> m_atomic_ref_count { 0 };
};

}
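For illustration only (not part of this commit), here is a minimal sketch of how the class might be used. The counter stores the reference count shifted left by one, with bit 0 acting as the busy flag, so the callables passed to ref()/unref() run only on the 0 -> 1 and 1 -> 0 edges. The SharedIrqUser type and its enable_hardware()/disable_hardware() helpers are hypothetical placeholders for whatever expensive setup and teardown should happen only on those edges.

#include <AK/Types.h>
#include <Kernel/AtomicEdgeAction.h>

namespace Kernel {

// Hypothetical example type, not from the SerenityOS tree.
class SharedIrqUser {
public:
    void ref_user()
    {
        // The lambda runs atomically, and only for the 0 -> 1 transition;
        // every other ref just bumps the counter.
        m_user_count.ref([] {
            enable_hardware();
        });
    }

    void unref_user()
    {
        // The lambda runs atomically, and only for the 1 -> 0 transition;
        // every other unref just decrements the counter.
        m_user_count.unref([] {
            disable_hardware();
        });
    }

private:
    static void enable_hardware() { /* hypothetical setup */ }
    static void disable_hardware() { /* hypothetical teardown */ }

    AtomicEdgeAction<u32> m_user_count;
};

}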