Mirror of https://github.com/RGBCube/serenity
Kernel+Userland: Remove global futexes
We only ever use private futexes, so it doesn't make sense to carry around all the complexity required for global (cross-process) futexes.
Commit 4226b662cd (parent 7979b5a8bb)
8 changed files with 48 additions and 188 deletions
@@ -52,9 +52,8 @@ extern "C" {
 #define FUTEX_WAIT_BITSET 9
 #define FUTEX_WAKE_BITSET 10
 
-#define FUTEX_PRIVATE_FLAG (1 << 7)
 #define FUTEX_CLOCK_REALTIME (1 << 8)
-#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+#define FUTEX_CMD_MASK ~(FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_BITSET_MATCH_ANY 0xffffffff
 
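With FUTEX_PRIVATE_FLAG gone, FUTEX_CMD_MASK only has to strip FUTEX_CLOCK_REALTIME to recover the command. A hypothetical decoding helper, purely to illustrate how an op word splits into command and flag after this change (DecodedFutexOp and decode_futex_op are not names from the source; the kernel presumably does this masking inline in sys$futex()):

#include <AK/Types.h>

// Hypothetical sketch of futex_op decoding after FUTEX_PRIVATE_FLAG is removed.
struct DecodedFutexOp {
    u32 cmd;                 // FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, ...
    bool use_realtime_clock; // timed waits may measure against CLOCK_REALTIME
};

static DecodedFutexOp decode_futex_op(u32 futex_op)
{
    return DecodedFutexOp {
        .cmd = futex_op & FUTEX_CMD_MASK, // previously FUTEX_PRIVATE_FLAG also had to be masked off
        .use_realtime_clock = (futex_op & FUTEX_CLOCK_REALTIME) != 0,
    };
}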
@@ -10,6 +10,14 @@
 
 namespace Kernel {
 
+FutexQueue::FutexQueue()
+{
+}
+
+FutexQueue::~FutexQueue()
+{
+}
+
 bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
 {
     VERIFY(data != nullptr); // Thread that is requesting to be blocked
@@ -14,11 +14,11 @@
 
 namespace Kernel {
 
-class FutexQueue : public Thread::BlockCondition
-    , public RefCounted<FutexQueue>
-    , public Memory::VMObjectDeletedHandler {
+class FutexQueue
+    : public RefCounted<FutexQueue>
+    , public Thread::BlockCondition {
 public:
-    FutexQueue(FlatPtr user_address_or_offset, Memory::VMObject* vmobject = nullptr);
+    FutexQueue();
    virtual ~FutexQueue();
 
    u32 wake_n_requeue(u32, const Function<FutexQueue*()>&, u32, bool&, bool&);
@@ -31,8 +31,6 @@ public:
         return Thread::current()->block<Thread::FutexBlocker>(timeout, *this, forward<Args>(args)...);
     }
 
-    virtual void vmobject_deleted(Memory::VMObject&) override;
-
     bool queue_imminent_wait();
     void did_remove();
     bool try_remove();
@@ -48,11 +46,6 @@ protected:
     virtual bool should_add_blocker(Thread::Blocker& b, void* data) override;
 
 private:
-    // For private futexes we just use the user space address.
-    // But for global futexes we use the offset into the VMObject
-    const FlatPtr m_user_address_or_offset;
-    WeakPtr<Memory::VMObject> m_vmobject;
-    const bool m_is_global;
    size_t m_imminent_waits { 1 }; // We only create this object if we're going to be waiting, so start out with 1
    bool m_was_removed { false };
 };
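Put together from the new-side lines of the three hunks above, the trimmed-down declaration now reads roughly as follows. This is a sketch assembled from the diff, not the verbatim header; members the hunks do not show are elided:

class FutexQueue
    : public RefCounted<FutexQueue>
    , public Thread::BlockCondition {
public:
    FutexQueue();
    virtual ~FutexQueue();

    u32 wake_n_requeue(u32, const Function<FutexQueue*()>&, u32, bool&, bool&);
    // ... wake_n(), wait_on(), is_empty_and_no_imminent_waits() etc. are unchanged
    //     and therefore not part of this diff ...

    bool queue_imminent_wait();
    void did_remove();
    bool try_remove();

protected:
    virtual bool should_add_blocker(Thread::Blocker& b, void* data) override;

private:
    size_t m_imminent_waits { 1 }; // We only create this object if we're going to be waiting, so start out with 1
    bool m_was_removed { false };
};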
@@ -31,13 +31,6 @@ VMObject::VMObject(size_t size)
 
 VMObject::~VMObject()
 {
-    {
-        ScopedSpinLock lock(m_on_deleted_lock);
-        for (auto& it : m_on_deleted)
-            it->vmobject_deleted(*this);
-        m_on_deleted.clear();
-    }
-
     VERIFY(m_regions.is_empty());
 }
 
@@ -7,11 +7,8 @@
 #pragma once
 
 #include <AK/FixedArray.h>
-#include <AK/HashTable.h>
 #include <AK/IntrusiveList.h>
-#include <AK/RefCounted.h>
 #include <AK/RefPtr.h>
-#include <AK/Vector.h>
 #include <AK/Weakable.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Library/ListedRefCounted.h>
@@ -20,12 +17,6 @@
 
 namespace Kernel::Memory {
 
-class VMObjectDeletedHandler {
-public:
-    virtual ~VMObjectDeletedHandler() = default;
-    virtual void vmobject_deleted(VMObject&) = 0;
-};
-
 class VMObject
     : public ListedRefCounted<VMObject>
    , public Weakable<VMObject> {
@@ -63,17 +54,6 @@ public:
         m_regions.remove(region);
     }
 
-    void register_on_deleted_handler(VMObjectDeletedHandler& handler)
-    {
-        ScopedSpinLock locker(m_on_deleted_lock);
-        m_on_deleted.set(&handler);
-    }
-    void unregister_on_deleted_handler(VMObjectDeletedHandler& handler)
-    {
-        ScopedSpinLock locker(m_on_deleted_lock);
-        m_on_deleted.remove(&handler);
-    }
-
 protected:
     explicit VMObject(size_t);
     explicit VMObject(VMObject const&);
@@ -91,9 +71,6 @@ private:
     VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
 
-    HashTable<VMObjectDeletedHandler*> m_on_deleted;
-    SpinLock<u8> m_on_deleted_lock;
-
     Region::ListInVMObject m_regions;
 
 public:
@@ -11,66 +11,6 @@
 
 namespace Kernel {
 
-static SpinLock<u8> g_global_futex_lock;
-static Singleton<HashMap<Memory::VMObject*, FutexQueues>> g_global_futex_queues;
-
-FutexQueue::FutexQueue(FlatPtr user_address_or_offset, Memory::VMObject* vmobject)
-    : m_user_address_or_offset(user_address_or_offset)
-    , m_is_global(vmobject != nullptr)
-{
-    dbgln_if(FUTEX_DEBUG, "Futex @ {}{}",
-        this,
-        m_is_global ? " (global)" : " (local)");
-
-    if (m_is_global) {
-        // Only register for global futexes
-        m_vmobject = vmobject->make_weak_ptr();
-        vmobject->register_on_deleted_handler(*this);
-    }
-}
-
-FutexQueue::~FutexQueue()
-{
-    if (m_is_global) {
-        if (auto vmobject = m_vmobject.strong_ref())
-            vmobject->unregister_on_deleted_handler(*this);
-    }
-    dbgln_if(FUTEX_DEBUG, "~Futex @ {}{}",
-        this,
-        m_is_global ? " (global)" : " (local)");
-}
-
-void FutexQueue::vmobject_deleted(Memory::VMObject& vmobject)
-{
-    VERIFY(m_is_global); // If we got called we must be a global futex
-    // Because we're taking ourselves out of the global queue, we need
-    // to make sure we have at last a reference until we're done
-    NonnullRefPtr<FutexQueue> own_ref(*this);
-
-    dbgln_if(FUTEX_DEBUG, "Futex::vmobject_deleted @ {}{}",
-        this,
-        m_is_global ? " (global)" : " (local)");
-
-    // Because this is called from the VMObject's destructor, getting a
-    // strong_ref in this function is unsafe!
-    m_vmobject = nullptr; // Just to be safe...
-
-    {
-        ScopedSpinLock lock(g_global_futex_lock);
-        g_global_futex_queues->remove(&vmobject);
-    }
-
-    bool did_wake_all;
-    auto wake_count = wake_all(did_wake_all);
-
-    if constexpr (FUTEX_DEBUG) {
-        if (wake_count > 0)
-            dbgln("Futex @ {} unblocked {} waiters due to vmobject free", this, wake_count);
-    }
-
-    VERIFY(did_wake_all); // No one should be left behind...
-}
-
 void Process::clear_futex_queues_on_exec()
 {
     ScopedSpinLock lock(m_futex_lock);
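Everything that used to hang off g_global_futex_queues is now per process. The diff only shows the uses, so the exact declarations are an assumption, but judging from the code in sys$futex() below the remaining state boils down to roughly this (the member names m_futex_lock and m_futex_queues appear in the diff; the types are inferred):

// Hypothetical sketch of the per-process futex state implied by sys$futex() below.
// The real declarations presumably live in Process.h, which this diff does not touch.
class Process {
    // ...
    SpinLock<u8> m_futex_lock;                           // taken as ScopedSpinLock locker(m_futex_lock)
    HashMap<FlatPtr, RefPtr<FutexQueue>> m_futex_queues; // keyed by the userspace address of the futex word
    // ...
};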
@@ -120,100 +60,50 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         }
     }
 
-    bool is_private = (params.futex_op & FUTEX_PRIVATE_FLAG) != 0;
-    auto& queue_lock = is_private ? m_futex_lock : g_global_futex_lock;
-    auto user_address_or_offset = FlatPtr(params.userspace_address);
-    auto user_address_or_offset2 = FlatPtr(params.userspace_address2);
-
-    // If this is a global lock, look up the underlying VMObject *before*
-    // acquiring the queue lock
-    RefPtr<Memory::VMObject> vmobject, vmobject2;
-    if (!is_private) {
-        auto region = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
-        if (!region)
-            return EFAULT;
-        vmobject = region->vmobject();
-        user_address_or_offset = region->offset_in_vmobject_from_vaddr(VirtualAddress(user_address_or_offset));
-
-        switch (cmd) {
-        case FUTEX_REQUEUE:
-        case FUTEX_CMP_REQUEUE:
-        case FUTEX_WAKE_OP: {
-            auto region2 = address_space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
-            if (!region2)
-                return EFAULT;
-            vmobject2 = region2->vmobject();
-            user_address_or_offset2 = region->offset_in_vmobject_from_vaddr(VirtualAddress(user_address_or_offset2));
-            break;
-        }
-        }
-    }
-
-    auto find_global_futex_queues = [&](Memory::VMObject& vmobject, bool create_if_not_found) -> FutexQueues* {
-        auto& global_queues = *g_global_futex_queues;
-        auto it = global_queues.find(&vmobject);
-        if (it != global_queues.end())
-            return &it->value;
-        if (create_if_not_found) {
-            // TODO: is there a better way than setting and finding it again?
-            auto result = global_queues.set(&vmobject, {});
-            VERIFY(result == AK::HashSetResult::InsertedNewEntry);
-            it = global_queues.find(&vmobject);
-            VERIFY(it != global_queues.end());
-            return &it->value;
-        }
-        return nullptr;
-    };
-
-    auto find_futex_queue = [&](Memory::VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found, bool* did_create = nullptr) -> RefPtr<FutexQueue> {
-        VERIFY(is_private || vmobject);
+    auto find_futex_queue = [&](FlatPtr user_address, bool create_if_not_found, bool* did_create = nullptr) -> RefPtr<FutexQueue> {
         VERIFY(!create_if_not_found || did_create != nullptr);
-        auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, create_if_not_found);
-        if (!queues)
-            return {};
-        auto it = queues->find(user_address_or_offset);
-        if (it != queues->end())
+        auto* queues = &m_futex_queues;
+        auto it = m_futex_queues.find(user_address);
+        if (it != m_futex_queues.end())
             return it->value;
         if (create_if_not_found) {
             *did_create = true;
-            auto futex_queue = adopt_ref(*new FutexQueue(user_address_or_offset, vmobject));
-            auto result = queues->set(user_address_or_offset, futex_queue);
+            auto futex_queue = adopt_ref(*new FutexQueue);
+            auto result = queues->set(user_address, futex_queue);
             VERIFY(result == AK::HashSetResult::InsertedNewEntry);
             return futex_queue;
         }
         return {};
     };
 
-    auto remove_futex_queue = [&](Memory::VMObject* vmobject, FlatPtr user_address_or_offset) {
-        auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, false);
-        if (queues) {
-            if (auto it = queues->find(user_address_or_offset); it != queues->end()) {
-                if (it->value->try_remove()) {
-                    it->value->did_remove();
-                    queues->remove(it);
-                }
+    auto remove_futex_queue = [&](FlatPtr user_address) {
+        if (auto it = m_futex_queues.find(user_address); it != m_futex_queues.end()) {
+            if (it->value->try_remove()) {
+                it->value->did_remove();
+                m_futex_queues.remove(it);
             }
-            if (!is_private && queues->is_empty())
-                g_global_futex_queues->remove(vmobject);
         }
     };
 
-    auto do_wake = [&](Memory::VMObject* vmobject, FlatPtr user_address_or_offset, u32 count, Optional<u32> bitmask) -> int {
+    auto do_wake = [&](FlatPtr user_address, u32 count, Optional<u32> bitmask) -> int {
         if (count == 0)
             return 0;
-        ScopedSpinLock lock(queue_lock);
-        auto futex_queue = find_futex_queue(vmobject, user_address_or_offset, false);
+        ScopedSpinLock locker(m_futex_lock);
+        auto futex_queue = find_futex_queue(user_address, false);
         if (!futex_queue)
             return 0;
         bool is_empty;
         u32 woke_count = futex_queue->wake_n(count, bitmask, is_empty);
         if (is_empty) {
             // If there are no more waiters, we want to get rid of the futex!
-            remove_futex_queue(vmobject, user_address_or_offset);
+            remove_futex_queue(user_address);
         }
         return (int)woke_count;
     };
 
+    auto user_address = FlatPtr(params.userspace_address);
+    auto user_address2 = FlatPtr(params.userspace_address2);
+
     auto do_wait = [&](u32 bitset) -> int {
         bool did_create;
         RefPtr<FutexQueue> futex_queue;
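Because the flattened rendering above is hard to follow, here is the new-side code of the two helpers that changed most, reassembled from the hunk (whitespace approximate, no lines beyond what the diff shows):

auto find_futex_queue = [&](FlatPtr user_address, bool create_if_not_found, bool* did_create = nullptr) -> RefPtr<FutexQueue> {
    VERIFY(!create_if_not_found || did_create != nullptr);
    auto* queues = &m_futex_queues;
    auto it = m_futex_queues.find(user_address);
    if (it != m_futex_queues.end())
        return it->value;
    if (create_if_not_found) {
        *did_create = true;
        auto futex_queue = adopt_ref(*new FutexQueue);
        auto result = queues->set(user_address, futex_queue);
        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
        return futex_queue;
    }
    return {};
};

auto remove_futex_queue = [&](FlatPtr user_address) {
    if (auto it = m_futex_queues.find(user_address); it != m_futex_queues.end()) {
        if (it->value->try_remove()) {
            it->value->did_remove();
            m_futex_queues.remove(it);
        }
    }
};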
@@ -227,9 +117,9 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         }
         atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
 
-        ScopedSpinLock lock(queue_lock);
+        ScopedSpinLock locker(m_futex_lock);
         did_create = false;
-        futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, true, &did_create);
+        futex_queue = find_futex_queue(user_address, true, &did_create);
         VERIFY(futex_queue);
         // We need to try again if we didn't create this queue and the existing queue
         // was removed before we were able to queue an imminent wait.
@@ -240,10 +130,10 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
 
         Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset);
 
-        ScopedSpinLock lock(queue_lock);
+        ScopedSpinLock locker(m_futex_lock);
         if (futex_queue->is_empty_and_no_imminent_waits()) {
             // If there are no more waiters, we want to get rid of the futex!
-            remove_futex_queue(vmobject, user_address_or_offset);
+            remove_futex_queue(user_address);
         }
         if (block_result == Thread::BlockResult::InterruptedByTimeout) {
             return ETIMEDOUT;
@@ -260,8 +150,8 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
 
         int woken_or_requeued = 0;
-        ScopedSpinLock lock(queue_lock);
-        if (auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, false)) {
+        ScopedSpinLock locker(m_futex_lock);
+        if (auto futex_queue = find_futex_queue(user_address, false)) {
             RefPtr<FutexQueue> target_futex_queue;
             bool is_empty, is_target_empty;
             woken_or_requeued = futex_queue->wake_n_requeue(
@@ -269,14 +159,14 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
                     // NOTE: futex_queue's lock is being held while this callback is called
                    // The reason we're doing this in a callback is that we don't want to always
                    // create a target queue, only if we actually have anything to move to it!
-                    target_futex_queue = find_futex_queue(vmobject2.ptr(), user_address_or_offset2, true);
+                    target_futex_queue = find_futex_queue(user_address2, true);
                     return target_futex_queue.ptr();
                 },
                 params.val2, is_empty, is_target_empty);
             if (is_empty)
-                remove_futex_queue(vmobject, user_address_or_offset);
+                remove_futex_queue(user_address);
             if (is_target_empty && target_futex_queue)
-                remove_futex_queue(vmobject2, user_address_or_offset2);
+                remove_futex_queue(user_address2);
         }
         return woken_or_requeued;
     };
@@ -286,7 +176,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         return do_wait(0);
 
     case FUTEX_WAKE:
-        return do_wake(vmobject.ptr(), user_address_or_offset, params.val, {});
+        return do_wake(user_address, params.val, {});
 
     case FUTEX_WAKE_OP: {
         Optional<u32> oldval;
@@ -319,7 +209,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         if (!oldval.has_value())
             return EFAULT;
         atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
-        int result = do_wake(vmobject.ptr(), user_address_or_offset, params.val, {});
+        int result = do_wake(user_address, params.val, {});
         if (params.val2 > 0) {
             bool compare_result;
             switch (_FUTEX_CMP(params.val3)) {
@@ -345,7 +235,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
                return EINVAL;
            }
            if (compare_result)
-                result += do_wake(vmobject2.ptr(), user_address_or_offset2, params.val2, {});
+                result += do_wake(user_address2, params.val2, {});
        }
        return result;
    }
@@ -366,7 +256,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         VERIFY(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
         if (params.val3 == 0)
             return EINVAL;
-        return do_wake(vmobject.ptr(), user_address_or_offset, params.val, params.val3);
+        return do_wake(user_address, params.val, params.val3);
     }
     return ENOSYS;
 }
@@ -31,18 +31,18 @@ static ALWAYS_INLINE int futex_wait(uint32_t* userspace_address, uint32_t value,
 
     if (abstime) {
         // NOTE: FUTEX_WAIT takes a relative timeout, so use FUTEX_WAIT_BITSET instead!
-        op = FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG;
+        op = FUTEX_WAIT_BITSET;
         if (clockid == CLOCK_REALTIME || clockid == CLOCK_REALTIME_COARSE)
             op |= FUTEX_CLOCK_REALTIME;
     } else {
-        op = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;
+        op = FUTEX_WAIT;
     }
     return futex(userspace_address, op, value, abstime, nullptr, FUTEX_BITSET_MATCH_ANY);
 }
 
 static ALWAYS_INLINE int futex_wake(uint32_t* userspace_address, uint32_t count)
 {
-    return futex(userspace_address, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count, NULL, NULL, 0);
+    return futex(userspace_address, FUTEX_WAKE, count, NULL, NULL, 0);
 }
 
 int purge(int mode);
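These helpers are the building blocks the pthread primitives below rely on. A minimal usage sketch, assuming the full helper signature is futex_wait(addr, expected_value, abstime, clockid) — the parameter list is cut off in the hunk header above — and that both threads belong to the same process, which is all the kernel supports after this commit:

#include <serenity.h>
#include <stdint.h>

// One-shot event: wait_for_event() blocks until fire_event() runs on another
// thread of the same process. The names here are illustrative, not an API.
static uint32_t g_event = 0;

static void wait_for_event()
{
    // Re-check the word on every wakeup; futex wakeups can be spurious.
    while (__atomic_load_n(&g_event, __ATOMIC_ACQUIRE) == 0)
        futex_wait(&g_event, 0, nullptr, 0); // only sleeps while g_event still reads 0
}

static void fire_event()
{
    __atomic_store_n(&g_event, 1, __ATOMIC_RELEASE);
    futex_wake(&g_event, 1); // wake a single waiter; a larger count would wake more
}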
@@ -127,7 +127,7 @@ int pthread_cond_broadcast(pthread_cond_t* cond)
     pthread_mutex_t* mutex = AK::atomic_load(&cond->mutex, AK::memory_order_relaxed);
     VERIFY(mutex);
 
-    int rc = futex(&cond->value, FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG, 1, nullptr, &mutex->lock, INT_MAX);
+    int rc = futex(&cond->value, FUTEX_REQUEUE, 1, nullptr, &mutex->lock, INT_MAX);
     VERIFY(rc >= 0);
     return 0;
 }
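The only change here is dropping the flag bit, but the call is worth a gloss: FUTEX_REQUEUE lets a broadcast wake a single waiter and transplant the remaining waiters onto the mutex's own futex word, instead of waking them all into a lock they would immediately contend. A hedged reading of the arguments, based on the usual futex requeue semantics rather than Serenity-specific documentation:

int rc = futex(
    &cond->value,  // futex word the condition-variable waiters are parked on
    FUTEX_REQUEUE, // wake some waiters, move the rest elsewhere
    1,             // wake at most one waiter outright
    nullptr,       // no timeout is involved in a requeue
    &mutex->lock,  // requeue the remaining waiters onto the mutex's futex word
    INT_MAX);      // presumably the cap on how many waiters get requeued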