Mirror of https://github.com/RGBCube/serenity (synced 2025-07-28 20:37:35 +00:00)
Kernel: Clean up sys$futex and add support for cross-process futexes
Parent: 55c7496200
Commit: 9db10887a1
9 changed files with 164 additions and 70 deletions
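
What "cross-process" means here: a futex word that lives in MAP_SHARED memory can now be waited on and woken from different processes, while the userspace wrappers keep setting FUTEX_PRIVATE_FLAG for the common single-process case. Below is a minimal sketch of the cross-process case, assuming Serenity's futex() wrapper from <serenity.h> and POSIX mmap()/fork(); the scenario and the (omitted) error handling are illustrative, not part of this commit.

// Parent sleeps on a shared futex word; the child stores a value and wakes it.
// Because the word is in a MAP_SHARED mapping and FUTEX_PRIVATE_FLAG is *not*
// set, the kernel has to match waiters across process boundaries.
#include <serenity.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    auto* word = (uint32_t*)mmap(nullptr, sizeof(uint32_t), PROT_READ | PROT_WRITE,
        MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    *word = 0;

    if (fork() == 0) {
        __atomic_store_n(word, 1, __ATOMIC_SEQ_CST);
        futex(word, FUTEX_WAKE, 1, nullptr, nullptr, 0); // no FUTEX_PRIVATE_FLAG
        _exit(0);
    }

    while (__atomic_load_n(word, __ATOMIC_SEQ_CST) == 0)
        futex(word, FUTEX_WAIT, 0, nullptr, nullptr, 0); // sleeps while *word == 0
    printf("woken by child\n");
    return 0;
}
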
@@ -655,7 +655,7 @@ static int rwlock_rdlock_maybe_timed(u32* lockp, const struct timespec* timeout
         // Seems like someone is writing (or is interested in writing and we let them have the lock)
         // wait until they're done.
-        auto rc = futex(lockp, FUTEX_WAIT_BITSET, current, timeout, nullptr, reader_wake_mask);
+        auto rc = futex(lockp, FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, current, timeout, nullptr, reader_wake_mask);
         if (rc < 0 && errno == ETIMEDOUT && timeout) {
             return value_if_timeout;
         }

@@ -703,7 +703,7 @@ static int rwlock_wrlock_maybe_timed(pthread_rwlock_t* lockval_p, const struct t
         // Seems like someone is writing (or is interested in writing and we let them have the lock)
         // wait until they're done.
-        auto rc = futex(lockp, FUTEX_WAIT_BITSET, current, timeout, nullptr, writer_wake_mask);
+        auto rc = futex(lockp, FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, current, timeout, nullptr, writer_wake_mask);
         if (rc < 0 && errno == ETIMEDOUT && timeout) {
             return value_if_timeout;
         }

@@ -794,7 +794,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t* lockval_p)
     auto desired = current & ~(writer_locked_mask | writer_intent_mask);
     AK::atomic_store(lockp, desired, AK::MemoryOrder::memory_order_release);
     // Then wake both readers and writers, if any.
-    auto rc = futex(lockp, FUTEX_WAKE_BITSET, current, nullptr, nullptr, (current & writer_wake_mask) | reader_wake_mask);
+    auto rc = futex(lockp, FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, current, nullptr, nullptr, (current & writer_wake_mask) | reader_wake_mask);
     if (rc < 0)
         return errno;
     return 0;

@@ -98,7 +98,7 @@ int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const s
     // value might change as soon as we unlock it.
     u32 value = AK::atomic_fetch_or(&cond->value, NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL, AK::memory_order_release) | NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL;
     pthread_mutex_unlock(mutex);
-    int rc = futex_wait(&cond->value, value, abstime, cond->clockid);
+    int rc = futex_wait(&cond->value, value, abstime, cond->clockid, false);
     if (rc < 0 && errno != EAGAIN)
         return errno;

@@ -129,7 +129,7 @@ int pthread_cond_signal(pthread_cond_t* cond)
     if (!(value & NEED_TO_WAKE_ONE)) [[likely]]
         return 0;
     // ...try to wake someone...
-    int rc = futex_wake(&cond->value, 1);
+    int rc = futex_wake(&cond->value, 1, false);
     VERIFY(rc >= 0);
     // ...and if we have woken someone, put the flag back.
     if (rc > 0)

@@ -152,7 +152,7 @@ int pthread_cond_broadcast(pthread_cond_t* cond)
     pthread_mutex_t* mutex = AK::atomic_load(&cond->mutex, AK::memory_order_relaxed);
     VERIFY(mutex);

-    int rc = futex(&cond->value, FUTEX_REQUEUE, 1, nullptr, &mutex->lock, INT_MAX);
+    int rc = futex(&cond->value, FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG, 1, nullptr, &mutex->lock, INT_MAX);
     VERIFY(rc >= 0);
     return 0;
 }

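The broadcast hunk above keeps using FUTEX_REQUEUE: wake at most one waiter on the condition word and transplant the remaining waiters onto the mutex word, so pthread_mutex_unlock() releases them one at a time instead of letting them stampede. A sketch of just that call shape, mirroring the line in the diff (the cond_word/mutex_word names are illustrative):

#include <limits.h>
#include <serenity.h>
#include <stdint.h>

// Wake at most one waiter sleeping on cond_word and requeue up to INT_MAX of
// the remaining waiters onto mutex_word (private, single-process futexes).
static int broadcast_requeue(uint32_t* cond_word, uint32_t* mutex_word)
{
    return futex(cond_word, FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG, 1, nullptr, mutex_word, INT_MAX);
}
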
@@ -155,7 +155,7 @@ int pthread_mutex_lock(pthread_mutex_t* mutex)
     value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);

     while (value != MUTEX_UNLOCKED) {
-        futex_wait(&mutex->lock, value, nullptr, 0);
+        futex_wait(&mutex->lock, value, nullptr, 0, false);
         value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
     }

@@ -172,7 +172,7 @@ int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
     // because we know we don't. Used in the condition variable implementation.
     u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
     while (value != MUTEX_UNLOCKED) {
-        futex_wait(&mutex->lock, value, nullptr, 0);
+        futex_wait(&mutex->lock, value, nullptr, 0, false);
         value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
     }

@@ -195,7 +195,7 @@ int pthread_mutex_unlock(pthread_mutex_t* mutex)
     u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_UNLOCKED, AK::memory_order_release);
     if (value == MUTEX_LOCKED_NEED_TO_WAKE) [[unlikely]] {
-        int rc = futex_wake(&mutex->lock, 1);
+        int rc = futex_wake(&mutex->lock, 1, false);
         VERIFY(rc >= 0);
     }

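Stripped of the AK:: helpers and the uncontended fast path, the lock/unlock pairing these hunks touch is the classic futex-mutex protocol. A condensed standalone sketch under that reading, using the post-commit wrappers and GCC builtins as illustrative stand-ins for MUTEX_* and AK::atomic_*:

#include <serenity.h>
#include <stdint.h>

// 0 = unlocked, 2 = locked and a waiter may be asleep ("need to wake").
static void sketch_lock(uint32_t* lock)
{
    uint32_t value = __atomic_exchange_n(lock, 2, __ATOMIC_ACQUIRE);
    while (value != 0) {
        // Sleep only while the word still holds the value we last observed.
        futex_wait(lock, value, nullptr, 0, false);
        value = __atomic_exchange_n(lock, 2, __ATOMIC_ACQUIRE);
    }
}

static void sketch_unlock(uint32_t* lock)
{
    if (__atomic_exchange_n(lock, 0, __ATOMIC_RELEASE) == 2)
        futex_wake(lock, 1, false); // someone may be asleep; wake one waiter
}
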
@@ -46,7 +46,7 @@ int pthread_once(pthread_once_t* self, void (*callback)(void))
         // anyone.
         break;
     case State::PERFORMING_WITH_WAITERS:
-        futex_wake(self, INT_MAX);
+        futex_wake(self, INT_MAX, false);
         break;
     }

@@ -76,7 +76,7 @@ int pthread_once(pthread_once_t* self, void (*callback)(void))
         [[fallthrough]];
     case State::PERFORMING_WITH_WAITERS:
         // Let's wait for it.
-        futex_wait(self, state2, nullptr, 0);
+        futex_wait(self, state2, nullptr, 0, false);
         // We have been woken up, but that might have been due to a signal
         // or something, so we have to reevaluate. We need acquire ordering
         // here for the same reason as above. Hopefully we'll just see

@@ -83,7 +83,7 @@ int sem_post(sem_t* sem)
     // Check if another sem_post() call has handled it already.
     if (!(value & POST_WAKES)) [[likely]]
         return 0;
-    int rc = futex_wake(&sem->value, 1);
+    int rc = futex_wake(&sem->value, 1, false);
     VERIFY(rc >= 0);
     return 0;
 }

@@ -145,7 +145,7 @@ int sem_timedwait(sem_t* sem, const struct timespec* abstime)
             // Re-evaluate.
             continue;
         if (going_to_wake) [[unlikely]] {
-            int rc = futex_wake(&sem->value, count - 1);
+            int rc = futex_wake(&sem->value, count - 1, false);
             VERIFY(rc >= 0);
         }
         return 0;

@@ -162,7 +162,7 @@ int sem_timedwait(sem_t* sem, const struct timespec* abstime)
         }
         // At this point, we're committed to sleeping.
         responsible_for_waking = true;
-        futex_wait(&sem->value, value, abstime, CLOCK_REALTIME);
+        futex_wait(&sem->value, value, abstime, CLOCK_REALTIME, false);
         // This is the state we will probably see upon being waked:
         value = 1;
     }

@@ -27,7 +27,7 @@ int futex(uint32_t* userspace_address, int futex_op, uint32_t value, const struc
 #    define ALWAYS_INLINE_SERENITY_H
 #endif

-static ALWAYS_INLINE int futex_wait(uint32_t* userspace_address, uint32_t value, const struct timespec* abstime, int clockid)
+static ALWAYS_INLINE int futex_wait(uint32_t* userspace_address, uint32_t value, const struct timespec* abstime, int clockid, int process_shared)
 {
     int op;

@@ -39,12 +39,12 @@ static ALWAYS_INLINE int futex_wait(uint32_t* userspace_address, uint32_t value,
     } else {
         op = FUTEX_WAIT;
     }
-    return futex(userspace_address, op, value, abstime, NULL, FUTEX_BITSET_MATCH_ANY);
+    return futex(userspace_address, op | (process_shared ? 0 : FUTEX_PRIVATE_FLAG), value, abstime, NULL, FUTEX_BITSET_MATCH_ANY);
 }

-static ALWAYS_INLINE int futex_wake(uint32_t* userspace_address, uint32_t count)
+static ALWAYS_INLINE int futex_wake(uint32_t* userspace_address, uint32_t count, int process_shared)
 {
-    return futex(userspace_address, FUTEX_WAKE, count, NULL, NULL, 0);
+    return futex(userspace_address, FUTEX_WAKE | (process_shared ? 0 : FUTEX_PRIVATE_FLAG), count, NULL, NULL, 0);
 }

 #ifdef ALWAYS_INLINE_SERENITY_H

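With the extra parameter, each call site chooses between the private fast path (process_shared = false, which keeps FUTEX_PRIVATE_FLAG set) and the cross-process path (process_shared = true, for a futex word in MAP_SHARED memory). A small usage sketch against the signatures above; the event scenario itself is invented:

#include <serenity.h>
#include <stdint.h>

// Block until *word becomes non-zero. With abstime = nullptr there is no
// timeout, so the clockid argument (0 here) only matters for timed waits.
static void wait_for_event(uint32_t* word, bool process_shared)
{
    while (__atomic_load_n(word, __ATOMIC_ACQUIRE) == 0)
        futex_wait(word, 0, nullptr, 0, process_shared);
}

// Publish the event and wake one sleeper, if any.
static void signal_event(uint32_t* word, bool process_shared)
{
    __atomic_store_n(word, 1, __ATOMIC_RELEASE);
    futex_wake(word, 1, process_shared);
}
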