serenity/Userland/Libraries/LibC/pthread_integration.cpp
Commit 5536f3c277 by Sergey Bugaev: "LibC: Add __pthread_mutex_lock_pessimistic_np()"
This is a private function that locks the mutex much like the regular
pthread_mutex_lock(), but causes the corresponding unlock operation to
always assume there may be other waiters. This is useful when some
waiters are made to wait on the mutex's futex directly, without going
through pthread_mutex_lock(). It is going to be used by the condition
variable implementation in the next commit.
2021-07-05 20:26:01 +02:00
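
As a brief, hedged illustration (this is not the actual condition variable
code, which lands in the next commit), a wait operation might release the
mutex, sleep on the condition's own futex, and then re-lock through the
pessimistic variant, since a signalling thread may direct waiters at the
mutex's futex without them ever calling pthread_mutex_lock(). The struct and
function names here are hypothetical, not part of LibC:

    // Hypothetical sketch only; not LibC code.
    struct hypothetical_cond {
        u32 value { 0 };
    };

    int hypothetical_cond_wait(hypothetical_cond* cond, pthread_mutex_t* mutex)
    {
        u32 value = AK::atomic_load(&cond->value, AK::memory_order_acquire);
        __pthread_mutex_unlock(mutex);
        futex_wait(&cond->value, value, nullptr, 0);
        // Re-acquire pessimistically: the matching unlock must assume there
        // may be waiters that never went through pthread_mutex_lock().
        return __pthread_mutex_lock_pessimistic_np(mutex);
    }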


/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Atomic.h>
#include <AK/NeverDestroyed.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <bits/pthread_integration.h>
#include <errno.h>
#include <sched.h>
#include <serenity.h>
#include <unistd.h>

namespace {

// Most programs don't use atfork handlers; don't make them incur an extra
// mutex lock/unlock on every fork().
static Atomic<bool> g_did_touch_atfork { false };
static pthread_mutex_t g_atfork_list_mutex = __PTHREAD_MUTEX_INITIALIZER;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_prepare_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_child_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_parent_list;

}
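
// Note: these __pthread_fork_*() hooks are what fork() in LibC is expected to
// call: __pthread_fork_prepare() before forking, then __pthread_fork_child()
// in the child and __pthread_fork_parent() in the parent, mirroring the
// handler ordering that pthread_atfork() promises.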
extern "C" {

void __pthread_fork_prepare(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_prepare_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_child(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_child_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_parent(void)
{
    if (!g_did_touch_atfork.load())
        return;

    __pthread_mutex_lock(&g_atfork_list_mutex);
    for (auto entry : g_atfork_parent_list.get())
        entry();
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_prepare(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_prepare_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_parent(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_parent_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

void __pthread_fork_atfork_register_child(void (*func)(void))
{
    g_did_touch_atfork.store(true);

    __pthread_mutex_lock(&g_atfork_list_mutex);
    g_atfork_child_list->append(func);
    __pthread_mutex_unlock(&g_atfork_list_mutex);
}

int __pthread_self()
{
    return gettid();
}

int pthread_self() __attribute__((weak, alias("__pthread_self")));
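
// The mutex below is a classic three-state futex lock (cf. Drepper's
// "Futexes Are Tricky"): 0 means unlocked, 1 means locked with no known
// waiters, and 2 means locked with possible waiters, so only an unlock that
// observes state 2 needs to issue a futex_wake().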
static constexpr u32 MUTEX_UNLOCKED = 0;
static constexpr u32 MUTEX_LOCKED_NO_NEED_TO_WAKE = 1;
static constexpr u32 MUTEX_LOCKED_NEED_TO_WAKE = 2;

int __pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    mutex->lock = 0;
    mutex->owner = 0;
    mutex->level = 0;
    mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) __attribute__((weak, alias("__pthread_mutex_init")));
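
// For illustration, a recursive mutex would reach this initializer through
// the standard attribute API (a sketch, assuming the usual POSIX entry
// points are available):
//
//     pthread_mutexattr_t attr;
//     pthread_mutexattr_init(&attr);
//     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
//     pthread_mutex_t mutex;
//     pthread_mutex_init(&mutex, &attr);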

int __pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    u32 expected = MUTEX_UNLOCKED;
    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, expected, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);

    if (exchanged) [[likely]] {
        AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
        mutex->level = 0;
        return 0;
    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
        if (owner == __pthread_self()) {
            // We already own the mutex!
            mutex->level++;
            return 0;
        }
    }

    return EBUSY;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex) __attribute__((weak, alias("__pthread_mutex_trylock")));
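
// State transitions of mutex->lock, informally:
//   lock, fast path:  0 -> 1 via CAS (no waiters recorded)
//   lock, slow path:  anything -> 2 via exchange, sleeping on the futex
//                     until the previous value was 0
//   unlock:           1 -> 0 (nobody to wake), or 2 -> 0 plus a
//                     futex_wake() of one waiter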

int __pthread_mutex_lock(pthread_mutex_t* mutex)
{
    pthread_t this_thread = __pthread_self();

    // Fast path: attempt to claim the mutex without waiting.
    u32 value = MUTEX_UNLOCKED;
    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, value, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
    if (exchanged) [[likely]] {
        AK::atomic_store(&mutex->owner, this_thread, AK::memory_order_relaxed);
        mutex->level = 0;
        return 0;
    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
        if (owner == this_thread) {
            // We already own the mutex!
            mutex->level++;
            return 0;
        }
    }

    // Slow path: wait, record the fact that we're going to wait, and always
    // remember to wake the next thread up once we release the mutex.
    if (value != MUTEX_LOCKED_NEED_TO_WAKE)
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);

    while (value != MUTEX_UNLOCKED) {
        futex_wait(&mutex->lock, value, nullptr, 0);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }

    AK::atomic_store(&mutex->owner, this_thread, AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}

int pthread_mutex_lock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_lock")));

int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
{
    // Same as pthread_mutex_lock(), but always set MUTEX_LOCKED_NEED_TO_WAKE,
    // and also don't bother checking for already owning the mutex recursively,
    // because we know we don't. Used in the condition variable implementation.
    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    while (value != MUTEX_UNLOCKED) {
        futex_wait(&mutex->lock, value, nullptr, 0);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }

    AK::atomic_store(&mutex->owner, __pthread_self(), AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}
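
// Because __pthread_mutex_lock_pessimistic_np() always stores
// MUTEX_LOCKED_NEED_TO_WAKE, the matching __pthread_mutex_unlock() below is
// guaranteed to observe state 2 and futex_wake() the next waiter, even if
// that waiter never went through pthread_mutex_lock().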

int __pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }

    AK::atomic_store(&mutex->owner, 0, AK::memory_order_relaxed);

    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_UNLOCKED, AK::memory_order_release);
    if (value == MUTEX_LOCKED_NEED_TO_WAKE) [[unlikely]]
        futex_wake(&mutex->lock, 1);

    return 0;
}

int pthread_mutex_unlock(pthread_mutex_t*) __attribute__((weak, alias("__pthread_mutex_unlock")));
}