1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 21:27:45 +00:00

LibPthread: Move the pthread and semaphore implementation to LibC

This additionally adds some compatibility code to redirect linking
attempts for LibPthread to LibC instead.
This commit is contained in:
Tim Schumacher 2022-06-12 20:16:06 +02:00 committed by Linus Groh
parent e156f79f53
commit 2f3b9c49a5
16 changed files with 11 additions and 159 deletions

View file

@ -27,8 +27,10 @@ set(LIBC_SOURCES
netdb.cpp
poll.cpp
priority.cpp
pthread_forward.cpp
pthread.cpp
pthread_cond.cpp
pthread_integration.cpp
pthread_once.cpp
pthread_tls.cpp
pty.cpp
pwd.cpp
@ -38,6 +40,7 @@ set(LIBC_SOURCES
scanf.cpp
sched.cpp
search.cpp
semaphore.cpp
serenity.cpp
shadow.cpp
signal.cpp

View file

@ -1,29 +0,0 @@
/*
* Copyright (c) 2021, Gunnar Beutner <gunnar@beutner.name>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <pthread.h>
// Table of function pointers covering the pthread entry points that the
// forwarding stubs dispatch through; __init_pthread_forward() installs the
// actual implementations.
struct PthreadFunctions {
    int (*pthread_mutex_trylock)(pthread_mutex_t* mutex);
    int (*pthread_mutex_destroy)(pthread_mutex_t*);
    int (*pthread_mutexattr_init)(pthread_mutexattr_t*);
    int (*pthread_mutexattr_settype)(pthread_mutexattr_t*, int);
    int (*pthread_mutexattr_destroy)(pthread_mutexattr_t*);
    int (*pthread_once)(pthread_once_t*, void (*)(void));
    int (*pthread_cond_broadcast)(pthread_cond_t*);
    int (*pthread_cond_init)(pthread_cond_t*, pthread_condattr_t const*);
    int (*pthread_cond_signal)(pthread_cond_t*);
    int (*pthread_cond_wait)(pthread_cond_t*, pthread_mutex_t*);
    int (*pthread_cond_destroy)(pthread_cond_t*);
    int (*pthread_cond_timedwait)(pthread_cond_t*, pthread_mutex_t*, const struct timespec*);
};
// Installs the table of implementations used by the forwarding stubs.
void __init_pthread_forward(PthreadFunctions);

View file

@ -0,0 +1,923 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/Debug.h>
#include <AK/Format.h>
#include <AK/SinglyLinkedList.h>
#include <AK/StdLibExtras.h>
#include <Kernel/API/Syscall.h>
#include <LibSystem/syscall.h>
#include <bits/pthread_integration.h>
#include <errno.h>
#include <limits.h>
#include <mallocdefs.h>
#include <pthread.h>
#include <serenity.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <syscall.h>
#include <time.h>
#include <unistd.h>
namespace {
using PthreadAttrImpl = Syscall::SC_create_thread_params;
} // end anonymous namespace

// Thread stack sizes must be a multiple of this alignment.
static constexpr size_t required_stack_alignment = 4 * MiB;
// Sanity-check upper bounds for values supplied through pthread_attr_t.
static constexpr size_t highest_reasonable_guard_size = 32 * PAGE_SIZE;
static constexpr size_t highest_reasonable_stack_size = 8 * MiB; // That's the default in Ubuntu?

// Location and size of the current thread's stack, recorded at thread start
// so exit_thread() can hand the region back to the kernel for unmapping.
__thread void* s_stack_location;
__thread size_t s_stack_size;

// Converts a negative syscall return value into a positive errno-style
// pthread result (0 on success).
#define __RETURN_PTHREAD_ERROR(rc) \
return ((rc) < 0 ? -(rc) : 0)

// One routine registered via pthread_cleanup_push().
struct CleanupHandler {
    void (*routine)(void*);
    void* argument;
};

// LIFO list of pending cleanup handlers for the current thread.
static thread_local SinglyLinkedList<CleanupHandler> cleanup_handlers;
extern "C" {

// Destroys TLS keys for the current thread, then asks the kernel to
// terminate it, passing along the stack region so it can be unmapped.
[[noreturn]] static void exit_thread(void* code, void* stack_location, size_t stack_size)
{
    __pthread_key_destroy_for_current_thread();
    syscall(SC_exit_thread, code, stack_location, stack_size);
    VERIFY_NOT_REACHED();
}

// Exits the current thread without running pthread_cleanup_push() handlers.
[[noreturn]] static void pthread_exit_without_cleanup_handlers(void* value_ptr)
{
    exit_thread(value_ptr, s_stack_location, s_stack_size);
}
// Trampoline every new thread starts in: records the stack bounds, runs the
// user-supplied routine, then exits the thread with its return value.
static void* pthread_create_helper(void* (*routine)(void*), void* argument, void* stack_location, size_t stack_size)
{
    // HACK: This is a __thread - marked thread-local variable. If we initialize it globally, VERY weird errors happen.
    // Therefore, we need to do the initialization here and in __malloc_init().
    s_allocation_enabled = true;

    s_stack_location = stack_location;
    s_stack_size = stack_size;

    void* ret_val = routine(argument);
    pthread_exit_without_cleanup_handlers(ret_val);
}
// Prepares the initial stack for pthread_create_helper() and asks the kernel
// to create the thread. On i386 the four helper arguments are passed on the
// stack; on x86_64 they go in registers (rdi/rsi/rdx/rcx).
static int create_thread(pthread_t* thread, void* (*entry)(void*), void* argument, PthreadAttrImpl* thread_params)
{
    // Start at the top of the stack region; stacks grow downwards.
    void** stack = (void**)((uintptr_t)thread_params->stack_location + thread_params->stack_size);

    auto push_on_stack = [&](void* data) {
        stack--;
        *stack = data;
        thread_params->stack_size -= sizeof(void*);
    };

    // We set up the stack for pthread_create_helper.
    // Note that we need to align the stack to 16B, accounting for
    // the fact that we also push 16 bytes.
    while (((uintptr_t)stack - 16) % 16 != 0)
        push_on_stack(nullptr);

#if ARCH(I386)
    // Helper arguments, pushed in reverse order of the parameter list.
    push_on_stack((void*)(uintptr_t)thread_params->stack_size);
    push_on_stack(thread_params->stack_location);
    push_on_stack(argument);
    push_on_stack((void*)entry);
#else
    thread_params->rdi = (FlatPtr)entry;
    thread_params->rsi = (FlatPtr)argument;
    thread_params->rdx = (FlatPtr)thread_params->stack_location;
    thread_params->rcx = thread_params->stack_size;
#endif
    VERIFY((uintptr_t)stack % 16 == 0);

    // Push a fake return address
    push_on_stack(nullptr);

    int rc = syscall(SC_create_thread, pthread_create_helper, thread_params);
    // On success the syscall returns the new thread's ID.
    if (rc >= 0)
        *thread = rc;
    __RETURN_PTHREAD_ERROR(rc);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_self.html
int pthread_self()
{
    // Thread IDs double as pthread_t values.
    return __pthread_self();
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_create.html
// Creates a new thread running start_routine(argument_to_start_routine).
// Returns 0 on success, or a positive errno value on failure.
int pthread_create(pthread_t* thread, pthread_attr_t* attributes, void* (*start_routine)(void*), void* argument_to_start_routine)
{
    if (!thread)
        return EINVAL; // Positive errno per POSIX (was -EINVAL).

    PthreadAttrImpl default_attributes {};
    PthreadAttrImpl** arg_attributes = reinterpret_cast<PthreadAttrImpl**>(attributes);
    PthreadAttrImpl* used_attributes = arg_attributes ? *arg_attributes : &default_attributes;

    if (!used_attributes->stack_location) {
        // Adjust the stack size: the user might have called setstacksize,
        // which has no restrictions on size/alignment.
        if (0 != (used_attributes->stack_size % required_stack_alignment))
            used_attributes->stack_size += required_stack_alignment - (used_attributes->stack_size % required_stack_alignment);

        used_attributes->stack_location = mmap_with_name(nullptr, used_attributes->stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, 0, 0, "Thread stack");
        // mmap reports failure with MAP_FAILED, not a null pointer; the old
        // null check could never fire and a bogus stack address leaked through.
        if (used_attributes->stack_location == MAP_FAILED) {
            used_attributes->stack_location = nullptr;
            return EAGAIN;
        }
    }

    dbgln_if(PTHREAD_DEBUG, "pthread_create: Creating thread with attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        used_attributes,
        (PTHREAD_CREATE_JOINABLE == used_attributes->detach_state) ? "joinable" : "detached",
        used_attributes->schedule_priority,
        used_attributes->guard_page_size,
        used_attributes->stack_size,
        used_attributes->stack_location);

    return create_thread(thread, start_routine, argument_to_start_routine, used_attributes);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_exit.html
void pthread_exit(void* value_ptr)
{
    // Run handlers registered with pthread_cleanup_push(), newest first.
    while (!cleanup_handlers.is_empty()) {
        auto handler = cleanup_handlers.take_first();
        handler.routine(handler.argument);
    }

    pthread_exit_without_cleanup_handlers(value_ptr);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cleanup_push.html
void pthread_cleanup_push(void (*routine)(void*), void* arg)
{
    // Prepend so handlers run in reverse registration order on pop/exit.
    cleanup_handlers.prepend({ routine, arg });
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cleanup_pop.html
void pthread_cleanup_pop(int execute)
{
    VERIFY(!cleanup_handlers.is_empty());

    auto handler = cleanup_handlers.take_first();

    // The handler is removed either way; it only runs if requested.
    if (execute)
        handler.routine(handler.argument);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_join.html
int pthread_join(pthread_t thread, void** exit_value_ptr)
{
    // Blocks until the target thread exits; its result lands in *exit_value_ptr.
    int rc = syscall(SC_join_thread, thread, exit_value_ptr);
    __RETURN_PTHREAD_ERROR(rc);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_kill.html
int pthread_kill(pthread_t thread, int sig)
{
    int rc = syscall(SC_kill_thread, thread, sig);
    __RETURN_PTHREAD_ERROR(rc);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_detach.html
int pthread_detach(pthread_t thread)
{
    int rc = syscall(SC_detach_thread, thread);
    __RETURN_PTHREAD_ERROR(rc);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_sigmask.html
int pthread_sigmask(int how, sigset_t const* set, sigset_t* old_set)
{
    // Signal masks are per-thread, so sigprocmask() already does the right thing.
    if (sigprocmask(how, set, old_set))
        return errno;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_init.html
int pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attributes)
{
    // Forwards to the shared __pthread_mutex_* implementation
    // (see <bits/pthread_integration.h>).
    return __pthread_mutex_init(mutex, attributes);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_destroy.html
int pthread_mutex_destroy(pthread_mutex_t*)
{
    // Mutexes hold no allocated resources, so destruction is a no-op.
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_lock.html
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    return __pthread_mutex_lock(mutex);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_trylock.html
int pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    return __pthread_mutex_trylock(mutex);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_unlock.html
int pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    return __pthread_mutex_unlock(mutex);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_init.html
int pthread_mutexattr_init(pthread_mutexattr_t* attr)
{
    // Default to a normal (non-recursive) mutex.
    attr->type = PTHREAD_MUTEX_NORMAL;
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_destroy.html
int pthread_mutexattr_destroy(pthread_mutexattr_t*)
{
    // Mutex attributes hold no allocated state; nothing to release.
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_settype.html
int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
{
    if (!attr)
        return EINVAL;
    // Only normal and recursive mutexes are supported.
    if (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE)
        return EINVAL;
    attr->type = type;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_gettype.html
// Reports the mutex type stored in the attribute object.
int pthread_mutexattr_gettype(pthread_mutexattr_t* attr, int* type)
{
    // Guard against null arguments, matching pthread_mutexattr_settype().
    if (!attr || !type)
        return EINVAL;
    *type = attr->type;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_init.html
int pthread_attr_init(pthread_attr_t* attributes)
{
    // A pthread_attr_t is an opaque slot holding a pointer to a heap-allocated
    // PthreadAttrImpl, which pthread_attr_destroy() later frees.
    auto* impl = new PthreadAttrImpl {};
    *attributes = impl;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_init: New thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        impl,
        (PTHREAD_CREATE_JOINABLE == impl->detach_state) ? "joinable" : "detached",
        impl->schedule_priority,
        impl->guard_page_size,
        impl->stack_size,
        impl->stack_location);

    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_destroy.html
int pthread_attr_destroy(pthread_attr_t* attributes)
{
    // Free the heap-allocated implementation created by pthread_attr_init().
    delete *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getdetachstate.html
int pthread_attr_getdetachstate(pthread_attr_t const* attributes, int* p_detach_state)
{
    // The attribute object stores a pointer to the real PthreadAttrImpl.
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));

    if (!attributes_impl || !p_detach_state)
        return EINVAL;

    *p_detach_state = attributes_impl->detach_state;
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setdetachstate.html
int pthread_attr_setdetachstate(pthread_attr_t* attributes, int detach_state)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl)
        return EINVAL;

    // Only the two POSIX-defined detach states are accepted.
    if (detach_state != PTHREAD_CREATE_JOINABLE && detach_state != PTHREAD_CREATE_DETACHED)
        return EINVAL;

    attributes_impl->detach_state = detach_state;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setdetachstate: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getguardsize.html
int pthread_attr_getguardsize(pthread_attr_t const* attributes, size_t* p_guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));

    if (!attributes_impl || !p_guard_size)
        return EINVAL;

    // Report the size the caller originally requested, not the page-rounded one.
    *p_guard_size = attributes_impl->reported_guard_page_size;
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setguardsize.html
int pthread_attr_setguardsize(pthread_attr_t* attributes, size_t guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl)
        return EINVAL;

    size_t actual_guard_size = guard_size;
    // round up to a whole number of pages
    if (0 != (guard_size % PAGE_SIZE))
        actual_guard_size += PAGE_SIZE - (guard_size % PAGE_SIZE);

    // what is the user even doing?
    if (actual_guard_size > highest_reasonable_guard_size) {
        return EINVAL;
    }

    // Keep both the rounded size (used internally) and the requested size
    // (returned by pthread_attr_getguardsize()).
    attributes_impl->guard_page_size = actual_guard_size;
    attributes_impl->reported_guard_page_size = guard_size; // POSIX, why?

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setguardsize: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getschedparam.html
int pthread_attr_getschedparam(pthread_attr_t const* attributes, struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));

    if (!attributes_impl || !p_sched_param)
        return EINVAL;

    p_sched_param->sched_priority = attributes_impl->schedule_priority;
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setschedparam.html
int pthread_attr_setschedparam(pthread_attr_t* attributes, const struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl || !p_sched_param)
        return EINVAL;

    // Reject priorities outside the kernel's supported range.
    if (p_sched_param->sched_priority < THREAD_PRIORITY_MIN || p_sched_param->sched_priority > THREAD_PRIORITY_MAX)
        return ENOTSUP;

    attributes_impl->schedule_priority = p_sched_param->sched_priority;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setschedparam: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getstack.html
int pthread_attr_getstack(pthread_attr_t const* attributes, void** p_stack_ptr, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));

    if (!attributes_impl || !p_stack_ptr || !p_stack_size)
        return EINVAL;

    *p_stack_ptr = attributes_impl->stack_location;
    *p_stack_size = attributes_impl->stack_size;

    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setstack.html
int pthread_attr_setstack(pthread_attr_t* attributes, void* p_stack, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl || !p_stack)
        return EINVAL;

    // Check for required alignment on size
    if (0 != (stack_size % required_stack_alignment))
        return EINVAL;

    // FIXME: Check for required alignment on pointer?

    // FIXME: "[EACCES] The stack page(s) described by stackaddr and stacksize are not both readable and writable by the thread."
    // Have to check that the whole range is mapped to this process/thread? Can we defer this to create_thread?

    attributes_impl->stack_size = stack_size;
    attributes_impl->stack_location = p_stack;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setstack: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getstacksize.html
int pthread_attr_getstacksize(pthread_attr_t const* attributes, size_t* p_stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));

    if (!attributes_impl || !p_stack_size)
        return EINVAL;

    *p_stack_size = attributes_impl->stack_size;
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setstacksize.html
int pthread_attr_setstacksize(pthread_attr_t* attributes, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl)
        return EINVAL;

    // Keep the size within [PTHREAD_STACK_MIN, highest_reasonable_stack_size];
    // pthread_create() later rounds it up to the required alignment.
    if ((stack_size < PTHREAD_STACK_MIN) || stack_size > highest_reasonable_stack_size)
        return EINVAL;

    attributes_impl->stack_size = stack_size;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setstacksize: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getscope.html
int pthread_attr_getscope([[maybe_unused]] pthread_attr_t const* attributes, [[maybe_unused]] int* contention_scope)
{
    // Contention scope is not implemented; this stub reports success only.
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setscope.html
int pthread_attr_setscope([[maybe_unused]] pthread_attr_t* attributes, [[maybe_unused]] int contention_scope)
{
    // No-op stub; the requested scope is ignored.
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_getschedparam.html
int pthread_getschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int* policy, [[maybe_unused]] struct sched_param* param)
{
    // No-op stub; 'policy' and 'param' are left untouched.
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setschedparam.html
int pthread_setschedparam([[maybe_unused]] pthread_t thread, [[maybe_unused]] int policy, [[maybe_unused]] const struct sched_param* param)
{
    // No-op stub; the requested policy/parameters are ignored.
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cancel.html
// NOTE: libgcc expects this function to exist in libpthread, even if it is not implemented.
int pthread_cancel(pthread_t)
{
    // Not implemented; aborts at runtime if ever called.
    TODO();
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_create.html
int pthread_key_create(pthread_key_t* key, KeyDestructor destructor)
{
    // TLS keys forward to the shared __pthread_key_* implementation.
    return __pthread_key_create(key, destructor);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_key_delete.html
int pthread_key_delete(pthread_key_t key)
{
    return __pthread_key_delete(key);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_getspecific.html
void* pthread_getspecific(pthread_key_t key)
{
    return __pthread_getspecific(key);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setspecific.html
int pthread_setspecific(pthread_key_t key, void const* value)
{
    return __pthread_setspecific(key, value);
}
// Serenity extension: sets the target thread's name (shown by e.g. SystemMonitor).
int pthread_setname_np(pthread_t thread, char const* name)
{
    if (!name)
        return EFAULT;

    int rc = syscall(SC_set_thread_name, thread, name, strlen(name));
    __RETURN_PTHREAD_ERROR(rc);
}

// Serenity extension: copies the target thread's name into 'buffer'.
int pthread_getname_np(pthread_t thread, char* buffer, size_t buffer_size)
{
    int rc = syscall(SC_get_thread_name, thread, buffer, buffer_size);
    __RETURN_PTHREAD_ERROR(rc);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setcancelstate.html
int pthread_setcancelstate(int state, int* oldstate)
{
    // Cancellation is not implemented; the state is permanently "disabled".
    if (oldstate)
        *oldstate = PTHREAD_CANCEL_DISABLE;
    dbgln("FIXME: Implement pthread_setcancelstate({}, ...)", state);
    if (state != PTHREAD_CANCEL_DISABLE)
        return EINVAL;
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setcanceltype.html
int pthread_setcanceltype(int type, int* oldtype)
{
    // Cancellation is not implemented; the type is permanently "deferred".
    if (oldtype)
        *oldtype = PTHREAD_CANCEL_DEFERRED;
    dbgln("FIXME: Implement pthread_setcanceltype({}, ...)", type);
    if (type != PTHREAD_CANCEL_DEFERRED)
        return EINVAL;
    return 0;
}
// m_lock holds this value while the spinlock is free; when held it contains
// the holder's tid.
constexpr static pid_t spinlock_unlock_sentinel = 0;

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_destroy.html
int pthread_spin_destroy(pthread_spinlock_t* lock)
{
    auto current = AK::atomic_load(&lock->m_lock);

    // Destroying a lock that is still held is an error.
    if (current != spinlock_unlock_sentinel)
        return EBUSY;

    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_init.html
int pthread_spin_init(pthread_spinlock_t* lock, [[maybe_unused]] int shared)
{
    // Process-shared spinlocks are not supported; 'shared' is ignored.
    lock->m_lock = spinlock_unlock_sentinel;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_lock.html
// Spins until the lock is acquired; returns EDEADLK if the calling thread
// already holds it.
int pthread_spin_lock(pthread_spinlock_t* lock)
{
    auto const desired = gettid();
    while (true) {
        auto current = AK::atomic_load(&lock->m_lock);

        if (current == desired)
            return EDEADLK;

        // Only take the lock when it is currently free. The old code
        // compare-exchanged against whatever value it had just loaded, which
        // would "succeed" even when another thread held the lock, silently
        // stealing it from that holder.
        auto expected = spinlock_unlock_sentinel;
        if (AK::atomic_compare_exchange_strong(&lock->m_lock, expected, desired, AK::MemoryOrder::memory_order_acquire))
            break;
    }
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_trylock.html
int pthread_spin_trylock(pthread_spinlock_t* lock)
{
    // We expect the current value to be unlocked, as the specification
    // states that trylock should lock only if it is not held by ANY thread.
    auto current = spinlock_unlock_sentinel;
    auto desired = gettid();

    // Single CAS attempt: free -> our tid, otherwise report EBUSY.
    if (AK::atomic_compare_exchange_strong(&lock->m_lock, current, desired, AK::MemoryOrder::memory_order_acquire)) {
        return 0;
    } else {
        return EBUSY;
    }
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_unlock.html
int pthread_spin_unlock(pthread_spinlock_t* lock)
{
    auto current = AK::atomic_load(&lock->m_lock);

    // Only the thread that holds the lock (m_lock == its tid) may release it.
    if (gettid() != current)
        return EPERM;

    AK::atomic_store(&lock->m_lock, spinlock_unlock_sentinel);
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_equal.html
int pthread_equal(pthread_t t1, pthread_t t2)
{
    // Thread IDs are plain integers, so equality is a direct comparison.
    return (t1 == t2) ? 1 : 0;
}
// FIXME: Build a proper rw lock on top of the futex mechanism above.
// Until then, the simple implementation below keeps things working.
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_destroy.html
int pthread_rwlock_destroy(pthread_rwlock_t* rl)
{
    // There is no state to tear down; null and valid locks are both fine.
    (void)rl;
    return 0;
}
// In a very non-straightforward way, this value is composed of two 32-bit integers
// the top 32 bits are reserved for the ID of write-locking thread (if any)
// and the bottom 32 bits are:
// top 2 bits (30,31): reader wake mask, writer wake mask
// middle 16 bits: information
// bit 16: someone is waiting to write
// bit 17: locked for write
// bottom 16 bits (0..15): reader count
// The masks below name the individual bits described above.
constexpr static u32 reader_wake_mask = 1 << 30;
constexpr static u32 writer_wake_mask = 1 << 31;
constexpr static u32 writer_locked_mask = 1 << 17;
constexpr static u32 writer_intent_mask = 1 << 16;
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_init.html
int pthread_rwlock_init(pthread_rwlock_t* __restrict lockp, pthread_rwlockattr_t const* __restrict attr)
{
    // Just ignore the attributes. use defaults for now.
    (void)attr;

    // No readers, no writer, not locked at all.
    *lockp = 0;
    return 0;
}
// Note that this function does not care about the top 32 bits at all.
// Returns value_if_okay once a read lock was taken, value_if_timeout when a
// timed wait expired (or when only_once is set and the lock is unavailable),
// or a positive errno if the futex wait failed for another reason.
static int rwlock_rdlock_maybe_timed(u32* lockp, const struct timespec* timeout = nullptr, bool only_once = false, int value_if_timeout = -1, int value_if_okay = -2)
{
    auto current = AK::atomic_load(lockp);
    while (true) {
        // First, see if this is locked for writing
        // if it's not, try to add to the counter.
        // If someone is waiting to write, and there is one or no other readers, let them have the lock.
        if (!(current & writer_locked_mask)) {
            auto count = (u16)current;
            if (!(current & writer_intent_mask) || count > 1) {
                ++count;
                auto desired = (current & 0xffff0000u) | count;
                auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
                if (!did_exchange)
                    continue; // tough luck, try again.
                return value_if_okay;
            }
        }

        // A non-blocking attempt (pthread_rwlock_tryrdlock()) gives up here
        // instead of sleeping. NOTE: the previous `for (; !only_once;)` loop
        // skipped the acquisition attempt entirely in that case, so trylock
        // always failed even on an uncontended lock.
        if (only_once)
            return value_if_timeout;

        // If no one else is waiting for the read wake bit, set it.
        if (!(current & reader_wake_mask)) {
            auto desired = current | reader_wake_mask;
            auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
            if (!did_exchange)
                continue; // Something interesting happened!
            current = desired;
        }

        // Seems like someone is writing (or is interested in writing and we let them have the lock)
        // wait until they're done.
        auto rc = futex(lockp, FUTEX_WAIT_BITSET, current, timeout, nullptr, reader_wake_mask);
        if (rc < 0 && errno == ETIMEDOUT && timeout) {
            return value_if_timeout;
        }
        if (rc < 0 && errno != EAGAIN) {
            // Something broke. let's just bail out.
            return errno;
        }
        errno = 0;
        // Reload the 'current' value
        current = AK::atomic_load(lockp);
    }
}
// Returns value_if_okay once the write lock was taken, value_if_timeout when
// a timed wait expired (or when only_once is set and the lock is unavailable),
// or a positive errno if the futex wait failed for another reason.
static int rwlock_wrlock_maybe_timed(pthread_rwlock_t* lockval_p, const struct timespec* timeout = nullptr, bool only_once = false, int value_if_timeout = -1, int value_if_okay = -2)
{
    u32* lockp = reinterpret_cast<u32*>(lockval_p);
    auto current = AK::atomic_load(lockp);
    while (true) {
        // First, see if this is locked for writing, and if there are any readers.
        // if not, lock it.
        // If someone is waiting to write, let them have the lock.
        if (!(current & writer_locked_mask) && ((u16)current) == 0) {
            if (!(current & writer_intent_mask)) {
                auto desired = current | writer_locked_mask | writer_intent_mask;
                auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
                if (!did_exchange)
                    continue;

                // Now that we've locked the value, it's safe to set our thread ID.
                AK::atomic_store(reinterpret_cast<i32*>(lockval_p) + 1, pthread_self());
                return value_if_okay;
            }
        }

        // A non-blocking attempt (pthread_rwlock_trywrlock()) gives up here
        // instead of sleeping. NOTE: the previous `for (; !only_once;)` loop
        // skipped the acquisition attempt entirely in that case, so trylock
        // always failed even on an uncontended lock.
        if (only_once)
            return value_if_timeout;

        // That didn't work, if no one else is waiting for the write bit, set it.
        if (!(current & writer_wake_mask)) {
            auto desired = current | writer_wake_mask | writer_intent_mask;
            auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
            if (!did_exchange)
                continue; // Something interesting happened!
            current = desired;
        }

        // Seems like someone is writing (or is interested in writing and we let them have the lock)
        // wait until they're done.
        auto rc = futex(lockp, FUTEX_WAIT_BITSET, current, timeout, nullptr, writer_wake_mask);
        if (rc < 0 && errno == ETIMEDOUT && timeout) {
            return value_if_timeout;
        }
        if (rc < 0 && errno != EAGAIN) {
            // Something broke. let's just bail out.
            return errno;
        }
        errno = 0;
        // Reload the 'current' value
        current = AK::atomic_load(lockp);
    }
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_rdlock.html
int pthread_rwlock_rdlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    // Block indefinitely (no timeout); success and the (unreachable) timeout
    // case both map to 0.
    return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), nullptr, false, 0, 0);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_timedrdlock.html
// Acquires a read lock, waiting no longer than 'timespec'.
// Returns 0 on success, ETIMEDOUT if the wait expires, EINVAL on a null lock.
int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict lockp, const struct timespec* __restrict timespec)
{
    if (!lockp)
        return EINVAL;

    // Pass explicit POSIX result codes to the helper. The old code relied on
    // the helper's default sentinels (value_if_timeout = -1, value_if_okay = -2)
    // but mapped them the wrong way around, returning 0 on timeout and 1 on
    // success.
    return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), timespec, false, ETIMEDOUT, 0);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_timedwrlock.html
// Acquires the write lock, waiting no longer than 'timespec'.
// Returns 0 on success, ETIMEDOUT if the wait expires, EINVAL on a null lock.
int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict lockp, const struct timespec* __restrict timespec)
{
    if (!lockp)
        return EINVAL;

    // Pass explicit POSIX result codes to the helper. The old code relied on
    // the helper's default sentinels (value_if_timeout = -1, value_if_okay = -2)
    // but mapped them the wrong way around, returning 0 on timeout and 1 on
    // success.
    return rwlock_wrlock_maybe_timed(lockp, timespec, false, ETIMEDOUT, 0);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_tryrdlock.html
int pthread_rwlock_tryrdlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    // Single non-blocking attempt: EBUSY when the lock is unavailable.
    return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), nullptr, true, EBUSY, 0);
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_trywrlock.html
int pthread_rwlock_trywrlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    // Single non-blocking attempt: EBUSY when the lock is unavailable.
    return rwlock_wrlock_maybe_timed(lockp, nullptr, true, EBUSY, 0);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_unlock.html
int pthread_rwlock_unlock(pthread_rwlock_t* lockval_p)
{
    if (!lockval_p)
        return EINVAL;

    // This is a weird API, we don't really know whether we're unlocking write or read...
    auto lockp = reinterpret_cast<u32*>(lockval_p);
    auto current = AK::atomic_load(lockp, AK::MemoryOrder::memory_order_relaxed);
    if (current & writer_locked_mask) {
        // If this lock is locked for writing, its owner better be us!
        // (The owning tid lives in the upper 32 bits of the lock value.)
        auto owner_id = AK::atomic_load(reinterpret_cast<i32*>(lockval_p) + 1);
        auto my_id = pthread_self();
        if (owner_id != my_id)
            return EINVAL; // you don't own this lock, silly.

        // Now just unlock it.
        auto desired = current & ~(writer_locked_mask | writer_intent_mask);
        AK::atomic_store(lockp, desired, AK::MemoryOrder::memory_order_release);

        // Then wake both readers and writers, if any.
        // NOTE(review): 'current' (the raw lock value) is passed as the futex
        // wake count — presumably intended as a generous upper bound; verify
        // against the futex implementation.
        auto rc = futex(lockp, FUTEX_WAKE_BITSET, current, nullptr, nullptr, (current & writer_wake_mask) | reader_wake_mask);

        if (rc < 0)
            return errno;

        return 0;
    }

    // Otherwise this must be a read unlock: decrement the reader count.
    for (;;) {
        auto count = (u16)current;
        if (!count) {
            // Are you crazy? this isn't even locked!
            return EINVAL;
        }
        --count;
        auto desired = (current & 0xffff0000u) | count;
        auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_release);
        if (did_exchange)
            break;
        // tough luck, try again.
    }
    // Finally, unlocked at last!
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_wrlock.html
int pthread_rwlock_wrlock(pthread_rwlock_t* lockp)
{
    if (!lockp)
        return EINVAL;

    // Block indefinitely (no timeout); success and the (unreachable) timeout
    // case both map to 0.
    return rwlock_wrlock_maybe_timed(lockp, nullptr, false, 0, 0);
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_destroy.html
int pthread_rwlockattr_destroy(pthread_rwlockattr_t*)
{
    // rwlock attributes hold no state; destroying one is a no-op.
    return 0;
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_getpshared.html
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t const* __restrict, int* __restrict)
{
    // Not implemented; aborts at runtime if called.
    VERIFY_NOT_REACHED();
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_init.html
int pthread_rwlockattr_init(pthread_rwlockattr_t*)
{
    // Not implemented; aborts at runtime if called.
    VERIFY_NOT_REACHED();
}

// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_setpshared.html
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int)
{
    // Not implemented; aborts at runtime if called.
    VERIFY_NOT_REACHED();
}
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_atfork.html
int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
    // Each non-null callback is registered through the corresponding
    // __pthread_fork_atfork_register_* hook.
    if (prepare)
        __pthread_fork_atfork_register_prepare(prepare);
    if (parent)
        __pthread_fork_atfork_register_parent(parent);
    if (child)
        __pthread_fork_atfork_register_child(child);
    return 0;
}
} // extern "C"

View file

@ -0,0 +1,146 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once

#include <bits/pthread_integration.h>
#include <sched.h>
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <time.h>

__BEGIN_DECLS

// Thread lifetime.
int pthread_create(pthread_t*, pthread_attr_t*, void* (*)(void*), void*);
void pthread_exit(void*) __attribute__((noreturn));
int pthread_kill(pthread_t, int);
void pthread_cleanup_push(void (*)(void*), void*);
void pthread_cleanup_pop(int);
int pthread_join(pthread_t, void**);

// Mutexes.
int pthread_mutex_lock(pthread_mutex_t*);
int pthread_mutex_trylock(pthread_mutex_t* mutex);
int pthread_mutex_unlock(pthread_mutex_t*);
int pthread_mutex_init(pthread_mutex_t*, pthread_mutexattr_t const*);
int pthread_mutex_destroy(pthread_mutex_t*);

// Thread attributes.
int pthread_attr_init(pthread_attr_t*);
int pthread_attr_destroy(pthread_attr_t*);

#define PTHREAD_CREATE_JOINABLE 0
#define PTHREAD_CREATE_DETACHED 1

#define PTHREAD_CANCELED (-1)

int pthread_attr_getdetachstate(pthread_attr_t const*, int*);
int pthread_attr_setdetachstate(pthread_attr_t*, int);
int pthread_attr_getguardsize(pthread_attr_t const*, size_t*);
int pthread_attr_setguardsize(pthread_attr_t*, size_t);
int pthread_attr_getschedparam(pthread_attr_t const*, struct sched_param*);
int pthread_attr_setschedparam(pthread_attr_t*, const struct sched_param*);
int pthread_attr_getstack(pthread_attr_t const*, void**, size_t*);
int pthread_attr_setstack(pthread_attr_t* attr, void*, size_t);
int pthread_attr_getstacksize(pthread_attr_t const*, size_t*);
int pthread_attr_setstacksize(pthread_attr_t*, size_t);

#define PTHREAD_SCOPE_SYSTEM 0
#define PTHREAD_SCOPE_PROCESS 1

int pthread_attr_getscope(pthread_attr_t const*, int*);
int pthread_attr_setscope(pthread_attr_t*, int);

// One-time initialization.
int pthread_once(pthread_once_t*, void (*)(void));
#define PTHREAD_ONCE_INIT 0

// Thread-specific data.
void* pthread_getspecific(pthread_key_t key);
int pthread_setspecific(pthread_key_t key, void const* value);

// Scheduling.
int pthread_getschedparam(pthread_t thread, int* policy, struct sched_param* param);
int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param* param);

#define PTHREAD_MUTEX_NORMAL __PTHREAD_MUTEX_NORMAL
#define PTHREAD_MUTEX_RECURSIVE __PTHREAD_MUTEX_RECURSIVE
#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_NORMAL
#define PTHREAD_MUTEX_INITIALIZER __PTHREAD_MUTEX_INITIALIZER
#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP __PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP

#define PTHREAD_PROCESS_PRIVATE 1
#define PTHREAD_PROCESS_SHARED 2

// Fields: mutex, value, clockid (see pthread_cond.cpp).
#define PTHREAD_COND_INITIALIZER     \
    {                                \
        0, 0, CLOCK_MONOTONIC_COARSE \
    }

// FIXME: Actually implement this!
#define PTHREAD_RWLOCK_INITIALIZER \
    NULL

#define PTHREAD_KEYS_MAX 64
#define PTHREAD_DESTRUCTOR_ITERATIONS 4

int pthread_key_create(pthread_key_t* key, void (*destructor)(void*));
int pthread_key_delete(pthread_key_t key);

// Condition variables and their attributes.
int pthread_cond_broadcast(pthread_cond_t*);
int pthread_cond_init(pthread_cond_t*, pthread_condattr_t const*);
int pthread_cond_signal(pthread_cond_t*);
int pthread_cond_wait(pthread_cond_t*, pthread_mutex_t*);
int pthread_condattr_init(pthread_condattr_t*);
int pthread_condattr_getclock(pthread_condattr_t* attr, clockid_t* clock);
int pthread_condattr_setclock(pthread_condattr_t*, clockid_t);
int pthread_condattr_destroy(pthread_condattr_t*);
int pthread_cond_destroy(pthread_cond_t*);
int pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const struct timespec*);

// Cancellation.
#define PTHREAD_CANCEL_ENABLE 1
#define PTHREAD_CANCEL_DISABLE 2

#define PTHREAD_CANCEL_DEFERRED 1
#define PTHREAD_CANCEL_ASYNCHRONOUS 2

int pthread_cancel(pthread_t);
int pthread_setcancelstate(int state, int* oldstate);
int pthread_setcanceltype(int type, int* oldtype);
void pthread_testcancel(void);

// Spinlocks.
int pthread_spin_destroy(pthread_spinlock_t*);
int pthread_spin_init(pthread_spinlock_t*, int);
int pthread_spin_lock(pthread_spinlock_t*);
int pthread_spin_trylock(pthread_spinlock_t*);
int pthread_spin_unlock(pthread_spinlock_t*);

pthread_t pthread_self(void);
int pthread_detach(pthread_t);
// NOTE: This was previously declared twice (once with and once without
// parameter names); the redundant duplicate has been removed.
int pthread_equal(pthread_t, pthread_t);

// Mutex attributes.
int pthread_mutexattr_init(pthread_mutexattr_t*);
int pthread_mutexattr_settype(pthread_mutexattr_t*, int);
int pthread_mutexattr_gettype(pthread_mutexattr_t*, int*);
int pthread_mutexattr_destroy(pthread_mutexattr_t*);

int pthread_setname_np(pthread_t, char const*);
int pthread_getname_np(pthread_t, char*, size_t);

// Reader-writer locks and their attributes.
int pthread_rwlock_destroy(pthread_rwlock_t*);
int pthread_rwlock_init(pthread_rwlock_t* __restrict, pthread_rwlockattr_t const* __restrict);
int pthread_rwlock_rdlock(pthread_rwlock_t*);
int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict, const struct timespec* __restrict);
int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict, const struct timespec* __restrict);
int pthread_rwlock_tryrdlock(pthread_rwlock_t*);
int pthread_rwlock_trywrlock(pthread_rwlock_t*);
int pthread_rwlock_unlock(pthread_rwlock_t*);
int pthread_rwlock_wrlock(pthread_rwlock_t*);
int pthread_rwlockattr_destroy(pthread_rwlockattr_t*);
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t const* __restrict, int* __restrict);
int pthread_rwlockattr_init(pthread_rwlockattr_t*);
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int);

int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void));

__END_DECLS

View file

@ -0,0 +1,158 @@
/*
* Copyright (c) 2019, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Sergey Bugaev <bugaevc@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/Types.h>
#include <errno.h>
#include <pthread.h>
#include <serenity.h>
#include <sys/types.h>
#include <time.h>
// Condition variable attributes.

// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_init.html
// Initializes a condattr object with the default clock,
// CLOCK_MONOTONIC_COARSE. Always returns 0.
int pthread_condattr_init(pthread_condattr_t* attr)
{
    attr->clockid = CLOCK_MONOTONIC_COARSE;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_destroy.html
// Condattr objects hold no resources, so destruction is a no-op.
int pthread_condattr_destroy(pthread_condattr_t*)
{
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_getclock.html
// Reads back the clock previously stored by pthread_condattr_init() or
// pthread_condattr_setclock(). Always returns 0.
int pthread_condattr_getclock(pthread_condattr_t* attr, clockid_t* clock)
{
    *clock = attr->clockid;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
// Selects the clock against which pthread_cond_timedwait() deadlines are
// measured. Returns 0 on success, or EINVAL for an unsupported clock.
int pthread_condattr_setclock(pthread_condattr_t* attr, clockid_t clock)
{
    bool supported = clock == CLOCK_REALTIME
        || clock == CLOCK_REALTIME_COARSE
        || clock == CLOCK_MONOTONIC
        || clock == CLOCK_MONOTONIC_COARSE
        || clock == CLOCK_MONOTONIC_RAW;
    if (!supported)
        return EINVAL;
    attr->clockid = clock;
    return 0;
}
// Condition variables.

// cond->value is the generation number (number of times the variable has been
// signaled) multiplied by INCREMENT, or'ed with the NEED_TO_WAKE flags. It's
// done this way instead of putting the flags into the high bits because the
// sequence number can easily overflow, which is completely fine but should not
// cause it to corrupt the flags.
static constexpr u32 NEED_TO_WAKE_ONE = 1; // At least one waiter may need a futex_wake(..., 1).
static constexpr u32 NEED_TO_WAKE_ALL = 2; // At least one waiter may need a broadcast wake.
static constexpr u32 INCREMENT = 4;        // Generation counter step; the two low bits hold the flags above.
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_init.html
// Initializes a condition variable. With a null `attr` we default to
// CLOCK_MONOTONIC_COARSE, matching both pthread_condattr_init() and the
// static PTHREAD_COND_INITIALIZER. Always returns 0.
int pthread_cond_init(pthread_cond_t* cond, pthread_condattr_t const* attr)
{
    cond->mutex = nullptr;
    cond->value = 0;
    // NOTE: This previously defaulted to CLOCK_REALTIME_COARSE, which
    // disagreed with the default established by pthread_condattr_init()
    // and by PTHREAD_COND_INITIALIZER.
    cond->clockid = attr ? attr->clockid : CLOCK_MONOTONIC_COARSE;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_destroy.html
// Condition variables hold no external resources, so destruction is a no-op.
int pthread_cond_destroy(pthread_cond_t*)
{
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_wait.html
// Waits with no deadline; implemented as pthread_cond_timedwait() with a
// null abstime.
int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
{
    return pthread_cond_timedwait(cond, mutex, nullptr);
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html
// Atomically releases `mutex` and sleeps until the condition is signaled or
// the absolute deadline `abstime` (measured against cond->clockid) expires.
// Returns 0 on success (spurious wakeups included) or a nonzero errno value
// on failure; in either case the mutex is re-acquired before returning.
int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    // Save the mutex this condition variable is associated with. We don't (yet)
    // support changing this mutex once set.
    pthread_mutex_t* old_mutex = AK::atomic_exchange(&cond->mutex, mutex, AK::memory_order_relaxed);
    if (old_mutex && old_mutex != mutex)
        TODO();
    // Fetch the current value, and record that we're about to wait. Fetching
    // the current value has to be done while we hold the mutex, because the
    // value might change as soon as we unlock it.
    u32 value = AK::atomic_fetch_or(&cond->value, NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL, AK::memory_order_release) | NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL;
    pthread_mutex_unlock(mutex);
    // EAGAIN from futex_wait means the value changed before we went to
    // sleep (i.e. we were signaled in the meantime), so it is not an error.
    int rc = futex_wait(&cond->value, value, abstime, cond->clockid);
    if (rc < 0 && errno != EAGAIN)
        return errno;
    // We might have been re-queued onto the mutex while we were sleeping. Take
    // the pessimistic locking path.
    __pthread_mutex_lock_pessimistic_np(mutex);
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_signal.html
// Wakes at most one thread blocked on the condition variable. Always
// returns 0.
int pthread_cond_signal(pthread_cond_t* cond)
{
    // Increment the generation.
    u32 value = AK::atomic_fetch_add(&cond->value, INCREMENT, AK::memory_order_relaxed);
    // Fast path: nobody's waiting (or at least, nobody has to be woken).
    if (!(value & NEED_TO_WAKE_ONE)) [[likely]]
        return 0;
    // Wake someone, and clear the NEED_TO_WAKE_ONE flag if there was nobody for
    // us to wake, to take the fast path the next time. Since we only learn
    // whether there has been somebody waiting or not after we have tried to
    // wake them, it would make sense for us to clear the flag after trying to
    // wake someone up and seeing there was nobody waiting; but that would race
    // with somebody else setting the flag. Therefore, we do it like this:
    // attempt to clear the flag first...
    value = AK::atomic_fetch_and(&cond->value, ~NEED_TO_WAKE_ONE, AK::memory_order_relaxed);
    // ...check if it was already cleared by someone else...
    if (!(value & NEED_TO_WAKE_ONE)) [[likely]]
        return 0;
    // ...try to wake someone...
    int rc = futex_wake(&cond->value, 1);
    VERIFY(rc >= 0);
    // ...and if we have woken someone, put the flag back.
    if (rc > 0)
        AK::atomic_fetch_or(&cond->value, NEED_TO_WAKE_ONE, AK::memory_order_relaxed);
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_broadcast.html
// Wakes all threads blocked on the condition variable. Always returns 0.
int pthread_cond_broadcast(pthread_cond_t* cond)
{
    // Increment the generation.
    u32 value = AK::atomic_fetch_add(&cond->value, INCREMENT, AK::memory_order_relaxed);
    // Fast path: nobody's waiting (or at least, nobody has to be woken).
    if (!(value & NEED_TO_WAKE_ALL)) [[likely]]
        return 0;
    AK::atomic_fetch_and(&cond->value, ~(NEED_TO_WAKE_ONE | NEED_TO_WAKE_ALL), AK::memory_order_acquire);
    pthread_mutex_t* mutex = AK::atomic_load(&cond->mutex, AK::memory_order_relaxed);
    VERIFY(mutex);
    // Wake one waiter outright and requeue the rest onto the mutex's futex,
    // so they wake up one at a time as the mutex is released instead of all
    // stampeding for it at once.
    int rc = futex(&cond->value, FUTEX_REQUEUE, 1, nullptr, &mutex->lock, INT_MAX);
    VERIFY(rc >= 0);
    return 0;
}

View file

@ -1,87 +0,0 @@
/*
* Copyright (c) 2021, Gunnar Beutner <gunnar@beutner.name>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <LibC/bits/pthread_forward.h>
// Dispatch table of real pthread implementations, installed via
// __init_pthread_forward(). Each stub below asserts that its entry has been
// installed and then forwards the call to it.
static PthreadFunctions s_pthread_functions;

// Installs the table of real implementations that the stubs forward to.
void __init_pthread_forward(PthreadFunctions funcs)
{
    s_pthread_functions = funcs;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    VERIFY(s_pthread_functions.pthread_mutex_trylock);
    return s_pthread_functions.pthread_mutex_trylock(mutex);
}

int pthread_mutex_destroy(pthread_mutex_t* mutex)
{
    VERIFY(s_pthread_functions.pthread_mutex_destroy);
    return s_pthread_functions.pthread_mutex_destroy(mutex);
}

int pthread_mutexattr_init(pthread_mutexattr_t* attr)
{
    VERIFY(s_pthread_functions.pthread_mutexattr_init);
    return s_pthread_functions.pthread_mutexattr_init(attr);
}

int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
{
    VERIFY(s_pthread_functions.pthread_mutexattr_settype);
    return s_pthread_functions.pthread_mutexattr_settype(attr, type);
}

int pthread_mutexattr_destroy(pthread_mutexattr_t* attr)
{
    VERIFY(s_pthread_functions.pthread_mutexattr_destroy);
    return s_pthread_functions.pthread_mutexattr_destroy(attr);
}

int pthread_once(pthread_once_t* self, void (*callback)(void))
{
    VERIFY(s_pthread_functions.pthread_once);
    return s_pthread_functions.pthread_once(self, callback);
}

int pthread_cond_broadcast(pthread_cond_t* cond)
{
    VERIFY(s_pthread_functions.pthread_cond_broadcast);
    return s_pthread_functions.pthread_cond_broadcast(cond);
}

int pthread_cond_init(pthread_cond_t* cond, pthread_condattr_t const* attr)
{
    VERIFY(s_pthread_functions.pthread_cond_init);
    return s_pthread_functions.pthread_cond_init(cond, attr);
}

int pthread_cond_signal(pthread_cond_t* cond)
{
    VERIFY(s_pthread_functions.pthread_cond_signal);
    return s_pthread_functions.pthread_cond_signal(cond);
}

int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex)
{
    VERIFY(s_pthread_functions.pthread_cond_wait);
    return s_pthread_functions.pthread_cond_wait(cond, mutex);
}

int pthread_cond_destroy(pthread_cond_t* cond)
{
    VERIFY(s_pthread_functions.pthread_cond_destroy);
    return s_pthread_functions.pthread_cond_destroy(cond);
}

int pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
{
    VERIFY(s_pthread_functions.pthread_cond_timedwait);
    return s_pthread_functions.pthread_cond_timedwait(cond, mutex, abstime);
}

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2020, Sergey Bugaev <bugaevc@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/Types.h>
#include <pthread.h>
#include <serenity.h>
// State machine for a pthread_once_t control word.
enum State : i32 {
    INITIAL = PTHREAD_ONCE_INIT,  // Callback has not started yet.
    DONE,                         // Callback has completed.
    PERFORMING_NO_WAITERS,        // Callback in progress; nobody is sleeping on it.
    PERFORMING_WITH_WAITERS,      // Callback in progress; sleepers must be woken when it finishes.
};
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_once.html
// Runs `callback` exactly once across all threads sharing `self`; any thread
// that arrives while the callback is still running sleeps on a futex until
// it has completed. Always returns 0.
int pthread_once(pthread_once_t* self, void (*callback)(void))
{
    auto& state = reinterpret_cast<Atomic<State>&>(*self);
    // See what the current state is, and at the same time grab the lock if we
    // got here first. We need acquire ordering here because if we see
    // State::DONE, everything we do after that should "happen after" everything
    // the other thread has done before writing the State::DONE.
    State state2 = State::INITIAL;
    bool have_exchanged = state.compare_exchange_strong(
        state2, State::PERFORMING_NO_WAITERS, AK::memory_order_acquire);
    if (have_exchanged) {
        // We observed State::INITIAL and we've changed it to
        // State::PERFORMING_NO_WAITERS, so it's us who should perform the
        // operation.
        callback();
        // Now, record that we're done.
        state2 = state.exchange(State::DONE, AK::memory_order_release);
        switch (state2) {
        case State::INITIAL:
        case State::DONE:
            VERIFY_NOT_REACHED();
        case State::PERFORMING_NO_WAITERS:
            // The fast path: there's no contention, so we don't have to wake
            // anyone.
            break;
        case State::PERFORMING_WITH_WAITERS:
            futex_wake(self, INT_MAX);
            break;
        }
        return 0;
    }
    // We did not get there first. Let's see if we have to wait.
    // state2 contains the observed state.
    while (true) {
        switch (state2) {
        case State::INITIAL:
            VERIFY_NOT_REACHED();
        case State::DONE:
            // Awesome, nothing to do then.
            return 0;
        case State::PERFORMING_NO_WAITERS:
            // We're going to wait for it, but we have to record that we're
            // waiting and the other thread should wake us up. We need acquire
            // ordering here for the same reason as above.
            have_exchanged = state.compare_exchange_strong(
                state2, State::PERFORMING_WITH_WAITERS, AK::memory_order_acquire);
            if (!have_exchanged) {
                // Something has changed already, reevaluate without waiting.
                continue;
            }
            state2 = State::PERFORMING_WITH_WAITERS;
            [[fallthrough]];
        case State::PERFORMING_WITH_WAITERS:
            // Let's wait for it.
            futex_wait(self, state2, nullptr, 0);
            // We have been woken up, but that might have been due to a signal
            // or something, so we have to reevaluate. We need acquire ordering
            // here for the same reason as above. Hopefully we'll just see
            // State::DONE this time, but who knows.
            state2 = state.load(AK::memory_order_acquire);
            continue;
        }
    }
}

View file

@ -0,0 +1,169 @@
/*
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
* Copyright (c) 2021, Sergey Bugaev <bugaevc@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Atomic.h>
#include <AK/Types.h>
#include <errno.h>
#include <semaphore.h>
#include <serenity.h>
// Whether sem_wait() or sem_post() is responsible for waking any sleeping
// threads. Stored in the top bit of sem_t::value; the low 31 bits hold the
// semaphore's count (hence SEM_VALUE_MAX == INT_MAX).
static constexpr u32 POST_WAKES = 1 << 31;
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_open.html
// Named semaphores are not implemented; always fails with ENOSYS.
sem_t* sem_open(char const*, int, ...)
{
    errno = ENOSYS;
    return nullptr;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_close.html
// Named semaphores are not implemented; always fails with ENOSYS.
int sem_close(sem_t*)
{
    errno = ENOSYS;
    return -1;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_unlink.html
// Named semaphores are not implemented; always fails with ENOSYS.
int sem_unlink(char const*)
{
    errno = ENOSYS;
    return -1;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_init.html
// Initializes an unnamed semaphore with the given starting count. Returns 0
// on success; fails with ENOSYS for process-shared semaphores (unsupported)
// or EINVAL when the count exceeds SEM_VALUE_MAX.
int sem_init(sem_t* sem, int shared, unsigned int value)
{
    // Process-shared semaphores are not supported.
    if (shared != 0) {
        errno = ENOSYS;
        return -1;
    }
    if (value > SEM_VALUE_MAX) {
        errno = EINVAL;
        return -1;
    }
    sem->value = value;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_destroy.html
// Unnamed semaphores own no external resources, so destruction is a no-op.
int sem_destroy(sem_t*)
{
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_getvalue.html
// Stores the current count into *sval, masking off the internal POST_WAKES
// flag bit. Always returns 0.
int sem_getvalue(sem_t* sem, int* sval)
{
    u32 value = AK::atomic_load(&sem->value, AK::memory_order_relaxed);
    *sval = value & ~POST_WAKES;
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_post.html
// Increments the semaphore's count and, if the POST_WAKES flag indicates a
// thread may be sleeping, wakes one waiter. Always returns 0.
int sem_post(sem_t* sem)
{
    u32 value = AK::atomic_fetch_add(&sem->value, 1u, AK::memory_order_release);
    // Fast path: no need to wake.
    if (!(value & POST_WAKES)) [[likely]]
        return 0;
    // Pass the responsibility for waking more threads if more slots become
    // available later to sem_wait() in the thread we're about to wake, as
    // opposed to further sem_post() calls that free up those slots.
    value = AK::atomic_fetch_and(&sem->value, ~POST_WAKES, AK::memory_order_relaxed);
    // Check if another sem_post() call has handled it already.
    if (!(value & POST_WAKES)) [[likely]]
        return 0;
    int rc = futex_wake(&sem->value, 1);
    VERIFY(rc >= 0);
    return 0;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_trywait.html
// Attempts a single non-blocking decrement. Returns 0 on success; fails
// with EAGAIN when the count is zero or when a concurrent update raced
// with our compare-exchange.
int sem_trywait(sem_t* sem)
{
    u32 current = AK::atomic_load(&sem->value, AK::memory_order_relaxed);
    u32 count = current & ~POST_WAKES;
    if (count == 0) {
        errno = EAGAIN;
        return -1;
    }
    // Decrement the count while leaving the wake flag untouched.
    u32 updated = (count - 1) | (current & POST_WAKES);
    if (AK::atomic_compare_exchange_strong(&sem->value, current, updated, AK::memory_order_acquire)) [[likely]]
        return 0;
    // Somebody else changed the value in the meantime; report contention.
    errno = EAGAIN;
    return -1;
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_wait.html
// Blocks until the semaphore can be decremented; equivalent to
// sem_timedwait() with no deadline.
int sem_wait(sem_t* sem)
{
    return sem_timedwait(sem, nullptr);
}
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sem_timedwait.html
// Decrements the semaphore, sleeping until a slot becomes available or the
// absolute CLOCK_REALTIME deadline `abstime` expires (never, if abstime is
// null). Returns 0 on success, or -1 with errno == ETIMEDOUT on timeout.
int sem_timedwait(sem_t* sem, const struct timespec* abstime)
{
    u32 value = AK::atomic_load(&sem->value, AK::memory_order_relaxed);
    bool responsible_for_waking = false;
    while (true) {
        u32 count = value & ~POST_WAKES;
        if (count > 0) [[likely]] {
            // It looks like there are some free slots.
            u32 whether_post_wakes = value & POST_WAKES;
            bool going_to_wake = false;
            if (responsible_for_waking && !whether_post_wakes) {
                // If we have ourselves been woken up previously, and the
                // POST_WAKES flag is not set, that means some more slots might
                // be available now, and it's us who has to wake up additional
                // threads.
                if (count > 1) [[unlikely]]
                    going_to_wake = true;
                // Pass the responsibility for waking up further threads back to
                // sem_post() calls. In particular, we don't want the threads
                // we're about to wake to try to wake anyone else.
                whether_post_wakes = POST_WAKES;
            }
            // Now, try to commit this.
            u32 desired = (count - 1) | whether_post_wakes;
            bool exchanged = AK::atomic_compare_exchange_strong(&sem->value, value, desired, AK::memory_order_acquire);
            if (!exchanged) [[unlikely]]
                // Re-evaluate.
                continue;
            if (going_to_wake) [[unlikely]] {
                int rc = futex_wake(&sem->value, count - 1);
                VERIFY(rc >= 0);
            }
            return 0;
        }
        // We're probably going to sleep, so attempt to set the flag. We do not
        // commit to sleeping yet, though, as setting the flag may fail and
        // cause us to reevaluate what we're doing.
        if (value == 0) {
            bool exchanged = AK::atomic_compare_exchange_strong(&sem->value, value, POST_WAKES, AK::memory_order_relaxed);
            if (!exchanged) [[unlikely]]
                // Re-evaluate.
                continue;
            value = POST_WAKES;
        }
        // At this point, we're committed to sleeping.
        responsible_for_waking = true;
        int rc = futex_wait(&sem->value, value, abstime, CLOCK_REALTIME);
        if (rc < 0 && errno == ETIMEDOUT) {
            // The absolute deadline has passed. Previously this was ignored,
            // which made the loop retry futex_wait() with the same expired
            // deadline and spin forever; POSIX requires failing with
            // ETIMEDOUT instead. (errno is already set by futex_wait().)
            return -1;
        }
        // This is the state we will probably see upon being waked:
        value = 1;
    }
}

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once

#include <limits.h>
#include <pthread.h>
#include <sys/cdefs.h>
#include <sys/types.h>

__BEGIN_DECLS

// A counting semaphore. The low 31 bits of `value` hold the count; the top
// bit is reserved by the implementation as a wake flag (see semaphore.cpp).
typedef struct {
    uint32_t value;
} sem_t;

// Named-semaphore operations (sem_open/sem_close/sem_unlink) are stubs that
// fail with ENOSYS in this implementation.
int sem_close(sem_t*);
int sem_destroy(sem_t*);
int sem_getvalue(sem_t*, int*);
int sem_init(sem_t*, int, unsigned int);
sem_t* sem_open(char const*, int, ...);
int sem_post(sem_t*);
int sem_trywait(sem_t*);
int sem_unlink(char const*);
int sem_wait(sem_t*);
int sem_timedwait(sem_t*, const struct timespec* abstime);

#define SEM_FAILED ((sem_t*)0)
// Only 31 bits are available for the count, so the maximum value is INT_MAX.
#define SEM_VALUE_MAX INT_MAX

__END_DECLS