From 9f07627f5813345b6a6864404d0f8ae2d001fffe Mon Sep 17 00:00:00 2001
From: Brian Gianforcaro
Date: Sun, 2 May 2021 03:33:30 -0700
Subject: [PATCH] LibPthread: Implement pthread_spinlock_t API.

This change implements the pthread user space spinlock API. The
stress-ng Port requires a functioning version to work correctly.

To facilitate the requirements of the POSIX specification for the API
we implement the spinlock so that the owning tid is the value stored
in the spinlock. This gives us the proper ownership semantics needed
to implement the proper error handling.
---
 Userland/Libraries/LibC/sys/types.h       |  4 +-
 Userland/Libraries/LibPthread/pthread.cpp | 53 ++++++++++++++++++-----
 2 files changed, 46 insertions(+), 11 deletions(-)

diff --git a/Userland/Libraries/LibC/sys/types.h b/Userland/Libraries/LibC/sys/types.h
index 8fb86f0585..dd56e5be97 100644
--- a/Userland/Libraries/LibC/sys/types.h
+++ b/Userland/Libraries/LibC/sys/types.h
@@ -84,7 +84,9 @@ typedef struct __pthread_cond_t {
 
 typedef uint64_t pthread_rwlock_t;
 typedef void* pthread_rwlockattr_t;
-typedef void* pthread_spinlock_t;
+typedef struct __pthread_spinlock_t {
+    int m_lock;
+} pthread_spinlock_t;
 typedef struct __pthread_condattr_t {
     int clockid; // clockid_t
 } pthread_condattr_t;

diff --git a/Userland/Libraries/LibPthread/pthread.cpp b/Userland/Libraries/LibPthread/pthread.cpp
index d9fcb48627..cba9533609 100644
--- a/Userland/Libraries/LibPthread/pthread.cpp
+++ b/Userland/Libraries/LibPthread/pthread.cpp
@@ -559,29 +559,62 @@ int pthread_setcanceltype([[maybe_unused]] int type, [[maybe_unused]] int* oldty
     TODO();
 }
 
-int pthread_spin_destroy([[maybe_unused]] pthread_spinlock_t* lock)
+constexpr static pid_t spinlock_unlock_sentinel = 0;
+int pthread_spin_destroy(pthread_spinlock_t* lock)
 {
-    TODO();
+    auto current = AK::atomic_load(&lock->m_lock);
+
+    if (current != spinlock_unlock_sentinel)
+        return EBUSY;
+
+    return 0;
 }
 
-int pthread_spin_init([[maybe_unused]] pthread_spinlock_t* lock, 
[[maybe_unused]] int shared)
+int pthread_spin_init(pthread_spinlock_t* lock, [[maybe_unused]] int shared)
 {
-    TODO();
+    lock->m_lock = spinlock_unlock_sentinel;
+    return 0;
 }
 
-int pthread_spin_lock([[maybe_unused]] pthread_spinlock_t* lock)
+int pthread_spin_lock(pthread_spinlock_t* lock)
 {
-    TODO();
+    const auto desired = gettid();
+    while (true) {
+        if (AK::atomic_load(&lock->m_lock) == desired)
+            return EDEADLK;
+
+        // Only swap the unlocked sentinel for our tid; expecting the loaded value would steal a held lock.
+        auto expected = spinlock_unlock_sentinel;
+        if (AK::atomic_compare_exchange_strong(&lock->m_lock, expected, desired, AK::MemoryOrder::memory_order_acquire))
+            break;
+    }
+
+    return 0;
 }
 
-int pthread_spin_trylock([[maybe_unused]] pthread_spinlock_t* lock)
+int pthread_spin_trylock(pthread_spinlock_t* lock)
 {
-    TODO();
+    // We expect the current value to be unlocked, as the specification
+    // states that trylock should lock only if it is not held by ANY thread.
+    auto current = spinlock_unlock_sentinel;
+    auto desired = gettid();
+
+    if (AK::atomic_compare_exchange_strong(&lock->m_lock, current, desired, AK::MemoryOrder::memory_order_acquire)) {
+        return 0;
+    } else {
+        return EBUSY;
+    }
 }
 
-int pthread_spin_unlock([[maybe_unused]] pthread_spinlock_t* lock)
+int pthread_spin_unlock(pthread_spinlock_t* lock)
 {
-    TODO();
+    auto current = AK::atomic_load(&lock->m_lock);
+
+    if (gettid() != current)
+        return EPERM;
+
+    AK::atomic_store(&lock->m_lock, spinlock_unlock_sentinel);
+    return 0;
 }
 
 int pthread_equal(pthread_t t1, pthread_t t2)