1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-23 13:57:35 +00:00

LibPthread+LibC: Support PTHREAD_MUTEX_RECURSIVE

This allows SDL to build against our native recursive mutex instead
of providing its own. Also it's just a nice feature to have. :^)
This commit is contained in:
Andreas Kling 2019-12-22 14:21:59 +01:00
parent 523fd6533e
commit 0c97380ee6
3 changed files with 55 additions and 16 deletions

View file

@ -63,7 +63,14 @@ struct utimbuf {
typedef int pthread_t;
typedef int pthread_key_t;
typedef void* pthread_once_t;

// A mutex with support for recursive locking.
// NOTE: 'lock' must stay the first member: the lock/unlock paths treat the
// mutex address as the address of the lock word.
typedef struct __pthread_mutex_t {
    uint32_t lock;   // 0 = unlocked, 1 = locked (manipulated atomically)
    pthread_t owner; // tid of the current holder (valid while locked)
    int level;       // recursion depth beyond the initial lock
    int type;        // PTHREAD_MUTEX_NORMAL or PTHREAD_MUTEX_RECURSIVE
} pthread_mutex_t;

typedef void* pthread_attr_t;
typedef struct __pthread_mutexattr_t {
int type;

View file

@ -100,9 +100,10 @@ int pthread_sigmask(int how, const sigset_t* set, sigset_t* old_set)
// Initialize a mutex. 'attributes' may be null, in which case the mutex
// gets the default (non-recursive) type. Always succeeds.
int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attributes)
{
    mutex->lock = 0;
    mutex->owner = 0;
    mutex->level = 0;
    // A null attribute object means "use the default mutex type".
    mutex->type = attributes ? attributes->type : PTHREAD_MUTEX_NORMAL;
    return 0;
}
@ -114,27 +115,47 @@ int pthread_mutex_destroy(pthread_mutex_t*)
// Acquire 'mutex', spinning (with sched_yield) until it is available.
// Recursive mutexes may be re-locked by their current owner; each re-lock
// bumps 'level' and must be paired with an unlock. Always returns 0.
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    auto* atomic = reinterpret_cast<Atomic<u32>*>(&mutex->lock);
    pthread_t this_thread = pthread_self();
    for (;;) {
        u32 expected = false;
        if (!atomic->compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
            // Someone holds the lock. If that someone is us and the mutex is
            // recursive, just increase the nesting level.
            if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == this_thread) {
                mutex->level++;
                return 0;
            }
            sched_yield();
            continue;
        }
        // We acquired the lock; record ownership for the recursive case.
        mutex->owner = this_thread;
        mutex->level = 0;
        return 0;
    }
}
// Try to acquire 'mutex' without blocking. Returns 0 on success (including a
// recursive re-lock by the current owner) and EBUSY if another thread holds it.
int pthread_mutex_trylock(pthread_mutex_t* mutex)
{
    // NOTE: cast the address of the lock word, not its value.
    auto* atomic = reinterpret_cast<Atomic<u32>*>(&mutex->lock);
    u32 expected = false;
    if (!atomic->compare_exchange_strong(expected, true, AK::memory_order_acq_rel)) {
        if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->owner == pthread_self()) {
            mutex->level++;
            return 0;
        }
        return EBUSY;
    }
    mutex->owner = pthread_self();
    mutex->level = 0;
    return 0;
}
// Release 'mutex'. For a recursive mutex that has been locked more than once,
// this only drops one nesting level; the lock word is released when the last
// level is unlocked. Always returns 0.
// FIXME: Detect (and report EPERM for) unlocking a mutex we don't own.
int pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    if (mutex->type == PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }
    mutex->owner = 0;
    // Clear the lock word last, with release ordering, so the owner/level
    // writes above are visible to the next thread that acquires the mutex.
    reinterpret_cast<Atomic<u32>*>(&mutex->lock)->store(false, AK::memory_order_release);
    return 0;
}
@ -149,6 +170,16 @@ int pthread_mutexattr_destroy(pthread_mutexattr_t*)
return 0;
}
// Set the mutex type on an attribute object.
// Returns EINVAL for a null attribute object or an unsupported type,
// otherwise stores the type and returns 0.
int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
{
    if (attr == nullptr)
        return EINVAL;
    bool supported = (type == PTHREAD_MUTEX_NORMAL) || (type == PTHREAD_MUTEX_RECURSIVE);
    if (!supported)
        return EINVAL;
    attr->type = type;
    return 0;
}
int pthread_attr_init(pthread_attr_t* attributes)
{
auto* impl = new PthreadAttrImpl {};
@ -492,7 +523,7 @@ struct KeyTable {
// FIXME: Invoke key destructors on thread exit!
KeyDestructor destructors[64] { nullptr };
int next { 0 };
pthread_mutex_t mutex { PTHREAD_MUTEX_INITIALIZER };
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
};
struct SpecificTable {

View file

@ -51,8 +51,9 @@ int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param
#define PTHREAD_MUTEX_NORMAL 0
#define PTHREAD_MUTEX_RECURSIVE 1
#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_NORMAL
// Aggregate initializer matching pthread_mutex_t field order:
// { lock, owner, level, type }
#define PTHREAD_MUTEX_INITIALIZER { 0, 0, 0, PTHREAD_MUTEX_DEFAULT }
#define PTHREAD_COND_INITIALIZER { NULL }
// Thread-specific storage keys. NOTE(review): a destructor can be registered
// here, but the implementation has a FIXME saying destructors are not yet
// invoked on thread exit — confirm before relying on cleanup.
int pthread_key_create(pthread_key_t* key, void (*destructor)(void*));
int pthread_key_delete(pthread_key_t key);