GSAtomic: Add prefix to macro definitions

This commit is contained in:
hmelder 2024-06-04 20:18:20 +02:00
parent c498475110
commit 97880a285d
3 changed files with 26 additions and 20 deletions

View file

@@ -19,9 +19,9 @@
* Use native C11 atomic operations. _Atomic() should be defined by the
* compiler.
*/
-#define atomic_load_explicit(object, order) \
+#define gs_atomic_load_explicit(object, order) \
__c11_atomic_load(object, order)
-#define atomic_store_explicit(object, desired, order) \
+#define gs_atomic_store_explicit(object, desired, order) \
__c11_atomic_store(object, desired, order)
#else
@@ -33,7 +33,7 @@
#define _Atomic(T) struct { T volatile __val; }
#if __has_builtin(__sync_swap)
/* Clang provides a full-barrier atomic exchange - use it if available. */
-#define atomic_exchange_explicit(object, desired, order) \
+#define gs_atomic_exchange_explicit(object, desired, order) \
((void)(order), __sync_swap(&(object)->__val, desired))
#else
/*
@@ -41,7 +41,7 @@
* practice it is usually a full barrier) so we need an explicit barrier before
* it.
*/
-#define atomic_exchange_explicit(object, desired, order) \
+#define gs_atomic_exchange_explicit(object, desired, order) \
__extension__ ({ \
__typeof__(object) __o = (object); \
__typeof__(desired) __d = (desired); \
@@ -50,10 +50,10 @@ __extension__ ({ \
__sync_lock_test_and_set(&(__o)->__val, __d); \
})
#endif
-#define atomic_load_explicit(object, order) \
+#define gs_atomic_load_explicit(object, order) \
((void)(order), __sync_fetch_and_add(&(object)->__val, 0))
-#define atomic_store_explicit(object, desired, order) \
-((void)atomic_exchange_explicit(object, desired, order))
+#define gs_atomic_store_explicit(object, desired, order) \
+((void)gs_atomic_exchange_explicit(object, desired, order))
#endif
@@ -64,9 +64,9 @@ __extension__ ({ \
/*
* Convenience functions.
*/
-#define atomic_load(object) \
-atomic_load_explicit(object, __ATOMIC_SEQ_CST)
-#define atomic_store(object, desired) \
-atomic_store_explicit(object, desired, __ATOMIC_SEQ_CST)
+#define gs_atomic_load(object) \
+gs_atomic_load_explicit(object, __ATOMIC_SEQ_CST)
+#define gs_atomic_store(object, desired) \
+gs_atomic_store_explicit(object, desired, __ATOMIC_SEQ_CST)
#endif // _GSAtomic_h_

View file

@@ -62,12 +62,18 @@ typedef CONDITION_VARIABLE gs_cond_t;
#define GS_COND_BROADCAST(cond) WakeAllConditionVariable(&(cond))
/* Pthread-like locking primitives defined in NSLock.m */
+#ifdef __cplusplus
+extern "C" {
+#endif
void gs_mutex_init(gs_mutex_t *l, gs_mutex_attr_t attr);
int gs_mutex_lock(gs_mutex_t *l);
int gs_mutex_trylock(gs_mutex_t *l);
int gs_mutex_unlock(gs_mutex_t *l);
int gs_cond_wait(gs_cond_t *cond, gs_mutex_t *mutex);
int gs_cond_timedwait(gs_cond_t *cond, gs_mutex_t *mutex, DWORD millisecs);
+#ifdef __cplusplus
+}
+#endif
/*
* Threading primitives.

View file

@@ -952,12 +952,12 @@ gs_mutex_lock(gs_mutex_t *mutex)
{
assert(mutex->depth == 0);
mutex->depth = 1;
-atomic_store(&mutex->owner, thisThread);
+gs_atomic_store(&mutex->owner, thisThread);
return 0;
}
// needs to be atomic because another thread can concurrently set it
-ownerThread = atomic_load(&mutex->owner);
+ownerThread = gs_atomic_load(&mutex->owner);
if (ownerThread == thisThread)
{
// this thread already owns this lock
@@ -986,7 +986,7 @@ gs_mutex_lock(gs_mutex_t *mutex)
AcquireSRWLockExclusive(&mutex->lock);
assert(mutex->depth == 0);
mutex->depth = 1;
-atomic_store(&mutex->owner, thisThread);
+gs_atomic_store(&mutex->owner, thisThread);
return 0;
}
@@ -1000,12 +1000,12 @@ gs_mutex_trylock(gs_mutex_t *mutex)
{
assert(mutex->depth == 0);
mutex->depth = 1;
-atomic_store(&mutex->owner, thisThread);
+gs_atomic_store(&mutex->owner, thisThread);
return 0;
}
// needs to be atomic because another thread can concurrently set it
-ownerThread = atomic_load(&mutex->owner);
+ownerThread = gs_atomic_load(&mutex->owner);
if (ownerThread == thisThread && mutex->attr == gs_mutex_attr_recursive)
{
// this thread already owns this lock and it's recursive
@@ -1029,7 +1029,7 @@ gs_mutex_unlock(gs_mutex_t *mutex)
case gs_mutex_attr_recursive: {
// return error if lock is not held by this thread
DWORD thisThread = GetCurrentThreadId();
-DWORD ownerThread = atomic_load(&mutex->owner);
+DWORD ownerThread = gs_atomic_load(&mutex->owner);
if (ownerThread != thisThread) {
return EPERM;
}
@@ -1047,7 +1047,7 @@ gs_mutex_unlock(gs_mutex_t *mutex)
{
assert(mutex->depth == 1);
mutex->depth = 0;
-atomic_store(&mutex->owner, 0);
+gs_atomic_store(&mutex->owner, 0);
ReleaseSRWLockExclusive(&mutex->lock);
return 0;
}
@@ -1061,7 +1061,7 @@ gs_cond_timedwait(gs_cond_t *cond, gs_mutex_t *mutex, DWORD millisecs)
assert(mutex->depth == 1);
mutex->depth = 0;
-atomic_store(&mutex->owner, 0);
+gs_atomic_store(&mutex->owner, 0);
if (!SleepConditionVariableSRW(cond, &mutex->lock, millisecs, 0))
{
@@ -1075,7 +1075,7 @@ gs_cond_timedwait(gs_cond_t *cond, gs_mutex_t *mutex, DWORD millisecs)
assert(mutex->depth == 0);
mutex->depth = 1;
-atomic_store(&mutex->owner, GetCurrentThreadId());
+gs_atomic_store(&mutex->owner, GetCurrentThreadId());
return retVal;
}