Index: lib/libthr/thread/thr_private.h
===================================================================
RCS file: /home/ncvs/src/lib/libthr/thread/thr_private.h,v
retrieving revision 1.28
diff -u -r1.28 thr_private.h
--- lib/libthr/thread/thr_private.h	30 Dec 2003 08:34:57 -0000	1.28
+++ lib/libthr/thread/thr_private.h	17 Jan 2004 22:33:43 -0000
@@ -211,6 +211,11 @@
 	spinlock_t		lock;
 };
 
+struct pthread_spinlock {
+	void		*s_owner;
+	unsigned int	s_magic;
+};
+
 /*
  * Flags for mutexes.
  */
Index: lib/libthr/thread/thr_spinlock.c
===================================================================
RCS file: /home/ncvs/src/lib/libthr/thread/thr_spinlock.c,v
retrieving revision 1.8
diff -u -r1.8 thr_spinlock.c
--- lib/libthr/thread/thr_spinlock.c	9 Dec 2003 11:12:11 -0000	1.8
+++ lib/libthr/thread/thr_spinlock.c	17 Jan 2004 23:25:38 -0000
@@ -33,6 +33,9 @@
  *
  */
 
+#include <sys/types.h>
+#include <machine/atomic.h>
+
 #include
 #include
 #include
@@ -43,6 +46,91 @@
 #include
 #include "thr_private.h"
 
+
+#define THR_SPIN_MAGIC		0xdadadada
+#define THR_SPIN_UNOWNED	(void *)0
+/* Fail with EINVAL if the lock is NULL or its magic number is wrong. */
+#define MAGIC_TEST_RETURN_ON_FAIL(l)					\
+	do {								\
+		if ((l) == NULL || atomic_load_acq_int(&((l)->s_magic)) != \
+		    THR_SPIN_MAGIC)					\
+			return (EINVAL);				\
+	} while (0)
+
+__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
+__weak_reference(_pthread_spin_init, pthread_spin_init);
+__weak_reference(_pthread_spin_lock, pthread_spin_lock);
+__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
+__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
+
+int
+_pthread_spin_destroy(pthread_spinlock_t *lock)
+{
+	MAGIC_TEST_RETURN_ON_FAIL((*lock));
+	if (atomic_load_acq_ptr(&(*lock)->s_owner) == THR_SPIN_UNOWNED) {
+		(*lock)->s_magic = 0;
+		free((*lock));
+		*lock = NULL;
+		return (0);
+	}
+	return (EBUSY);
+}
+
+int
+_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
+{
+	struct pthread_spinlock *s;
+
+	if (*lock != NULL) {
+		if (atomic_load_acq_int(&(*lock)->s_magic) == THR_SPIN_MAGIC)
+			return (EBUSY);
+	}
+	s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
+	if (s == NULL)
+		return (ENOMEM);
+	s->s_magic = THR_SPIN_MAGIC;
+	s->s_owner = THR_SPIN_UNOWNED;
+	*lock = s;
+	return (0);
+}
+
+/*
+ * _pthread_spin_lock() keeps spinning until it acquires the lock;
+ * _pthread_spin_trylock() returns EBUSY immediately if the lock is
+ * owned by another thread.  Both return EDEADLK if the calling
+ * thread already owns the lock.
+ */
+int
+_pthread_spin_lock(pthread_spinlock_t *lock)
+{
+	MAGIC_TEST_RETURN_ON_FAIL(*lock);
+	if (atomic_load_acq_ptr(&(*lock)->s_owner) == curthread)
+		return (EDEADLK);
+	while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
+	    (void *)curthread) != 1)
+		;	/* SPIN */
+	return (0);
+}
+
+int
+_pthread_spin_trylock(pthread_spinlock_t *lock)
+{
+	MAGIC_TEST_RETURN_ON_FAIL(*lock);
+	if (atomic_load_acq_ptr(&(*lock)->s_owner) == curthread)
+		return (EDEADLK);
+	if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
+	    (void *)curthread) == 1)
+		return (0);
+	return (EBUSY);
+}
+
+int
+_pthread_spin_unlock(pthread_spinlock_t *lock)
+{
+	MAGIC_TEST_RETURN_ON_FAIL(*lock);
+	if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
+	    THR_SPIN_UNOWNED) == 1)
+		return (0);
+	return (EPERM);
+}
 
 void
 _spinunlock(spinlock_t *lck)
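
For anyone who wants to exercise the new entry points, something like the
following smoke test works through the standard pthread_spin_*() interface.
It is not part of the patch; NTHREADS, NLOOPS, and the file name spintest.c
are arbitrary choices, and it assumes the patched libthr is the threads
library in use (e.g. built with "cc -o spintest spintest.c -lthr").

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NTHREADS	4
#define NLOOPS		100000

static pthread_spinlock_t spin;		/* zero-filled, so init sees NULL */
static int counter;

static void *
worker(void *arg)
{
	int i;

	for (i = 0; i < NLOOPS; i++) {
		pthread_spin_lock(&spin);
		counter++;		/* serialized by the spinlock */
		pthread_spin_unlock(&spin);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t tid[NTHREADS];
	int error, i;

	if (pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE) != 0) {
		fprintf(stderr, "pthread_spin_init failed\n");
		exit(1);
	}
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	/* Expect NTHREADS * NLOOPS if the lock serializes the updates. */
	printf("counter = %d (expected %d)\n", counter, NTHREADS * NLOOPS);

	/* Destroying a held lock must fail with EBUSY. */
	pthread_spin_lock(&spin);
	error = pthread_spin_destroy(&spin);
	printf("destroy while locked: %s\n",
	    error == EBUSY ? "EBUSY (ok)" : "unexpected");
	pthread_spin_unlock(&spin);

	/* Destroying an unowned lock must succeed. */
	error = pthread_spin_destroy(&spin);
	printf("destroy while unlocked: %s\n",
	    error == 0 ? "0 (ok)" : "unexpected");
	return (0);
}

If the locking is correct, the final counter matches NTHREADS * NLOOPS, the
first destroy reports EBUSY, and the second succeeds and NULLs out the lock.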