Index: include/pthread.h
===================================================================
--- include/pthread.h	(revision 212945)
+++ include/pthread.h	(working copy)
@@ -98,7 +98,7 @@
  * Static initialization values.
  */
 #define PTHREAD_MUTEX_INITIALIZER		NULL
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP	NULL
+#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP	((pthread_mutex_t)1)
 #define PTHREAD_COND_INITIALIZER		NULL
 #define PTHREAD_RWLOCK_INITIALIZER		NULL
Index: lib/libthr/thread/thr_cond.c
===================================================================
--- lib/libthr/thread/thr_cond.c	(revision 213160)
+++ lib/libthr/thread/thr_cond.c	(working copy)
@@ -104,6 +104,19 @@
 	return (ret);
 }
 
+#define CHECK_AND_INIT_COND						\
+	if (__predict_false((cv = (*cond)) <= THR_COND_DESTROYED)) {	\
+		if (cv == THR_COND_INITIALIZER) {			\
+			int ret;					\
+			ret = init_static(_get_curthread(), cond);	\
+			if (ret)					\
+				return (ret);				\
+		} else if (cv == THR_COND_DESTROYED) {			\
+			return (EINVAL);				\
+		}							\
+		cv = *cond;						\
+	}
+
 int
 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
 {
@@ -119,16 +132,14 @@
 	struct pthread_cond *cv;
 	int rval = 0;
 
-	if (*cond == NULL)
+	if ((cv = *cond) == THR_COND_INITIALIZER)
+		rval = 0;
+	else if (cv == THR_COND_DESTROYED)
 		rval = EINVAL;
 	else {
 		cv = *cond;
 		THR_UMUTEX_LOCK(curthread, &cv->c_lock);
-		/*
-		 * NULL the caller's pointer now that the condition
-		 * variable has been destroyed:
-		 */
-		*cond = NULL;
+		*cond = THR_COND_DESTROYED;
 		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
 
 		/*
@@ -137,7 +148,6 @@
 		 */
 		free(cv);
 	}
-	/* Return the completion status: */
 	return (rval);
 }
 
@@ -178,22 +188,18 @@
 	struct timespec ts, ts2, *tsp;
 	struct cond_cancel_info info;
 	pthread_cond_t cv;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * If the condition variable is statically initialized,
 	 * perform the dynamic initialization:
 	 */
-	if (__predict_false(*cond == NULL &&
-	    (ret = init_static(curthread, cond)) != 0))
-		return (ret);
+	CHECK_AND_INIT_COND
 
-	_thr_testcancel(curthread);
-	cv = *cond;
 	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
 	ret = _mutex_cv_unlock(mutex, &info.count);
-	if (ret) {
+	if (__predict_false(ret != 0)) {
 		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
 		return (ret);
 	}
@@ -273,11 +279,8 @@
 	 * If the condition variable is statically initialized, perform dynamic
 	 * initialization.
 	 */
-	if (__predict_false(*cond == NULL &&
-	    (ret = init_static(curthread, cond)) != 0))
-		return (ret);
+	CHECK_AND_INIT_COND
 
-	cv = *cond;
 	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
 	if (!broadcast)
 		ret = _thr_ucond_signal(&cv->c_kerncv);
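Illustration (not part of the patch): the standalone sketch below only shows the
sentinel scheme that CHECK_AND_INIT_COND, and the mutex and rwlock variants
further down, rely on. The handle is a pointer whose lowest values are
reserved: NULL means "statically initialized, allocate on first use", and a
small non-NULL constant means "destroyed". The struct and macro names here are
invented for the demo; the real definitions live in thr_private.h. Note that an
ordered comparison of unrelated pointers is not strictly portable ISO C; libthr
can get away with it because the sentinels are small integer casts.

#include <stdio.h>

struct demo_cond { int c_lock; };	/* stand-in for struct pthread_cond */
#define DEMO_COND_INITIALIZER	((struct demo_cond *)NULL)
#define DEMO_COND_DESTROYED	((struct demo_cond *)1)

/*
 * Classify a handle the way CHECK_AND_INIT_COND does: anything at or
 * below the DESTROYED sentinel is not a live object.
 */
static const char *
classify(struct demo_cond *cv)
{
	if (cv <= DEMO_COND_DESTROYED) {
		if (cv == DEMO_COND_INITIALIZER)
			return ("initializer: run init_static() first");
		return ("destroyed: fail with EINVAL");
	}
	return ("live: operate on *cv");
}

int
main(void)
{
	struct demo_cond live;

	printf("%s\n", classify(DEMO_COND_INITIALIZER));
	printf("%s\n", classify(DEMO_COND_DESTROYED));
	printf("%s\n", classify(&live));
	return (0);
}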
Index: lib/libthr/thread/thr_mutex.c
===================================================================
--- lib/libthr/thread/thr_mutex.c	(revision 213160)
+++ lib/libthr/thread/thr_mutex.c	(working copy)
@@ -124,7 +124,7 @@
 
 static int
 mutex_init(pthread_mutex_t *mutex,
-    const pthread_mutexattr_t *mutex_attr,
+    const struct pthread_mutex_attr *mutex_attr,
     void *(calloc_cb)(size_t, size_t))
 {
 	const struct pthread_mutex_attr *attr;
@@ -133,7 +133,7 @@
 	if (mutex_attr == NULL) {
 		attr = &_pthread_mutexattr_default;
 	} else {
-		attr = *mutex_attr;
+		attr = mutex_attr;
 		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
 		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
 			return (EINVAL);
@@ -153,6 +153,10 @@
 	pmutex->m_yieldloops = 0;
 	MUTEX_INIT_LINK(pmutex);
 	switch(attr->m_protocol) {
+	case PTHREAD_PRIO_NONE:
+		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
+		pmutex->m_lock.m_flags = 0;
+		break;
 	case PTHREAD_PRIO_INHERIT:
 		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
 		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
@@ -162,9 +166,6 @@
 		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
 		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
 		break;
-	case PTHREAD_PRIO_NONE:
-		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
-		pmutex->m_lock.m_flags = 0;
 	}
 
 	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
@@ -184,11 +185,12 @@
 
 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
 
-	if (*mutex == NULL)
-		ret = mutex_init(mutex, NULL, calloc);
+	if (*mutex == THR_MUTEX_INITIALIZER)
+		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
+	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
+		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
 	else
 		ret = 0;
-
 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
 
 	return (ret);
@@ -210,7 +212,7 @@
 __pthread_mutex_init(pthread_mutex_t *mutex,
     const pthread_mutexattr_t *mutex_attr)
 {
-	return mutex_init(mutex, mutex_attr, calloc);
+	return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
 }
 
 /* This function is used internally by malloc. */
@@ -223,10 +225,9 @@
 		.m_protocol = PTHREAD_PRIO_NONE,
 		.m_ceiling = 0
 	};
-	static const struct pthread_mutex_attr *pattr = &attr;
 	int ret;
 
-	ret = mutex_init(mutex, (pthread_mutexattr_t *)&pattr, calloc_cb);
+	ret = mutex_init(mutex, &attr, calloc_cb);
 	if (ret == 0)
 		(*mutex)->m_private = 1;
 	return (ret);
@@ -261,19 +262,21 @@
 	uint32_t id;
 	int ret = 0;
 
-	if (__predict_false(*mutex == NULL))
+	m = *mutex;
+	if (m < THR_MUTEX_DESTROYED) {
+		ret = 0;
+	} else if (m == THR_MUTEX_DESTROYED) {
 		ret = EINVAL;
-	else {
+	} else {
 		id = TID(curthread);
 
 		/*
 		 * Try to lock the mutex structure; we only need to
 		 * try once.  If that fails, the mutex is in use.
 		 */
-		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
+		ret = _thr_umutex_trylock(&m->m_lock, id);
 		if (ret)
 			return (ret);
-		m = *mutex;
 
 		/*
 		 * Check the mutex's other fields to see if this mutex is
 		 * in use.  Mostly for priority mutex types, or there
@@ -285,11 +288,7 @@
 			_thr_umutex_unlock(&m->m_lock, id);
 			ret = EBUSY;
 		} else {
-			/*
-			 * Save a pointer to the mutex so it can be free'd
-			 * and set the caller's pointer to NULL.
-			 */
-			*mutex = NULL;
+			*mutex = THR_MUTEX_DESTROYED;
 
 			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
 				set_inherited_priority(curthread, m);
@@ -314,19 +313,30 @@
 	TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
 } while (0)
 
+#define CHECK_AND_INIT_MUTEX						\
+	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
+		if (m == THR_MUTEX_DESTROYED)				\
+			return (EINVAL);				\
+		int ret;						\
+		ret = init_static(_get_curthread(), mutex);		\
+		if (ret)						\
+			return (ret);					\
+		m = *mutex;						\
+	}
+
 static int
-mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
+mutex_trylock_common(pthread_mutex_t *mutex)
 {
-	struct pthread_mutex *m;
+	struct pthread *curthread = _get_curthread();
+	struct pthread_mutex *m = *mutex;
 	uint32_t id;
 	int ret;
 
 	id = TID(curthread);
-	m = *mutex;
 	if (m->m_private)
 		THR_CRITICAL_ENTER(curthread);
 	ret = _thr_umutex_trylock(&m->m_lock, id);
-	if (ret == 0) {
+	if (__predict_true(ret == 0)) {
 		ENQUEUE_MUTEX(curthread, m);
 	} else if (m->m_owner == curthread) {
 		ret = mutex_self_trylock(m);
@@ -339,19 +349,11 @@
 int
 __pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;
 
-	/*
-	 * If the mutex is statically initialized, perform the dynamic
-	 * initialization:
-	 */
-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	return (mutex_trylock_common(curthread, mutex));
+	CHECK_AND_INIT_MUTEX
+
+	return (mutex_trylock_common(mutex));
 }
 
 static int
@@ -372,8 +374,10 @@
 	 * the lock is likely to be released quickly and it is
 	 * faster than entering the kernel
 	 */
-	if (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT))
-		goto sleep_in_kernel;
+	if (__predict_false(
+	    (m->m_lock.m_flags &
+	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
+		goto sleep_in_kernel;
 
 	if (!_thr_is_smp)
 		goto yield_loop;
@@ -421,9 +425,10 @@
 }
 
 static inline int
-mutex_lock_common(struct pthread *curthread, struct pthread_mutex *m,
+mutex_lock_common(struct pthread_mutex *m,
 	const struct timespec *abstime)
 {
+	struct pthread *curthread = _get_curthread();
 	int ret;
 
 	if (m->m_private)
@@ -442,50 +447,25 @@
 int
 __pthread_mutex_lock(pthread_mutex_t *mutex)
 {
-	struct pthread *curthread;
-	struct pthread_mutex *m;
-	int ret;
+	struct pthread_mutex *m;
 
 	_thr_check_init();
 
-	curthread = _get_curthread();
+	CHECK_AND_INIT_MUTEX
 
-	/*
-	 * If the mutex is statically initialized, perform the dynamic
-	 * initialization:
-	 */
-	if (__predict_false((m = *mutex) == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-		m = *mutex;
-	}
-
-	return (mutex_lock_common(curthread, m, NULL));
+	return (mutex_lock_common(m, NULL));
 }
 
 int
 __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
 {
-	struct pthread *curthread;
-	struct pthread_mutex *m;
-	int ret;
+	struct pthread_mutex *m;
 
 	_thr_check_init();
 
-	curthread = _get_curthread();
+	CHECK_AND_INIT_MUTEX
 
-	/*
-	 * If the mutex is statically initialized, perform the dynamic
-	 * initialization:
-	 */
-	if (__predict_false((m = *mutex) == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-		m = *mutex;
-	}
-	return (mutex_lock_common(curthread, m, abstime));
+	return (mutex_lock_common(m, abstime));
 }
 
 int
@@ -495,20 +475,22 @@
 }
 
 int
-_mutex_cv_lock(pthread_mutex_t *m, int count)
+_mutex_cv_lock(pthread_mutex_t *mutex, int count)
 {
+	struct pthread_mutex *m;
 	int ret;
 
-	ret = mutex_lock_common(_get_curthread(), *m, NULL);
+	m = *mutex;
+	ret = mutex_lock_common(m, NULL);
 	if (ret == 0) {
-		(*m)->m_refcount--;
-		(*m)->m_count += count;
+		m->m_refcount--;
+		m->m_count += count;
 	}
 	return (ret);
 }
 
 static int
-mutex_self_trylock(pthread_mutex_t m)
+mutex_self_trylock(struct pthread_mutex *m)
 {
 	int ret;
@@ -536,7 +518,7 @@
 }
 
 static int
-mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
+mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
 {
 	struct timespec ts1, ts2;
 	int ret;
@@ -611,8 +593,12 @@
 	struct pthread_mutex *m;
 	uint32_t id;
 
-	if (__predict_false((m = *mutex) == NULL))
-		return (EINVAL);
+	m = *mutex;
+	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
+		if (m == THR_MUTEX_DESTROYED)
+			return (EINVAL);
+		return (EPERM);
+	}
 
 	/*
 	 * Check if the running thread is not the owner of the mutex.
@@ -629,7 +615,7 @@
 		m->m_owner = NULL;
 		/* Remove the mutex from the threads queue. */
 		MUTEX_ASSERT_IS_OWNED(m);
-		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+		if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
 			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
 		else {
 			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
@@ -649,9 +635,7 @@
 	struct pthread *curthread = _get_curthread();
 	struct pthread_mutex *m;
 
-	if (__predict_false((m = *mutex) == NULL))
-		return (EINVAL);
-
+	m = *mutex;
 	/*
 	 * Check if the running thread is not the owner of the mutex.
 	 */
@@ -667,7 +651,7 @@
 		m->m_owner = NULL;
 		/* Remove the mutex from the threads queue. */
 		MUTEX_ASSERT_IS_OWNED(m);
-		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+		if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
 			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
 		else {
 			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
@@ -685,18 +669,19 @@
 _pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
     int *prioceiling)
 {
+	struct pthread_mutex *m;
 	int ret;
 
-	if (*mutex == NULL)
+	m = *mutex;
+	if ((m <= THR_MUTEX_DESTROYED) ||
+	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
 		ret = EINVAL;
-	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
-		ret = EINVAL;
 	else {
-		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
+		*prioceiling = m->m_lock.m_ceilings[0];
 		ret = 0;
 	}
 
-	return(ret);
+	return (ret);
 }
 
 int
@@ -708,7 +693,8 @@
 	int ret;
 
 	m = *mutex;
-	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+	if ((m <= THR_MUTEX_DESTROYED) ||
+	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
 		return (EINVAL);
 
 	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
@@ -737,61 +723,54 @@
 int
 _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
 {
-	if (*mutex == NULL)
-		return (EINVAL);
-	*count = (*mutex)->m_spinloops;
+	struct pthread_mutex *m;
+
+	CHECK_AND_INIT_MUTEX
+
+	*count = m->m_spinloops;
 	return (0);
 }
 
 int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;
 
-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	(*mutex)->m_spinloops = count;
+	CHECK_AND_INIT_MUTEX
+
+	m->m_spinloops = count;
 	return (0);
 }
 
 int
 _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
 {
-	if (*mutex == NULL)
-		return (EINVAL);
-	*count = (*mutex)->m_yieldloops;
+	struct pthread_mutex *m;
+
+	CHECK_AND_INIT_MUTEX
+
+	*count = m->m_yieldloops;
 	return (0);
 }
 
 int
 __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;
 
-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	(*mutex)->m_yieldloops = count;
+	CHECK_AND_INIT_MUTEX
+
+	m->m_yieldloops = count;
 	return (0);
 }
 
 int
 _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
 {
-	struct pthread *curthread = _get_curthread();
-	int ret;
+	struct pthread_mutex *m;
 
-	if (__predict_false(*mutex == NULL)) {
-		ret = init_static(curthread, mutex);
-		if (__predict_false(ret))
-			return (ret);
-	}
-	return ((*mutex)->m_owner == curthread);
+	m = *mutex;
+	if (m <= THR_MUTEX_DESTROYED)
+		return (0);
+	return (m->m_owner == _get_curthread());
 }
Index: lib/libthr/thread/thr_private.h
===================================================================
--- lib/libthr/thread/thr_private.h	(revision 213161)
+++ lib/libthr/thread/thr_private.h	(working copy)
@@ -125,6 +125,15 @@
 	}							\
 } while (0)
 
+/* XXX These values should be the same as those defined in pthread.h */
+#define THR_MUTEX_INITIALIZER		((struct pthread_mutex *)NULL)
+#define THR_ADAPTIVE_MUTEX_INITIALIZER	((struct pthread_mutex *)1)
+#define THR_MUTEX_DESTROYED		((struct pthread_mutex *)2)
+#define THR_COND_INITIALIZER		((struct pthread_cond *)NULL)
+#define THR_COND_DESTROYED		((struct pthread_cond *)1)
+#define THR_RWLOCK_INITIALIZER		((struct pthread_rwlock *)NULL)
+#define THR_RWLOCK_DESTROYED		((struct pthread_rwlock *)1)
+
 struct pthread_mutex {
 	/*
 	 * Lock for accesses to this structure.
@@ -627,6 +636,7 @@
 
 /* Default mutex attributes: */
 extern struct pthread_mutex_attr _pthread_mutexattr_default __hidden;
+extern struct pthread_mutex_attr _pthread_mutexattr_adaptive_default __hidden;
 
 /* Default condition variable attributes: */
 extern struct pthread_cond_attr _pthread_condattr_default __hidden;
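Illustration (not part of the patch; assumes a FreeBSD libthr with this change
applied): a mutex declared with PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP stays a
bare sentinel until first use, when CHECK_AND_INIT_MUTEX allocates it with
_pthread_mutexattr_adaptive_default. After pthread_mutex_destroy() the handle
is poisoned with THR_MUTEX_DESTROYED instead of being NULLed, so later
operations fail with EINVAL rather than silently re-initializing the mutex or
dereferencing freed memory.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;

int
main(void)
{
	/* First lock triggers dynamic initialization as an adaptive mutex. */
	pthread_mutex_lock(&m);
	pthread_mutex_unlock(&m);

	/* Destroy frees the structure and stores THR_MUTEX_DESTROYED. */
	pthread_mutex_destroy(&m);

	/* The poisoned handle is rejected instead of being re-created. */
	if (pthread_mutex_lock(&m) == EINVAL)
		printf("lock after destroy: EINVAL\n");
	return (0);
}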
Index: lib/libthr/thread/thr_rwlock.c
===================================================================
--- lib/libthr/thread/thr_rwlock.c	(revision 213160)
+++ lib/libthr/thread/thr_rwlock.c	(working copy)
@@ -45,6 +45,19 @@
 __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
 __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
 
+#define CHECK_AND_INIT_RWLOCK						\
+	if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) { \
+		if (prwlock == THR_RWLOCK_INITIALIZER) {		\
+			int ret;					\
+			ret = init_static(_get_curthread(), rwlock);	\
+			if (ret)					\
+				return (ret);				\
+		} else if (prwlock == THR_RWLOCK_DESTROYED) {		\
+			return (EINVAL);				\
+		}							\
+		prwlock = *rwlock;					\
+	}
+
 /*
  * Prototypes
  */
@@ -64,16 +77,17 @@
 int
 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
 {
+	pthread_rwlock_t prwlock;
 	int ret;
 
-	if (rwlock == NULL)
+	prwlock = *rwlock;
+	if (prwlock == THR_RWLOCK_INITIALIZER)
+		ret = 0;
+	else if (prwlock == THR_RWLOCK_DESTROYED)
 		ret = EINVAL;
 	else {
-		pthread_rwlock_t prwlock;
+		*rwlock = THR_RWLOCK_DESTROYED;
 
-		prwlock = *rwlock;
-		*rwlock = NULL;
-
 		free(prwlock);
 		ret = 0;
 	}
@@ -87,7 +101,7 @@
 
 	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
 
-	if (*rwlock == NULL)
+	if (*rwlock == THR_RWLOCK_INITIALIZER)
 		ret = rwlock_init(rwlock, NULL);
 	else
 		ret = 0;
@@ -113,19 +127,8 @@
 	int flags;
 	int ret;
 
-	if (__predict_false(rwlock == NULL))
-		return (EINVAL);
+	CHECK_AND_INIT_RWLOCK
 
-	prwlock = *rwlock;
-
-	/* check for static initialization */
-	if (__predict_false(prwlock == NULL)) {
-		if ((ret = init_static(curthread, rwlock)) != 0)
-			return (ret);
-
-		prwlock = *rwlock;
-	}
-
 	if (curthread->rdlock_count) {
 		/*
 		 * To avoid having to track all the rdlocks held by
@@ -206,19 +209,8 @@
 	int flags;
 	int ret;
 
-	if (__predict_false(rwlock == NULL))
-		return (EINVAL);
+	CHECK_AND_INIT_RWLOCK
 
-	prwlock = *rwlock;
-
-	/* check for static initialization */
-	if (__predict_false(prwlock == NULL)) {
-		if ((ret = init_static(curthread, rwlock)) != 0)
-			return (ret);
-
-		prwlock = *rwlock;
-	}
-
 	if (curthread->rdlock_count) {
 		/*
 		 * To avoid having to track all the rdlocks held by
@@ -250,19 +242,8 @@
 	pthread_rwlock_t prwlock;
 	int ret;
 
-	if (__predict_false(rwlock == NULL))
-		return (EINVAL);
+	CHECK_AND_INIT_RWLOCK
 
-	prwlock = *rwlock;
-
-	/* check for static initialization */
-	if (__predict_false(prwlock == NULL)) {
-		if ((ret = init_static(curthread, rwlock)) != 0)
-			return (ret);
-
-		prwlock = *rwlock;
-	}
-
 	ret = _thr_rwlock_trywrlock(&prwlock->lock);
 	if (ret == 0)
 		prwlock->owner = curthread;
@@ -277,19 +258,8 @@
 	struct timespec ts, ts2, *tsp;
 	int ret;
 
-	if (__predict_false(rwlock == NULL))
-		return (EINVAL);
+	CHECK_AND_INIT_RWLOCK
 
-	prwlock = *rwlock;
-
-	/* check for static initialization */
-	if (__predict_false(prwlock == NULL)) {
-		if ((ret = init_static(curthread, rwlock)) != 0)
-			return (ret);
-
-		prwlock = *rwlock;
-	}
-
 	/*
 	 * POSIX said the validity of the abstimeout parameter need
 	 * not be checked if the lock can be immediately acquired.
@@ -356,12 +326,9 @@
 	int ret;
 	int32_t state;
 
-	if (__predict_false(rwlock == NULL))
-		return (EINVAL);
-
 	prwlock = *rwlock;
-	if (__predict_false(prwlock == NULL))
+	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
 		return (EINVAL);
 
 	state = prwlock->lock.rw_state;
Index: lib/libthr/thread/thr_init.c
===================================================================
--- lib/libthr/thread/thr_init.c	(revision 213160)
+++ lib/libthr/thread/thr_init.c	(working copy)
@@ -92,6 +92,12 @@
 	.m_ceiling = 0
 };
 
+struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
+	.m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
+	.m_protocol = PTHREAD_PRIO_NONE,
+	.m_ceiling = 0
+};
+
 /* Default condition variable attributes: */
 struct pthread_cond_attr _pthread_condattr_default = {
 	.c_pshared = PTHREAD_PROCESS_PRIVATE,
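A final behavior sketch, again assuming the patched libthr rather than anything
this diff guarantees elsewhere: the same poisoning applies to rwlocks. Destroying
a live lock stores THR_RWLOCK_DESTROYED in the handle, so CHECK_AND_INIT_RWLOCK
reports EINVAL instead of treating it as a fresh static initializer. Destroying
a never-used PTHREAD_RWLOCK_INITIALIZER handle, by contrast, succeeds without
poisoning it, matching the new _pthread_rwlock_destroy logic above.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

	pthread_rwlock_rdlock(&rw);	/* forces dynamic initialization */
	pthread_rwlock_unlock(&rw);
	pthread_rwlock_destroy(&rw);	/* handle now THR_RWLOCK_DESTROYED */

	if (pthread_rwlock_rdlock(&rw) == EINVAL)
		printf("rdlock after destroy: EINVAL\n");
	return (0);
}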