Index: lib/libc/gen/sem.c =================================================================== --- lib/libc/gen/sem.c (revision 263398) +++ lib/libc/gen/sem.c (working copy) @@ -329,14 +329,14 @@ _umtx_wait_uint(volatile unsigned *mtx, unsigned i } return _umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT_UINT_PRIVATE, id, - (void *)tm_size, __DECONST(void*, tm_p)); + (void *)tm_size, __DECONST(void*, tm_p), 0); } static int _umtx_wake(volatile void *mtx) { return _umtx_op(__DEVOLATILE(void *, mtx), UMTX_OP_WAKE_PRIVATE, - 1, NULL, NULL); + 1, NULL, NULL, 0); } #define TIMESPEC_SUB(dst, src, val) \ Index: lib/libc/gen/sem_new.c =================================================================== --- lib/libc/gen/sem_new.c (revision 263398) +++ lib/libc/gen/sem_new.c (working copy) @@ -332,7 +332,7 @@ _sem_getvalue(sem_t * __restrict sem, int * __rest static __inline int usem_wake(struct _usem *sem) { - return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL); + return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL, 0); } static __inline int @@ -352,7 +352,7 @@ usem_wait(struct _usem *sem, const struct timespec tm_size = sizeof(timeout); } return _umtx_op(sem, UMTX_OP_SEM_WAIT, 0, - (void *)tm_size, __DECONST(void*, tm_p)); + (void *)tm_size, __DECONST(void*, tm_p), 0); } int Index: lib/libthr/thread/thr_cond.c =================================================================== --- lib/libthr/thread/thr_cond.c (revision 263398) +++ lib/libthr/thread/thr_cond.c (working copy) @@ -242,7 +242,7 @@ cond_wait_user(struct pthread_cond *cvp, struct pt defered = 0; if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0) (void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2, - mp->m_lock.m_flags, 0, 0); + mp->m_lock.m_flags, 0, 0, 0); } if (curthread->nwaiter_defer > 0) { _thr_wake_all(curthread->defer_waiters, Index: lib/libthr/thread/thr_kern.c =================================================================== --- lib/libthr/thread/thr_kern.c (revision 263398) +++ lib/libthr/thread/thr_kern.c (working copy) @@ -208,5 +208,5 @@ _thr_wake_all(unsigned int *waddrs[], int count) for (i = 0; i < count; ++i) *waddrs[i] = 1; - _umtx_op(waddrs, UMTX_OP_NWAKE_PRIVATE, count, NULL, NULL); + _umtx_op(waddrs, UMTX_OP_NWAKE_PRIVATE, count, NULL, NULL, 0); } Index: lib/libthr/thread/thr_mutex.c =================================================================== --- lib/libthr/thread/thr_mutex.c (revision 263398) +++ lib/libthr/thread/thr_mutex.c (working copy) @@ -248,9 +248,9 @@ _mutex_fork(struct pthread *curthread) */ TAILQ_FOREACH(m, &curthread->mutexq, m_qe) - m->m_lock.m_owner = TID(curthread); + m->m_lock.m_owner = (uintptr_t)curthread; TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe) - m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED; + m->m_lock.m_owner = (uintptr_t)curthread | UMUTEX_CONTESTED; } int @@ -316,13 +316,13 @@ mutex_trylock_common(pthread_mutex_t *mutex) { struct pthread *curthread = _get_curthread(); struct pthread_mutex *m = *mutex; - uint32_t id; + uintptr_t newowner; int ret; - id = TID(curthread); + newowner = (uintptr_t)curthread; if (m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); - ret = _thr_umutex_trylock(&m->m_lock, id); + ret = _thr_umutex_trylock(&m->m_lock, newowner); if (__predict_true(ret == 0)) { ENQUEUE_MUTEX(curthread, m); } else if (m->m_owner == curthread) { @@ -347,14 +347,14 @@ static int mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, const struct timespec *abstime) { - uint32_t id, owner; + uintptr_t newowner, x; int count; int ret; if 
(m->m_owner == curthread) return mutex_self_lock(m, abstime); - id = TID(curthread); + newowner = (uintptr_t)curthread; /* * For adaptive mutexes, spin for a bit in the expectation * that if the application requests this mutex type then @@ -371,9 +371,10 @@ mutex_lock_sleep(struct pthread *curthread, struct count = m->m_spinloops; while (count--) { - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { + x = m->m_lock.m_owner; + if ((x & ~UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_ptr(&m->m_lock.m_owner, x, + newowner | x)) { ret = 0; goto done; } @@ -385,9 +386,10 @@ yield_loop: count = m->m_yieldloops; while (count--) { _sched_yield(); - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { + x = m->m_lock.m_owner; + if ((x & ~UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_ptr(&m->m_lock.m_owner, x, + newowner | x)) { ret = 0; goto done; } @@ -396,13 +398,13 @@ yield_loop: sleep_in_kernel: if (abstime == NULL) { - ret = __thr_umutex_lock(&m->m_lock, id); + ret = __thr_umutex_lock(&m->m_lock, newowner); } else if (__predict_false( abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)) { ret = EINVAL; } else { - ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); + ret = __thr_umutex_timedlock(&m->m_lock, newowner, abstime); } done: if (ret == 0) @@ -420,7 +422,7 @@ mutex_lock_common(struct pthread_mutex *m, if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); - if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { + if (_thr_umutex_trylock2(&m->m_lock, (uintptr_t)curthread) == 0) { ENQUEUE_MUTEX(curthread, m); ret = 0; } else { @@ -632,7 +634,7 @@ static int mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer) { struct pthread *curthread = _get_curthread(); - uint32_t id; + uintptr_t curowner; int defered; if (__predict_false(m <= THR_MUTEX_DESTROYED)) { @@ -647,7 +649,7 @@ mutex_unlock_common(struct pthread_mutex *m, int c if (__predict_false(m->m_owner != curthread)) return (EPERM); - id = TID(curthread); + curowner = (uintptr_t)curthread; if (__predict_false( PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { @@ -660,7 +662,7 @@ mutex_unlock_common(struct pthread_mutex *m, int c defered = 0; DEQUEUE_MUTEX(curthread, m); - _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); + _thr_umutex_unlock2(&m->m_lock, curowner, mtx_defer); if (mtx_defer == NULL && defered) { _thr_wake_all(curthread->defer_waiters, Index: lib/libthr/thread/thr_private.h =================================================================== --- lib/libthr/thread/thr_private.h (revision 263398) +++ lib/libthr/thread/thr_private.h (working copy) @@ -554,27 +554,27 @@ struct pthread { } while (0) #define THR_UMUTEX_TRYLOCK(thrd, lck) \ - _thr_umutex_trylock((lck), TID(thrd)) + _thr_umutex_trylock((lck), (uintptr_t)(thrd)) #define THR_UMUTEX_LOCK(thrd, lck) \ - _thr_umutex_lock((lck), TID(thrd)) + _thr_umutex_lock((lck), (uintptr_t)(thrd)) #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ - _thr_umutex_timedlock((lck), TID(thrd), (timo)) + _thr_umutex_timedlock((lck), (uintptr_t)(thrd), (timo)) #define THR_UMUTEX_UNLOCK(thrd, lck) \ - _thr_umutex_unlock((lck), TID(thrd)) + _thr_umutex_unlock((lck), (uintptr_t)(thrd)) #define THR_LOCK_ACQUIRE(thrd, lck) \ do { \ (thrd)->locklevel++; \ - _thr_umutex_lock(lck, TID(thrd)); \ + _thr_umutex_lock(lck, (uintptr_t)(thrd)); \ } while (0) 
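For reference, this is what a direct caller of the widened interface looks like. The sketch below assumes the whole patch is applied (kernel, sys/umtx.h and the libc stub); on a stock tree _umtx_op(2) still takes five arguments. Lock and unlock operations pass the caller's owner token, here simply the thread pointer from pthread_self() cast to uintptr_t, and operations that transfer no ownership pass 0, exactly as the libc and libthr call sites above now do.

/*
 * Minimal direct user of the six-argument _umtx_op() from this patch.
 * Requires the patched headers and kernel; not buildable on a stock tree.
 */
#include <sys/types.h>
#include <sys/umtx.h>

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        struct umutex m;
        uintptr_t me;

        memset(&m, 0, sizeof(m));       /* m_owner = UMUTEX_UNOWNED, flags = 0 */
        me = (uintptr_t)pthread_self(); /* aligned, non-zero owner token */

        /* Lock and unlock carry the owner in the new last argument. */
        if (_umtx_op(&m, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL, me) == -1) {
                perror("UMTX_OP_MUTEX_LOCK");
                return (1);
        }
        if (_umtx_op(&m, UMTX_OP_MUTEX_UNLOCK, 0, NULL, NULL, me) == -1) {
                perror("UMTX_OP_MUTEX_UNLOCK");
                return (1);
        }

        /* Wake-style operations have no owner to pass, hence the 0. */
        (void)_umtx_op(&m, UMTX_OP_WAKE_PRIVATE, 1, NULL, NULL, 0);
        return (0);
}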
#define THR_LOCK_ACQUIRE_SPIN(thrd, lck) \ do { \ (thrd)->locklevel++; \ - _thr_umutex_lock_spin(lck, TID(thrd)); \ + _thr_umutex_lock_spin(lck, (uintptr_t)(thrd)); \ } while (0) #ifdef _PTHREADS_INVARIANTS @@ -590,7 +590,7 @@ do { \ #define THR_LOCK_RELEASE(thrd, lck) \ do { \ THR_ASSERT_LOCKLEVEL(thrd); \ - _thr_umutex_unlock((lck), TID(thrd)); \ + _thr_umutex_unlock((lck), (uintptr_t)(thrd)); \ (thrd)->locklevel--; \ _thr_ast(thrd); \ } while (0) Index: lib/libthr/thread/thr_umtx.c =================================================================== --- lib/libthr/thread/thr_umtx.c (revision 263398) +++ lib/libthr/thread/thr_umtx.c (working copy) @@ -31,9 +31,10 @@ #include "thr_umtx.h" #ifndef HAS__UMTX_OP_ERR -int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2) +int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2, + uintptr_t owner) { - if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1) + if (_umtx_op(obj, op, val, uaddr, uaddr2, owner) == -1) return (errno); return (0); } @@ -55,44 +56,45 @@ _thr_urwlock_init(struct urwlock *rwl) } int -__thr_umutex_lock(struct umutex *mtx, uint32_t id) +__thr_umutex_lock(struct umutex *mtx, uintptr_t newowner) { - uint32_t owner; + uintptr_t x; if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { for (;;) { /* wait in kernel */ - _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); - - owner = mtx->m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0 && - atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) + _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0, + newowner); + x = mtx->m_owner; + if ((x & ~UMUTEX_CONTESTED) == 0 && + atomic_cmpset_acq_ptr(&mtx->m_owner, x, + newowner | x)) return (0); } } - return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); + return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0, newowner); } #define SPINLOOPS 1000 int -__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) +__thr_umutex_lock_spin(struct umutex *mtx, uintptr_t newowner) { - uint32_t owner; + uintptr_t x; if (!_thr_is_smp) - return __thr_umutex_lock(mtx, id); + return __thr_umutex_lock(mtx, newowner); if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { for (;;) { int count = SPINLOOPS; while (count--) { - owner = mtx->m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32( + x = mtx->m_owner; + if ((x & ~UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_ptr( &mtx->m_owner, - owner, id|owner)) { + x, newowner | x)) { return (0); } } @@ -100,20 +102,21 @@ int } /* wait in kernel */ - _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); + _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0, + newowner); } } - return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); + return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0, newowner); } int -__thr_umutex_timedlock(struct umutex *mtx, uint32_t id, +__thr_umutex_timedlock(struct umutex *mtx, uintptr_t newowner, const struct timespec *abstime) { struct _umtx_time *tm_p, timeout; size_t tm_size; - uint32_t owner; + uintptr_t x; int ret; if (abstime == NULL) { @@ -132,16 +135,19 @@ int /* wait in kernel */ ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, - (void *)tm_size, __DECONST(void *, tm_p)); + (void *)tm_size, __DECONST(void *, tm_p), + newowner); /* now try to lock it */ - owner = mtx->m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0 && - atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) + x = mtx->m_owner; + if ((x & ~UMUTEX_CONTESTED) == 0 && + atomic_cmpset_acq_ptr(&mtx->m_owner, x, + newowner | x)) return (0); } else { 
ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, - (void *)tm_size, __DECONST(void *, tm_p)); + (void *)tm_size, __DECONST(void *, tm_p), + newowner); if (ret == 0) break; } @@ -152,22 +158,23 @@ int } int -__thr_umutex_unlock(struct umutex *mtx, uint32_t id) +__thr_umutex_unlock(struct umutex *mtx, uintptr_t curowner) { - return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0); + return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0, curowner); } int __thr_umutex_trylock(struct umutex *mtx) { - return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0); + return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0, 0); } int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, uint32_t *oldceiling) { - return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0); + return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0, + 0); } int @@ -177,7 +184,7 @@ _thr_umtx_wait(volatile long *mtx, long id, const timeout->tv_nsec <= 0))) return (ETIMEDOUT); return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0, - __DECONST(void*, timeout)); + __DECONST(void*, timeout), 0); } int @@ -188,7 +195,7 @@ _thr_umtx_wait_uint(volatile u_int *mtx, u_int id, return (ETIMEDOUT); return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, - __DECONST(void*, timeout)); + __DECONST(void*, timeout), 0); } int @@ -211,14 +218,14 @@ _thr_umtx_timedwait_uint(volatile u_int *mtx, u_in return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, - (void *)tm_size, __DECONST(void *, tm_p)); + (void *)tm_size, __DECONST(void *, tm_p), 0); } int _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared) { return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? 
UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, - nr_wakeup, 0, 0); + nr_wakeup, 0, 0, 0); } void @@ -231,14 +238,17 @@ int _thr_ucond_wait(struct ucond *cv, struct umutex *m, const struct timespec *timeout, int flags) { + uintptr_t curowner; + + curowner = (uintptr_t)_get_curthread(); + if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))) { - struct pthread *curthread = _get_curthread(); - _thr_umutex_unlock(m, TID(curthread)); + _thr_umutex_unlock(m, curowner); return (ETIMEDOUT); } - return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, - m, __DECONST(void*, timeout)); + return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m, + __DECONST(void*, timeout), curowner); } int @@ -246,7 +256,7 @@ _thr_ucond_signal(struct ucond *cv) { if (!cv->c_has_waiters) return (0); - return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL); + return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL, 0); } int @@ -254,7 +264,7 @@ _thr_ucond_broadcast(struct ucond *cv) { if (!cv->c_has_waiters) return (0); - return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL); + return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL, 0); } int @@ -274,7 +284,8 @@ __thr_rwlock_rdlock(struct urwlock *rwlock, int fl tm_p = &timeout; tm_size = sizeof(timeout); } - return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, (void *)tm_size, tm_p); + return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, (void *)tm_size, + tm_p, 0); } int @@ -293,13 +304,14 @@ __thr_rwlock_wrlock(struct urwlock *rwlock, const tm_p = &timeout; tm_size = sizeof(timeout); } - return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size, tm_p); + return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size, + tm_p, 0); } int __thr_rwlock_unlock(struct urwlock *rwlock) { - return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL); + return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL, 0); } void Index: lib/libthr/thread/thr_umtx.h =================================================================== --- lib/libthr/thread/thr_umtx.h (revision 263398) +++ lib/libthr/thread/thr_umtx.h (working copy) @@ -35,12 +35,12 @@ #define DEFAULT_UMUTEX {0,0,{0,0},{0,0,0,0}} #define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}} -int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden; -int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden; -int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden; -int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, +int _umtx_op_err(void *, int op, u_long, void *, void *, uintptr_t) __hidden; +int __thr_umutex_lock(struct umutex *mtx, uintptr_t newowner) __hidden; +int __thr_umutex_lock_spin(struct umutex *mtx, uintptr_t newowner) __hidden; +int __thr_umutex_timedlock(struct umutex *mtx, uintptr_t newowner, const struct timespec *timeout) __hidden; -int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden; +int __thr_umutex_unlock(struct umutex *mtx, uintptr_t curowner) __hidden; int __thr_umutex_trylock(struct umutex *mtx) __hidden; int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, uint32_t *oldceiling) __hidden; @@ -73,9 +73,9 @@ void _thr_rwl_wrlock(struct urwlock *rwlock) __hid void _thr_rwl_unlock(struct urwlock *rwlock) __hidden; static inline int -_thr_umutex_trylock(struct umutex *mtx, uint32_t id) +_thr_umutex_trylock(struct umutex *mtx, uintptr_t newowner) { - if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id)) + if (atomic_cmpset_acq_ptr(&mtx->m_owner, UMUTEX_UNOWNED, newowner)) return (0); if ((mtx->m_flags & 
UMUTEX_PRIO_PROTECT) == 0) return (EBUSY); @@ -83,72 +83,74 @@ static inline int } static inline int -_thr_umutex_trylock2(struct umutex *mtx, uint32_t id) +_thr_umutex_trylock2(struct umutex *mtx, uintptr_t newowner) { - if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0) + if (atomic_cmpset_acq_ptr(&mtx->m_owner, UMUTEX_UNOWNED, newowner) != 0) return (0); - if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED && + if (mtx->m_owner == UMUTEX_CONTESTED && __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0)) - if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED)) + if (atomic_cmpset_acq_ptr(&mtx->m_owner, UMUTEX_CONTESTED, + newowner | UMUTEX_CONTESTED)) return (0); return (EBUSY); } static inline int -_thr_umutex_lock(struct umutex *mtx, uint32_t id) +_thr_umutex_lock(struct umutex *mtx, uintptr_t newowner) { - if (_thr_umutex_trylock2(mtx, id) == 0) + if (_thr_umutex_trylock2(mtx, newowner) == 0) return (0); - return (__thr_umutex_lock(mtx, id)); + return (__thr_umutex_lock(mtx, newowner)); } static inline int -_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) +_thr_umutex_lock_spin(struct umutex *mtx, uintptr_t newowner) { - if (_thr_umutex_trylock2(mtx, id) == 0) + if (_thr_umutex_trylock2(mtx, newowner) == 0) return (0); - return (__thr_umutex_lock_spin(mtx, id)); + return (__thr_umutex_lock_spin(mtx, newowner)); } static inline int -_thr_umutex_timedlock(struct umutex *mtx, uint32_t id, +_thr_umutex_timedlock(struct umutex *mtx, uintptr_t newowner, const struct timespec *timeout) { - if (_thr_umutex_trylock2(mtx, id) == 0) + if (_thr_umutex_trylock2(mtx, newowner) == 0) return (0); - return (__thr_umutex_timedlock(mtx, id, timeout)); + return (__thr_umutex_timedlock(mtx, newowner, timeout)); } static inline int -_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer) +_thr_umutex_unlock2(struct umutex *mtx, uintptr_t curowner, int *defer) { + uintptr_t x; uint32_t flags = mtx->m_flags; if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { - uint32_t owner; do { - owner = mtx->m_owner; - if (__predict_false((owner & ~UMUTEX_CONTESTED) != id)) + x = mtx->m_owner; + if (__predict_false((x & ~UMUTEX_CONTESTED) != + curowner)) return (EPERM); - } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, - owner, UMUTEX_UNOWNED))); - if ((owner & UMUTEX_CONTESTED)) { + } while (__predict_false(!atomic_cmpset_rel_ptr(&mtx->m_owner, + x, UMUTEX_UNOWNED))); + if ((x & UMUTEX_CONTESTED)) { if (defer == NULL) - (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0); + (void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0, 0); else *defer = 1; } return (0); } - if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED)) + if (atomic_cmpset_rel_ptr(&mtx->m_owner, curowner, UMUTEX_UNOWNED)) return (0); - return (__thr_umutex_unlock(mtx, id)); + return (__thr_umutex_unlock(mtx, curowner)); } static inline int -_thr_umutex_unlock(struct umutex *mtx, uint32_t id) +_thr_umutex_unlock(struct umutex *mtx, uintptr_t curowner) { - return _thr_umutex_unlock2(mtx, id, NULL); + return _thr_umutex_unlock2(mtx, curowner, NULL); } static inline int Index: lib/libthr/thread/thr_rtld.c =================================================================== --- lib/libthr/thread/thr_rtld.c (revision 263398) +++ lib/libthr/thread/thr_rtld.c (working copy) @@ -188,7 +188,7 @@ _thr_rtld_init(void) curthread = _get_curthread(); /* force to resolve _umtx_op PLT */ - _umtx_op_err((struct umtx *)&dummy, UMTX_OP_WAKE, 1, 0, 
0); + _umtx_op_err((struct umtx *)&dummy, UMTX_OP_WAKE, 1, 0, 0, 0); /* force to resolve errno() PLT */ __error(); Index: sys/kern/kern_umtx.c =================================================================== --- sys/kern/kern_umtx.c (revision 263398) +++ sys/kern/kern_umtx.c (working copy) @@ -70,6 +70,14 @@ __FBSDID("$FreeBSD$"); (((w) > (sw)) || ((w) == (sw) && (f) > (sf))) #endif +CTASSERT(sizeof(long) == sizeof(uintptr_t)); +#define UMTX_OWNER_ULOAD(m) \ + fuword(__DEVOLATILE(uintptr_t *, &(m)->m_owner)) +#define UMTX_OWNER_USTORE(m, curowner, newowner) \ + casuword(&(m)->m_owner, (curowner), (newowner)) +#define UMTX_OWNER_UDEFSTORE(m, newowner) \ + suword(__DEVOLATILE(uintptr_t *, &(m)->m_owner), (newowner)) + /* Priority inheritance mutex info. */ struct umtx_pi { /* Owner thread */ @@ -227,7 +235,8 @@ static int umtxq_sleep(struct umtx_q *uq, const ch static int umtxq_count(struct umtx_key *key); static struct umtx_pi *umtx_pi_alloc(int); static void umtx_pi_free(struct umtx_pi *pi); -static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags); +static int do_unlock_pp(struct thread *td, struct umutex *m, + uintptr_t curowner, uint32_t flags); static void umtx_thread_cleanup(struct thread *td); static void umtx_exec_hook(void *arg __unused, struct proc *p __unused, struct image_params *imgp __unused); @@ -902,15 +911,14 @@ kern_umtx_wake(struct thread *td, void *uaddr, int * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. */ static int -do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, - struct _umtx_time *timeout, int mode) +do_lock_normal(struct thread *td, struct umutex *m, uintptr_t newowner, + uint32_t flags, struct _umtx_time *timeout, int mode) { struct abs_timeout timo; struct umtx_q *uq; - uint32_t owner, old, id; + uintptr_t owner, oldowner; int error = 0; - id = td->td_tid; uq = td->td_umtxq; if (timeout != NULL) @@ -921,7 +929,7 @@ static int * can fault on any access. */ for (;;) { - owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); if (mode == _UMUTEX_WAIT) { if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED) return (0); @@ -929,7 +937,7 @@ static int /* * Try the uncontested case. This should be done in userland. */ - owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); + owner = UMTX_OWNER_USTORE(m, UMUTEX_UNOWNED, newowner); /* The acquire succeeded. */ if (owner == UMUTEX_UNOWNED) @@ -941,8 +949,9 @@ static int /* If no one owns it but it is contested try to acquire it. */ if (owner == UMUTEX_CONTESTED) { - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); + owner = UMTX_OWNER_USTORE(m, + UMUTEX_CONTESTED, + newowner | UMUTEX_CONTESTED); if (owner == UMUTEX_CONTESTED) return (0); @@ -985,10 +994,11 @@ static int * either some one else has acquired the lock or it has been * released. */ - old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED); + oldowner = UMTX_OWNER_USTORE(m, owner, + owner | UMUTEX_CONTESTED); /* The address was invalid. */ - if (old == -1) { + if (oldowner == -1) { umtxq_lock(&uq->uq_key); umtxq_remove(uq); umtxq_unbusy(&uq->uq_key); @@ -1004,7 +1014,7 @@ static int */ umtxq_lock(&uq->uq_key); umtxq_unbusy(&uq->uq_key); - if (old == owner) + if (oldowner == owner) error = umtxq_sleep(uq, "umtxn", timeout == NULL ? NULL : &timo); umtxq_remove(uq); @@ -1022,31 +1032,31 @@ static int * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. 
*/ static int -do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_normal(struct thread *td, struct umutex *m, uintptr_t curowner, + uint32_t flags) { struct umtx_key key; - uint32_t owner, old, id; + uintptr_t owner, oldowner; int error; int count; - id = td->td_tid; /* * Make sure we own this mtx. */ - owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & ~UMUTEX_CONTESTED) != curowner) return (EPERM); if ((owner & UMUTEX_CONTESTED) == 0) { - old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); - if (old == -1) + oldowner = UMTX_OWNER_USTORE(m, owner, UMUTEX_UNOWNED); + if (oldowner == -1) return (EFAULT); - if (old == owner) + if (oldowner == owner) return (0); - owner = old; + owner = oldowner; } /* We should only ever be in here for contested locks */ @@ -1064,16 +1074,16 @@ static int * there is zero or one thread only waiting for it. * Otherwise, it must be marked as contested. */ - old = casuword32(&m->m_owner, owner, + oldowner = UMTX_OWNER_USTORE(m, owner, count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); umtxq_lock(&key); umtxq_signal(&key,1); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); - if (old == -1) + if (oldowner == -1) return (EFAULT); - if (old != owner) + if (oldowner != owner) return (EINVAL); return (0); } @@ -1086,12 +1096,12 @@ static int do_wake_umutex(struct thread *td, struct umutex *m) { struct umtx_key key; - uint32_t owner; + uintptr_t owner; uint32_t flags; int error; int count; - owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); if (owner == -1) return (EFAULT); @@ -1111,7 +1121,7 @@ do_wake_umutex(struct thread *td, struct umutex *m umtxq_unlock(&key); if (count <= 1) - owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED); + owner = UMTX_OWNER_USTORE(m, UMUTEX_CONTESTED, UMUTEX_UNOWNED); umtxq_lock(&key); if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0) @@ -1129,7 +1139,7 @@ static int do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags) { struct umtx_key key; - uint32_t owner, old; + uintptr_t owner, oldowner; int type; int error; int count; @@ -1162,29 +1172,29 @@ do_wake2_umutex(struct thread *td, struct umutex * * any memory. */ if (count > 1) { - owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); while ((owner & UMUTEX_CONTESTED) ==0) { - old = casuword32(&m->m_owner, owner, - owner|UMUTEX_CONTESTED); - if (old == owner) + oldowner = UMTX_OWNER_USTORE(m, owner, + owner | UMUTEX_CONTESTED); + if (oldowner == owner) break; - owner = old; - if (old == -1) + owner = oldowner; + if (oldowner == -1) break; error = umtxq_check_susp(td); if (error != 0) break; } } else if (count == 1) { - owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); while ((owner & ~UMUTEX_CONTESTED) != 0 && (owner & UMUTEX_CONTESTED) == 0) { - old = casuword32(&m->m_owner, owner, - owner|UMUTEX_CONTESTED); - if (old == owner) + oldowner = UMTX_OWNER_USTORE(m, owner, + owner | UMUTEX_CONTESTED); + if (oldowner == owner) break; - owner = old; - if (old == -1) + owner = oldowner; + if (oldowner == -1) break; error = umtxq_check_susp(td); if (error != 0) @@ -1569,16 +1579,15 @@ umtx_pi_insert(struct umtx_pi *pi) * Lock a PI mutex. 
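Restating what do_unlock_normal() above now checks against the caller-supplied token instead of td_tid: the stored owner (contested bit masked off) must match, an uncontested word is CASed straight back to UMUTEX_UNOWNED, and a contested word additionally requires a wakeup. A standalone sketch with C11 atomics in place of casuword(); the wakeup step is only a comment here.

/*
 * Sketch of the unlock rules enforced on the widened owner word.
 * Not the kernel code: no user-memory faults, no sleep queues.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DU_UNOWNED   ((uintptr_t)0)
#define DU_CONTESTED ((uintptr_t)1)

static int
demo_unlock(_Atomic uintptr_t *word, uintptr_t curowner)
{
        uintptr_t x;

        x = atomic_load_explicit(word, memory_order_relaxed);
        if ((x & ~DU_CONTESTED) != curowner)
                return (EPERM);                 /* not the owner */
        for (;;) {
                if (atomic_compare_exchange_weak_explicit(word, &x,
                    DU_UNOWNED, memory_order_release, memory_order_relaxed))
                        break;
                if ((x & ~DU_CONTESTED) != curowner)
                        return (EPERM);         /* raced with another thread */
        }
        if (x & DU_CONTESTED) {
                /* do_unlock_normal() wakes a queued waiter at this point. */
        }
        return (0);
}

int
main(void)
{
        _Atomic uintptr_t word;
        int token;
        uintptr_t me = (uintptr_t)&token;

        atomic_store(&word, me | DU_CONTESTED); /* owned by us, contested */
        if (demo_unlock(&word, me) != 0)
                return (1);
        printf("unlock with wrong owner -> %d (EPERM is %d)\n",
            demo_unlock(&word, me), EPERM);
        return (0);
}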
*/ static int -do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, - struct _umtx_time *timeout, int try) +do_lock_pi(struct thread *td, struct umutex *m, uintptr_t newowner, + uint32_t flags, struct _umtx_time *timeout, int try) { struct abs_timeout timo; struct umtx_q *uq; struct umtx_pi *pi, *new_pi; - uint32_t id, owner, old; + uintptr_t owner, oldowner; int error; - id = td->td_tid; uq = td->td_umtxq; if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags), @@ -1619,7 +1628,7 @@ static int /* * Try the uncontested case. This should be done in userland. */ - owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); + owner = UMTX_OWNER_USTORE(m, UMUTEX_UNOWNED, newowner); /* The acquire succeeded. */ if (owner == UMUTEX_UNOWNED) { @@ -1635,8 +1644,8 @@ static int /* If no one owns it but it is contested try to acquire it. */ if (owner == UMUTEX_CONTESTED) { - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); + owner = UMTX_OWNER_USTORE(m, UMUTEX_CONTESTED, + newowner | UMUTEX_CONTESTED); if (owner == UMUTEX_CONTESTED) { umtxq_lock(&uq->uq_key); @@ -1683,10 +1692,11 @@ static int * either some one else has acquired the lock or it has been * released. */ - old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED); + oldowner = UMTX_OWNER_USTORE(m, owner, + owner | UMUTEX_CONTESTED); /* The address was invalid. */ - if (old == -1) { + if (oldowner == -1) { umtxq_lock(&uq->uq_key); umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); @@ -1700,7 +1710,7 @@ static int * and we need to retry or we lost a race to the thread * unlocking the umtx. */ - if (old == owner) + if (oldowner == owner) error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED, "umtxpi", timeout == NULL ? NULL : &timo); else { @@ -1725,35 +1735,35 @@ static int * Unlock a PI mutex. */ static int -do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pi(struct thread *td, struct umutex *m, uintptr_t curowner, + uint32_t flags) { struct umtx_key key; struct umtx_q *uq_first, *uq_first2, *uq_me; struct umtx_pi *pi, *pi2; - uint32_t owner, old, id; + uintptr_t owner, oldowner; int error; int count; int pri; - id = td->td_tid; /* * Make sure we own this mtx. */ - owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & ~UMUTEX_CONTESTED) != curowner) return (EPERM); /* This should be done in userland */ if ((owner & UMUTEX_CONTESTED) == 0) { - old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); - if (old == -1) + oldowner = UMTX_OWNER_USTORE(m, owner, UMUTEX_UNOWNED); + if (oldowner == -1) return (EFAULT); - if (old == owner) + if (oldowner == owner) return (0); - owner = old; + owner = oldowner; } /* We should only ever be in here for contested locks */ @@ -1807,16 +1817,16 @@ static int * there is zero or one thread only waiting for it. * Otherwise, it must be marked as contested. */ - old = casuword32(&m->m_owner, owner, - count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); + oldowner = UMTX_OWNER_USTORE(m, owner, + count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); umtxq_lock(&key); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); - if (old == -1) + if (oldowner == -1) return (EFAULT); - if (old != owner) + if (oldowner != owner) return (EINVAL); return (0); } @@ -1825,17 +1835,16 @@ static int * Lock a PP mutex. 
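One step worth calling out in do_lock_pi() above (and in do_lock_pp() below): a mutex whose owner word reads UMUTEX_CONTESTED is taken by installing the new owner with the contested bit still set, so the eventual unlock is forced back into the kernel while waiters remain queued. A minimal sketch of that compare-and-set, using C11 atomics rather than the kernel's casuword():

#include <stdatomic.h>
#include <stdint.h>

#define SK_CONTESTED    ((uintptr_t)1)  /* the relocated UMUTEX_CONTESTED */

static int
take_contested(_Atomic uintptr_t *word, uintptr_t newowner)
{
        uintptr_t expect = SK_CONTESTED;

        /* Keep the contested bit: other threads are still queued. */
        return (atomic_compare_exchange_strong_explicit(word, &expect,
            newowner | SK_CONTESTED, memory_order_acquire,
            memory_order_relaxed) ? 0 : -1);
}

int
main(void)
{
        _Atomic uintptr_t word = SK_CONTESTED;  /* unowned but contested */
        int anchor;

        return (take_contested(&word, (uintptr_t)&anchor) == 0 ? 0 : 1);
}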
*/ static int -do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, - struct _umtx_time *timeout, int try) +do_lock_pp(struct thread *td, struct umutex *m, uintptr_t newowner, + uint32_t flags, struct _umtx_time *timeout, int try) { struct abs_timeout timo; struct umtx_q *uq, *uq2; struct umtx_pi *pi; + uintptr_t owner; uint32_t ceiling; - uint32_t owner, id; int error, pri, old_inherited_pri, su; - id = td->td_tid; uq = td->td_umtxq; if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags), &uq->uq_key)) != 0) @@ -1872,8 +1881,8 @@ static int } mtx_unlock_spin(&umtx_lock); - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); + owner = UMTX_OWNER_USTORE(m, UMUTEX_CONTESTED, + newowner | UMUTEX_CONTESTED); if (owner == UMUTEX_CONTESTED) { error = 0; @@ -1955,27 +1964,27 @@ out: * Unlock a PP mutex. */ static int -do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pp(struct thread *td, struct umutex *m, uintptr_t curowner, + uint32_t flags) { struct umtx_key key; struct umtx_q *uq, *uq2; struct umtx_pi *pi; - uint32_t owner, id; + uintptr_t owner; uint32_t rceiling; int error, pri, new_inherited_pri, su; - id = td->td_tid; uq = td->td_umtxq; su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); /* * Make sure we own this mtx. */ - owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + owner = UMTX_OWNER_ULOAD(m); if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & ~UMUTEX_CONTESTED) != curowner) return (EPERM); error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t)); @@ -2003,8 +2012,7 @@ static int * to lock the mutex, it is necessary because thread priority * has to be adjusted for such mutex. */ - error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner), - UMUTEX_CONTESTED); + error = UMTX_OWNER_UDEFSTORE(m, UMUTEX_CONTESTED); umtxq_lock(&key); if (error == 0) @@ -2038,12 +2046,12 @@ static int } static int -do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling, - uint32_t *old_ceiling) +do_set_ceiling(struct thread *td, struct umutex *m, uintptr_t newowner, + uint32_t ceiling, uint32_t *old_ceiling) { struct umtx_q *uq; + uintptr_t owner; uint32_t save_ceiling; - uint32_t owner, id; uint32_t flags; int error; @@ -2052,7 +2060,6 @@ static int return (EINVAL); if (ceiling > RTP_PRIO_MAX) return (EINVAL); - id = td->td_tid; uq = td->td_umtxq; if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags), &uq->uq_key)) != 0) @@ -2064,13 +2071,12 @@ static int save_ceiling = fuword32(&m->m_ceilings[0]); - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); + owner = UMTX_OWNER_USTORE(m, UMUTEX_CONTESTED, + newowner | UMUTEX_CONTESTED); if (owner == UMUTEX_CONTESTED) { suword32(&m->m_ceilings[0], ceiling); - suword32(__DEVOLATILE(uint32_t *, &m->m_owner), - UMUTEX_CONTESTED); + UMTX_OWNER_UDEFSTORE(m, UMUTEX_CONTESTED); error = 0; break; } @@ -2081,7 +2087,7 @@ static int break; } - if ((owner & ~UMUTEX_CONTESTED) == id) { + if ((owner & ~UMUTEX_CONTESTED) == newowner) { suword32(&m->m_ceilings[0], ceiling); error = 0; break; @@ -2121,25 +2127,28 @@ static int * Lock a userland POSIX mutex. 
*/ static int -do_lock_umutex(struct thread *td, struct umutex *m, +do_lock_umutex(struct thread *td, struct umutex *m, uintptr_t newowner, struct _umtx_time *timeout, int mode) { uint32_t flags; int error; + if (newowner == UMUTEX_UNOWNED) + return (EINVAL); + flags = fuword32(&m->m_flags); if (flags == -1) return (EFAULT); switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: - error = do_lock_normal(td, m, flags, timeout, mode); + error = do_lock_normal(td, m, newowner, flags, timeout, mode); break; case UMUTEX_PRIO_INHERIT: - error = do_lock_pi(td, m, flags, timeout, mode); + error = do_lock_pi(td, m, newowner, flags, timeout, mode); break; case UMUTEX_PRIO_PROTECT: - error = do_lock_pp(td, m, flags, timeout, mode); + error = do_lock_pp(td, m, newowner, flags, timeout, mode); break; default: return (EINVAL); @@ -2159,21 +2168,24 @@ static int * Unlock a userland POSIX mutex. */ static int -do_unlock_umutex(struct thread *td, struct umutex *m) +do_unlock_umutex(struct thread *td, struct umutex *m, uintptr_t curowner) { uint32_t flags; + if (curowner == UMUTEX_UNOWNED) + return (EINVAL); + flags = fuword32(&m->m_flags); if (flags == -1) return (EFAULT); switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: - return (do_unlock_normal(td, m, flags)); + return (do_unlock_normal(td, m, curowner, flags)); case UMUTEX_PRIO_INHERIT: - return (do_unlock_pi(td, m, flags)); + return (do_unlock_pi(td, m, curowner, flags)); case UMUTEX_PRIO_PROTECT: - return (do_unlock_pp(td, m, flags)); + return (do_unlock_pp(td, m, curowner, flags)); } return (EINVAL); @@ -2181,7 +2193,7 @@ static int static int do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m, - struct timespec *timeout, u_long wflags) + uintptr_t curowner, struct timespec *timeout, u_long wflags) { struct abs_timeout timo; struct umtx_q *uq; @@ -2222,7 +2234,7 @@ do_cv_wait(struct thread *td, struct ucond *cv, st umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - error = do_unlock_umutex(td, m); + error = do_unlock_umutex(td, m, curowner); if (timeout != NULL) abs_timeout_init(&timo, clockid, ((wflags & CVWAIT_ABSTIME) != 0), @@ -2943,13 +2955,13 @@ __umtx_op_lock_umutex(struct thread *td, struct _u return (error); tm_p = &timeout; } - return do_lock_umutex(td, uap->obj, tm_p, 0); + return do_lock_umutex(td, uap->obj, uap->owner, tm_p, 0); } static int __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap) { - return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY); + return do_lock_umutex(td, uap->obj, uap->owner, NULL, _UMUTEX_TRY); } static int @@ -2968,7 +2980,7 @@ __umtx_op_wait_umutex(struct thread *td, struct _u return (error); tm_p = &timeout; } - return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT); + return do_lock_umutex(td, uap->obj, uap->owner, tm_p, _UMUTEX_WAIT); } static int @@ -2980,13 +2992,13 @@ __umtx_op_wake_umutex(struct thread *td, struct _u static int __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap) { - return do_unlock_umutex(td, uap->obj); + return do_unlock_umutex(td, uap->obj, uap->owner); } static int __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap) { - return do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1); + return do_set_ceiling(td, uap->obj, uap->owner, uap->val, uap->uaddr1); } static int @@ -3004,7 +3016,8 @@ __umtx_op_cv_wait(struct thread *td, struct _umtx_ return (error); ts = &timeout; } - return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val)); + return (do_cv_wait(td, uap->obj, 
uap->uaddr1, uap->owner, ts, + uap->val)); } static int @@ -3222,7 +3235,7 @@ __umtx_op_lock_umutex_compat32(struct thread *td, return (error); tm_p = &timeout; } - return do_lock_umutex(td, uap->obj, tm_p, 0); + return do_lock_umutex(td, uap->obj, uap->owner, tm_p, 0); } static int @@ -3241,7 +3254,7 @@ __umtx_op_wait_umutex_compat32(struct thread *td, return (error); tm_p = &timeout; } - return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT); + return do_lock_umutex(td, uap->obj, uap->owner, tm_p, _UMUTEX_WAIT); } static int @@ -3259,7 +3272,8 @@ __umtx_op_cv_wait_compat32(struct thread *td, stru return (error); ts = &timeout; } - return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val)); + return (do_cv_wait(td, uap->obj, uap->uaddr1, uap->owner, ts, + uap->val)); } static int Index: sys/sys/_umtx.h =================================================================== --- sys/sys/_umtx.h (revision 263398) +++ sys/sys/_umtx.h (working copy) @@ -31,10 +31,11 @@ #define _SYS__UMTX_H_ #include +#include #include struct umutex { - volatile __lwpid_t m_owner; /* Owner of the mutex */ + volatile __uintptr_t m_owner; /* Owner of the mutex */ __uint32_t m_flags; /* Flags of the mutex */ __uint32_t m_ceilings[2]; /* Priority protect ceiling */ __uint32_t m_spare[4]; Index: sys/sys/sysproto.h =================================================================== --- sys/sys/sysproto.h (revision 263398) +++ sys/sys/sysproto.h (working copy) @@ -1374,6 +1374,7 @@ struct _umtx_op_args { char val_l_[PADL_(u_long)]; u_long val; char val_r_[PADR_(u_long)]; char uaddr1_l_[PADL_(void *)]; void * uaddr1; char uaddr1_r_[PADR_(void *)]; char uaddr2_l_[PADL_(void *)]; void * uaddr2; char uaddr2_r_[PADR_(void *)]; + char owner_l_[PADL_(uintptr_t)]; uintptr_t owner; char owner_r_[PADR_(uintptr_t)]; }; struct thr_new_args { char param_l_[PADL_(struct thr_param *)]; struct thr_param * param; char param_r_[PADR_(struct thr_param *)]; Index: sys/sys/umtx.h =================================================================== --- sys/sys/umtx.h (revision 263398) +++ sys/sys/umtx.h (working copy) @@ -35,7 +35,7 @@ #define USYNC_PROCESS_SHARED 0x0001 /* Process shared sync objs */ #define UMUTEX_UNOWNED 0x0 -#define UMUTEX_CONTESTED 0x80000000U +#define UMUTEX_CONTESTED 0x1 #define UMUTEX_PRIO_INHERIT 0x0004 /* Priority inherited mutex */ #define UMUTEX_PRIO_PROTECT 0x0008 /* Priority protect mutex */ @@ -89,7 +89,8 @@ #ifndef _KERNEL -int _umtx_op(void *obj, int op, u_long val, void *uaddr, void *uaddr2); +int _umtx_op(void *obj, int op, u_long val, void *uaddr, void *uaddr2, + uintptr_t owner); #else
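Beyond the new syscall argument, the tail of the patch is the ABI-visible part: struct umutex's m_owner becomes pointer-sized and UMUTEX_CONTESTED moves from the high bit to bit 0, which is always clear in an aligned owner pointer. A quick standalone check of what that does to the structure layout on LP64; both structs below are re-declared locally for illustration only, trimmed to the fields shown in the diff.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct old_umutex {                     /* pre-patch layout */
        volatile int32_t   m_owner;     /* was volatile __lwpid_t */
        uint32_t           m_flags;
        uint32_t           m_ceilings[2];
        uint32_t           m_spare[4];
};

struct new_umutex {                     /* post-patch layout */
        volatile uintptr_t m_owner;     /* now volatile __uintptr_t */
        uint32_t           m_flags;
        uint32_t           m_ceilings[2];
        uint32_t           m_spare[4];
};

int
main(void)
{
        printf("old: size %zu, m_flags at offset %zu\n",
            sizeof(struct old_umutex), offsetof(struct old_umutex, m_flags));
        printf("new: size %zu, m_flags at offset %zu\n",
            sizeof(struct new_umutex), offsetof(struct new_umutex, m_flags));
        /*
         * The contested flag can live in bit 0 because owner tokens are
         * aligned pointers, so their low bit is always clear.
         */
        return (0);
}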