Index: lib/libthr/thread/thr_mutex.c
===================================================================
--- lib/libthr/thread/thr_mutex.c (revision 252562)
+++ lib/libthr/thread/thr_mutex.c (working copy)
@@ -316,13 +316,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
 {
     struct pthread *curthread = _get_curthread();
     struct pthread_mutex *m = *mutex;
-    uint32_t id;
     int ret;

-    id = TID(curthread);
     if (m->m_flags & PMUTEX_FLAG_PRIVATE)
         THR_CRITICAL_ENTER(curthread);
-    ret = _thr_umutex_trylock(&m->m_lock, id);
+    ret = _thr_umutex_trylock(&m->m_lock, 0);
     if (__predict_true(ret == 0)) {
         ENQUEUE_MUTEX(curthread, m);
     } else if (m->m_owner == curthread) {
@@ -347,14 +345,14 @@ static int
 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
     const struct timespec *abstime)
 {
-    uint32_t id, owner;
+    long owner, td;
     int count;
     int ret;

     if (m->m_owner == curthread)
         return mutex_self_lock(m, abstime);
+    td = (long)_get_curthread();

-    id = TID(curthread);
     /*
      * For adaptive mutexes, spin for a bit in the expectation
      * that if the application requests this mutex type then
@@ -373,7 +371,8 @@ mutex_lock_sleep(struct pthread *curthread, struct
         while (count--) {
             owner = m->m_lock.m_owner;
             if ((owner & ~UMUTEX_CONTESTED) == 0) {
-                if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
+                if (atomic_cmpset_acq_long(&m->m_lock.m_owner, owner,
+                    td | owner)) {
                     ret = 0;
                     goto done;
                 }
@@ -387,7 +386,8 @@ yield_loop:
         _sched_yield();
         owner = m->m_lock.m_owner;
         if ((owner & ~UMUTEX_CONTESTED) == 0) {
-            if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
+            if (atomic_cmpset_acq_long(&m->m_lock.m_owner, owner,
+                td | owner)) {
                 ret = 0;
                 goto done;
             }
@@ -396,13 +396,13 @@ yield_loop:

 sleep_in_kernel:
     if (abstime == NULL) {
-        ret = __thr_umutex_lock(&m->m_lock, id);
+        ret = __thr_umutex_lock(&m->m_lock, 0);
     } else if (__predict_false(
            abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000)) {
         ret = EINVAL;
     } else {
-        ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
+        ret = __thr_umutex_timedlock(&m->m_lock, 0, abstime);
     }
 done:
     if (ret == 0)
Index: lib/libthr/thread/thr_umtx.c
===================================================================
--- lib/libthr/thread/thr_umtx.c (revision 252562)
+++ lib/libthr/thread/thr_umtx.c (working copy)
@@ -55,10 +55,11 @@ _thr_urwlock_init(struct urwlock *rwl)
 }

 int
-__thr_umutex_lock(struct umutex *mtx, uint32_t id)
+__thr_umutex_lock(struct umutex *mtx, uint32_t id __unused)
 {
-    uint32_t owner;
+    long owner, td;

+    td = (long)_get_curthread();
     if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
         for (;;) {
             /* wait in kernel */
@@ -66,7 +67,8 @@ int

             owner = mtx->m_owner;
             if ((owner & ~UMUTEX_CONTESTED) == 0 &&
-                 atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
+                atomic_cmpset_acq_long(&mtx->m_owner, owner,
+                td | owner))
                 return (0);
         }
     }
@@ -77,12 +79,14 @@ int
 #define SPINLOOPS 1000

 int
-__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
+__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id __unused)
 {
-    uint32_t owner;
+    long td;
+    long owner;

+    td = (long)_get_curthread();
     if (!_thr_is_smp)
-        return __thr_umutex_lock(mtx, id);
+        return __thr_umutex_lock(mtx, 0);

     if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
         for (;;) {
@@ -90,9 +94,9 @@ int
             while (count--) {
                 owner = mtx->m_owner;
                 if ((owner & ~UMUTEX_CONTESTED) == 0) {
-                    if (atomic_cmpset_acq_32(
+                    if (atomic_cmpset_acq_long(
                         &mtx->m_owner,
-                        owner, id|owner)) {
+                        owner, td | owner)) {
                         return (0);
                     }
                 }
@@ -108,14 +112,16 @@ int
 }

 int
-__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
+__thr_umutex_timedlock(struct umutex *mtx, uint32_t id __unused,
     const struct timespec *abstime)
 {
+    long td;
     struct _umtx_time *tm_p, timeout;
+    long owner;
     size_t tm_size;
-    uint32_t owner;
     int ret;

+    td = (long)_get_curthread();
     if (abstime == NULL) {
         tm_p = NULL;
         tm_size = 0;
@@ -137,7 +143,8 @@ int
             /* now try to lock it */
             owner = mtx->m_owner;
             if ((owner & ~UMUTEX_CONTESTED) == 0 &&
-                 atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
+                atomic_cmpset_acq_long(&mtx->m_owner, owner,
+                td | owner))
                 return (0);
         } else {
             ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
@@ -152,7 +159,7 @@ int
 }

 int
-__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
+__thr_umutex_unlock(struct umutex *mtx, uint32_t id __unused)
 {
     return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
 }
@@ -173,6 +180,7 @@ __thr_umutex_set_ceiling(struct umutex *mtx, uint3
 int
 _thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
 {
+
     if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
         timeout->tv_nsec <= 0)))
         return (ETIMEDOUT);
@@ -183,12 +191,13 @@ _thr_umtx_wait(volatile long *mtx, long id, const
 int
 _thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
 {
+
     if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
         timeout->tv_nsec <= 0)))
         return (ETIMEDOUT);
     return _umtx_op_err(__DEVOLATILE(void *, mtx),
-        shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
-        __DECONST(void*, timeout));
+        shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE,
+        id, 0, __DECONST(void*, timeout));
 }

 int
@@ -208,7 +217,6 @@ _thr_umtx_timedwait_uint(volatile u_int *mtx, u_in
         tm_p = &timeout;
         tm_size = sizeof(timeout);
     }
-
     return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
         UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
         (void *)tm_size, __DECONST(void *, tm_p));
@@ -233,8 +241,7 @@ _thr_ucond_wait(struct ucond *cv, struct umutex *m
 {
     if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
         timeout->tv_nsec <= 0))) {
-        struct pthread *curthread = _get_curthread();
-        _thr_umutex_unlock(m, TID(curthread));
+        _thr_umutex_unlock(m, 0);
         return (ETIMEDOUT);
     }
     return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
Index: lib/libthr/thread/thr_umtx.h
===================================================================
--- lib/libthr/thread/thr_umtx.h (revision 252562)
+++ lib/libthr/thread/thr_umtx.h (working copy)
@@ -73,9 +73,12 @@ void _thr_rwl_wrlock(struct urwlock *rwlock) __hid
 void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;

 static inline int
-_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
+_thr_umutex_trylock(struct umutex *mtx, uint32_t id __unused)
 {
-    if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
+    long td;
+
+    td = (long)_get_curthread();
+    if (atomic_cmpset_acq_long(&mtx->m_owner, UMUTEX_UNOWNED, td))
         return (0);
     if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
         return (EBUSY);
@@ -83,35 +86,40 @@ static inline int
 }

 static inline int
-_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
+_thr_umutex_trylock2(struct umutex *mtx, uint32_t id __unused)
 {
-    if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
+    long td;
+
+    td = (long)_get_curthread();
+    if (atomic_cmpset_acq_long(&mtx->m_owner, UMUTEX_UNOWNED, td) != 0)
         return (0);
-    if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
-        __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
-        if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
+    if ((long)mtx->m_owner == UMUTEX_CONTESTED &&
+        __predict_true((mtx->m_flags &
+        (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
+        if (atomic_cmpset_acq_long(&mtx->m_owner, UMUTEX_CONTESTED,
+            td | UMUTEX_CONTESTED))
             return (0);
     return (EBUSY);
 }

 static inline int
-_thr_umutex_lock(struct umutex *mtx, uint32_t id)
+_thr_umutex_lock(struct umutex *mtx, uint32_t id __unused)
 {
-    if (_thr_umutex_trylock2(mtx, id) == 0)
+    if (_thr_umutex_trylock2(mtx, 0) == 0)
         return (0);
-    return (__thr_umutex_lock(mtx, id));
+    return (__thr_umutex_lock(mtx, 0));
 }

 static inline int
-_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
+_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id __unused)
 {
-    if (_thr_umutex_trylock2(mtx, id) == 0)
+    if (_thr_umutex_trylock2(mtx, 0) == 0)
         return (0);
-    return (__thr_umutex_lock_spin(mtx, id));
+    return (__thr_umutex_lock_spin(mtx, 0));
 }

 static inline int
-_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
+_thr_umutex_timedlock(struct umutex *mtx, uint32_t id __unused,
     const struct timespec *timeout)
 {
     if (_thr_umutex_trylock2(mtx, id) == 0)
@@ -120,17 +128,19 @@ static inline int
 }

 static inline int
-_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
+_thr_umutex_unlock2(struct umutex *mtx, uint32_t id __unused, int *defer)
 {
     uint32_t flags = mtx->m_flags;
+    long td;

+    td = (long)_get_curthread();
     if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
-        uint32_t owner;
+        long owner;
         do {
             owner = mtx->m_owner;
-            if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
+            if (__predict_false((owner & ~UMUTEX_CONTESTED) != td))
                 return (EPERM);
-        } while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
+        } while (__predict_false(!atomic_cmpset_rel_long(&mtx->m_owner,
             owner, UMUTEX_UNOWNED)));
         if ((owner & UMUTEX_CONTESTED)) {
             if (defer == NULL)
@@ -140,15 +150,15 @@ static inline int
         }
         return (0);
     }
-    if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
+    if (atomic_cmpset_rel_long(&mtx->m_owner, td, UMUTEX_UNOWNED))
         return (0);
-    return (__thr_umutex_unlock(mtx, id));
+    return (__thr_umutex_unlock(mtx, td));
 }

 static inline int
-_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
+_thr_umutex_unlock(struct umutex *mtx, uint32_t id __unused)
 {
-    return _thr_umutex_unlock2(mtx, id, NULL);
+    return _thr_umutex_unlock2(mtx, 0, NULL);
 }

 static inline int
Index: sys/kern/kern_thr.c
===================================================================
--- sys/kern/kern_thr.c (revision 252562)
+++ sys/kern/kern_thr.c (working copy)
@@ -168,6 +168,13 @@ create_thread(struct thread *td, mcontext_t *ctx,
         return (EPROCLIM);
     }

+    /*
+     * Cache the address of struct pthread for this thread.
+     * This way we can write it to the owner field of struct
+     * umtx.
+     */
+    td->td_pthread = arg;
+
     if (rtp != NULL) {
         switch(rtp->type) {
         case RTP_PRIO_REALTIME:
Index: sys/kern/kern_umtx.c
===================================================================
--- sys/kern/kern_umtx.c (revision 252562)
+++ sys/kern/kern_umtx.c (working copy)
@@ -1268,10 +1268,10 @@ do_lock_normal(struct thread *td, struct umutex *m
 {
     struct abs_timeout timo;
     struct umtx_q *uq;
-    uint32_t owner, old, id;
+    long id, old, owner;
     int error = 0;

-    id = td->td_tid;
+    id = (long)td->td_pthread;
     uq = td->td_umtxq;

     if (timeout != NULL)
@@ -1282,7 +1282,7 @@ do_lock_normal(struct thread *td, struct umutex *m
      * can fault on any access.
      */
     for (;;) {
-        owner = fuword32(__DEVOLATILE(void *, &m->m_owner));
+        owner = fuword(__DEVOLATILE(void *, &m->m_owner));
         if (mode == _UMUTEX_WAIT) {
             if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
                 return (0);
@@ -1290,7 +1290,7 @@ do_lock_normal(struct thread *td, struct umutex *m
         /*
          * Try the uncontested case. This should be done in userland.
          */
-        owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
+        owner = casuword(&m->m_owner, UMUTEX_UNOWNED, id);

         /* The acquire succeeded. */
         if (owner == UMUTEX_UNOWNED)
@@ -1302,7 +1302,7 @@ do_lock_normal(struct thread *td, struct umutex *m

         /* If no one owns it but it is contested try to acquire it. */
         if (owner == UMUTEX_CONTESTED) {
-            owner = casuword32(&m->m_owner,
+            owner = casuword(&m->m_owner,
                 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

             if (owner == UMUTEX_CONTESTED)
@@ -1350,7 +1350,7 @@ do_lock_normal(struct thread *td, struct umutex *m
          * either some one else has acquired the lock or it has been
          * released.
          */
-        old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
+        old = casuword(&m->m_owner, owner, owner | UMUTEX_CONTESTED);

         /* The address was invalid. */
         if (old == -1) {
@@ -1390,15 +1390,16 @@ static int
 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
 {
     struct umtx_key key;
-    uint32_t owner, old, id;
+    long owner, old, id;
     int error;
     int count;

-    id = td->td_tid;
+    id = (long)td->td_pthread;
+
     /*
      * Make sure we own this mtx.
      */
-    owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
+    owner = fuword(__DEVOLATILE(void *, &m->m_owner));
     if (owner == -1)
         return (EFAULT);

@@ -1406,7 +1407,7 @@ do_unlock_normal(struct thread *td, struct umutex
         return (EPERM);

     if ((owner & UMUTEX_CONTESTED) == 0) {
-        old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
+        old = casuword(&m->m_owner, owner, UMUTEX_UNOWNED);
         if (old == -1)
             return (EFAULT);
         if (old == owner)
@@ -1429,7 +1430,7 @@ do_unlock_normal(struct thread *td, struct umutex
      * there is zero or one thread only waiting for it.
      * Otherwise, it must be marked as contested.
      */
-    old = casuword32(&m->m_owner, owner,
+    old = casuword(&m->m_owner, owner,
         count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
     umtxq_lock(&key);
     umtxq_signal(&key,1);
@@ -1451,12 +1452,12 @@ static int
 do_wake_umutex(struct thread *td, struct umutex *m)
 {
     struct umtx_key key;
-    uint32_t owner;
+    long owner;
     uint32_t flags;
     int error;
     int count;

-    owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
+    owner = fuword(__DEVOLATILE(void *, &m->m_owner));
     if (owner == -1)
         return (EFAULT);

@@ -1476,7 +1477,7 @@ do_wake_umutex(struct thread *td, struct umutex *m
     umtxq_unlock(&key);

     if (count <= 1)
-        owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);
+        owner = casuword(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED);

     umtxq_lock(&key);
     if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
@@ -1527,10 +1528,10 @@ do_wake2_umutex(struct thread *td, struct umutex *
      * any memory.
      */
     if (count > 1) {
-        owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
+        owner = fuword(__DEVOLATILE(void *, &m->m_owner));
         while ((owner & UMUTEX_CONTESTED) ==0) {
-            old = casuword32(&m->m_owner, owner,
-                owner|UMUTEX_CONTESTED);
+            old = casuword(&m->m_owner, owner,
+                owner | UMUTEX_CONTESTED);
             if (old == owner)
                 break;
             owner = old;
@@ -1541,11 +1542,11 @@ do_wake2_umutex(struct thread *td, struct umutex *
                 break;
         }
     } else if (count == 1) {
-        owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
+        owner = fuword(__DEVOLATILE(void *, &m->m_owner));
         while ((owner & ~UMUTEX_CONTESTED) != 0 &&
             (owner & UMUTEX_CONTESTED) == 0) {
-            old = casuword32(&m->m_owner, owner,
-                owner|UMUTEX_CONTESTED);
+            old = casuword(&m->m_owner, owner,
+                owner | UMUTEX_CONTESTED);
             if (old == owner)
                 break;
             owner = old;
@@ -1940,10 +1941,10 @@ do_lock_pi(struct thread *td, struct umutex *m, ui
     struct abs_timeout timo;
     struct umtx_q *uq;
     struct umtx_pi *pi, *new_pi;
-    uint32_t id, owner, old;
+    long id, owner, old;
     int error;

-    id = td->td_tid;
+    id = (long)td->td_pthread;
     uq = td->td_umtxq;
     if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
@@ -1984,7 +1985,7 @@ do_lock_pi(struct thread *td, struct umutex *m, ui
         /*
          * Try the uncontested case. This should be done in userland.
          */
-        owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id);
+        owner = casuword(&m->m_owner, UMUTEX_UNOWNED, id);

         /* The acquire succeeded. */
         if (owner == UMUTEX_UNOWNED) {
@@ -2000,7 +2001,7 @@ do_lock_pi(struct thread *td, struct umutex *m, ui

         /* If no one owns it but it is contested try to acquire it. */
         if (owner == UMUTEX_CONTESTED) {
-            owner = casuword32(&m->m_owner,
+            owner = casuword(&m->m_owner,
                 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

             if (owner == UMUTEX_CONTESTED) {
@@ -2054,7 +2055,7 @@ do_lock_pi(struct thread *td, struct umutex *m, ui
          * either some one else has acquired the lock or it has been
          * released.
          */
-        old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED);
+        old = casuword(&m->m_owner, owner, owner | UMUTEX_CONTESTED);

         /* The address was invalid. */
         if (old == -1) {
@@ -2106,11 +2107,11 @@ do_unlock_pi(struct thread *td, struct umutex *m,
     int count;
     int pri;

-    id = td->td_tid;
+    id = (long)td->td_pthread;

     /*
      * Make sure we own this mtx.
      */
-    owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
+    owner = fuword(__DEVOLATILE(void *, &m->m_owner));
     if (owner == -1)
         return (EFAULT);
@@ -2119,7 +2120,7 @@ do_unlock_pi(struct thread *td, struct umutex *m,

     /* This should be done in userland */
     if ((owner & UMUTEX_CONTESTED) == 0) {
-        old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED);
+        old = casuword(&m->m_owner, owner, UMUTEX_UNOWNED);
         if (old == -1)
             return (EFAULT);
         if (old == owner)
@@ -2178,7 +2179,7 @@ do_unlock_pi(struct thread *td, struct umutex *m,
      * there is zero or one thread only waiting for it.
      * Otherwise, it must be marked as contested.
      */
-    old = casuword32(&m->m_owner, owner,
+    old = casuword(&m->m_owner, owner,
         count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);

     umtxq_lock(&key);
@@ -2202,11 +2203,11 @@ do_lock_pp(struct thread *td, struct umutex *m, ui
     struct abs_timeout timo;
     struct umtx_q *uq, *uq2;
     struct umtx_pi *pi;
+    long owner, id;
     uint32_t ceiling;
-    uint32_t owner, id;
     int error, pri, old_inherited_pri, su;

-    id = td->td_tid;
+    id = (long)td->td_pthread;
     uq = td->td_umtxq;
     if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
         &uq->uq_key)) != 0)
@@ -2243,7 +2244,7 @@ do_lock_pp(struct thread *td, struct umutex *m, ui
         }
         mtx_unlock_spin(&umtx_lock);

-        owner = casuword32(&m->m_owner,
+        owner = casuword(&m->m_owner,
             UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

         if (owner == UMUTEX_CONTESTED) {
@@ -2337,18 +2338,18 @@ do_unlock_pp(struct thread *td, struct umutex *m,
     struct umtx_key key;
     struct umtx_q *uq, *uq2;
     struct umtx_pi *pi;
-    uint32_t owner, id;
+    long owner, id;
     uint32_t rceiling;
     int error, pri, new_inherited_pri, su;

-    id = td->td_tid;
+    id = (long)td->td_pthread;
     uq = td->td_umtxq;
     su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

     /*
      * Make sure we own this mtx.
      */
-    owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
+    owner = fuword(__DEVOLATILE(void *, &m->m_owner));
     if (owner == -1)
         return (EFAULT);

@@ -2380,7 +2381,7 @@ do_unlock_pp(struct thread *td, struct umutex *m,
      * to lock the mutex, it is necessary because thread priority
      * has to be adjusted for such mutex.
      */
-    error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
+    error = suword(__DEVOLATILE(void *, &m->m_owner),
         UMUTEX_CONTESTED);

     umtxq_lock(&key);
@@ -2419,8 +2420,8 @@ do_set_ceiling(struct thread *td, struct umutex *m
     uint32_t *old_ceiling)
 {
     struct umtx_q *uq;
+    long owner, id;
     uint32_t save_ceiling;
-    uint32_t owner, id;
     uint32_t flags;
     int error;

@@ -2429,7 +2430,7 @@ do_set_ceiling(struct thread *td, struct umutex *m
         return (EINVAL);
     if (ceiling > RTP_PRIO_MAX)
         return (EINVAL);
-    id = td->td_tid;
+    id = (long)td->td_pthread;
     uq = td->td_umtxq;
     if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
         &uq->uq_key)) != 0)
@@ -2441,12 +2442,12 @@ do_set_ceiling(struct thread *td, struct umutex *m

         save_ceiling = fuword32(&m->m_ceilings[0]);

-        owner = casuword32(&m->m_owner,
+        owner = casuword(&m->m_owner,
             UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);

         if (owner == UMUTEX_CONTESTED) {
             suword32(&m->m_ceilings[0], ceiling);
-            suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
+            suword(__DEVOLATILE(void *, &m->m_owner),
                 UMUTEX_CONTESTED);
             error = 0;
             break;
@@ -3175,14 +3176,20 @@ int
 sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
     /* struct umtx *umtx */
 {
-    return do_lock_umtx(td, uap->umtx, td->td_tid, 0);
+    long id;
+
+    id = (long)td->td_pthread;
+    return do_lock_umtx(td, uap->umtx, id, 0);
 }

 int
 sys__umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
     /* struct umtx *umtx */
 {
-    return do_unlock_umtx(td, uap->umtx, td->td_tid);
+    long id;
+
+    id = (long)td->td_pthread;
+    return do_unlock_umtx(td, uap->umtx, id);
 }

 inline int
Index: sys/sys/_umtx.h
===================================================================
--- sys/sys/_umtx.h (revision 252562)
+++ sys/sys/_umtx.h (working copy)
@@ -38,7 +38,7 @@ struct umtx {
 };

 struct umutex {
-    volatile __lwpid_t      m_owner;        /* Owner of the mutex */
+    volatile unsigned long  m_owner;        /* Owner of the mutex */
     __uint32_t              m_flags;        /* Flags of the mutex */
     __uint32_t              m_ceilings[2];  /* Priority protect ceiling */
     __uint32_t              m_spare[4];
Index: sys/sys/proc.h
===================================================================
--- sys/sys/proc.h (revision 252562)
+++ sys/sys/proc.h (working copy)
@@ -293,6 +293,7 @@ struct thread {
      * or already have been set in the allocator, constructor, etc.
      */
     struct pcb *td_pcb;     /* (k) Kernel VA of pcb and kstack. */
+    void *td_pthread;       /* (t) Associated struct pthread addr */
     enum {
         TDS_INACTIVE = 0x0,
         TDS_INHIBITED,
Index: sys/sys/umtx.h
===================================================================
--- sys/sys/umtx.h (revision 252562)
+++ sys/sys/umtx.h (working copy)
@@ -34,12 +34,12 @@
 #include 

 #define UMTX_UNOWNED            0x0
-#define UMTX_CONTESTED          LONG_MIN
+#define UMTX_CONTESTED          0x1

 #define USYNC_PROCESS_SHARED    0x0001  /* Process shared sync objs */

 #define UMUTEX_UNOWNED          0x0
-#define UMUTEX_CONTESTED        0x80000000U
+#define UMUTEX_CONTESTED        0x1

 #define UMUTEX_ERROR_CHECK      0x0002  /* Error-checking mutex */
 #define UMUTEX_PRIO_INHERIT     0x0004  /* Priority inherited mutex */
@@ -115,7 +115,7 @@ umtx_init(struct umtx *umtx)
 static __inline u_long
 umtx_owner(struct umtx *umtx)
 {
-    return (umtx->u_owner & ~LONG_MIN);
+    return (umtx->u_owner & ~UMTX_CONTESTED);
 }

 static __inline int
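
The switch of UMUTEX_CONTESTED from 0x80000000U to 0x1 works because m_owner now holds the userland struct pthread address of the owner, leaving only the low bit free for the contested flag; this relies on struct pthread allocations being at least 2-byte aligned. A minimal standalone sketch of that owner-word encoding follows (the helper names are illustrative only, not part of the patch):

#include <stdint.h>

#define UMUTEX_UNOWNED		0x0
#define UMUTEX_CONTESTED	0x1

/* Build the owner word: pthread address in the upper bits, contested flag in bit 0. */
static inline long
owner_word(void *pthread_addr, int contested)
{
	return ((long)(uintptr_t)pthread_addr | (contested ? UMUTEX_CONTESTED : 0));
}

/* Recover the owning pthread address by masking off the contested bit. */
static inline void *
owner_pthread(long word)
{
	return ((void *)(uintptr_t)(word & ~(long)UMUTEX_CONTESTED));
}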