Index: thread/thr_cond.c =================================================================== --- thread/thr_cond.c (revision 214510) +++ thread/thr_cond.c (working copy) @@ -45,7 +45,8 @@ static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr); static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int cancel); -static int cond_signal_common(pthread_cond_t *cond, int broadcast); +static int cond_signal_common(pthread_cond_t *cond); +static int cond_broadcast_common(pthread_cond_t *cond); /* * Double underscore versions are cancellation points. Single underscore @@ -80,7 +81,8 @@ pcond->c_pshared = (*cond_attr)->c_pshared; pcond->c_clockid = (*cond_attr)->c_clockid; } - _thr_umutex_init(&pcond->c_lock); + if (pcond->c_pshared == 0) + TAILQ_INIT(&pcond->c_waitq); *cond = pcond; } /* Return the completion status: */ @@ -128,7 +130,6 @@ int _pthread_cond_destroy(pthread_cond_t *cond) { - struct pthread *curthread = _get_curthread(); struct pthread_cond *cv; int rval = 0; @@ -138,9 +139,18 @@ rval = EINVAL; else { cv = *cond; - THR_UMUTEX_LOCK(curthread, &cv->c_lock); - *cond = THR_COND_DESTROYED; - THR_UMUTEX_UNLOCK(curthread, &cv->c_lock); + if (!cv->c_pshared) { + _thr_umtx_lock_spin(&cv->c_qlock); + if (TAILQ_FIRST(&cv->c_waitq) != NULL) { + _thr_umtx_unlock(&cv->c_qlock); + return (EBUSY); + } + *cond = THR_COND_DESTROYED; + _thr_umtx_unlock(&cv->c_qlock); + } else { + *cond = THR_COND_DESTROYED; + } + _thr_ucond_broadcast(&cv->c_kerncv); /* * Free the memory allocated for the condition @@ -155,54 +165,36 @@ { pthread_mutex_t *mutex; pthread_cond_t *cond; - int count; + int recurse; }; static void cond_cancel_handler(void *arg) { - struct pthread *curthread = _get_curthread(); struct cond_cancel_info *info = (struct cond_cancel_info *)arg; - pthread_cond_t cv; - - if (info->cond != NULL) { - cv = *(info->cond); - THR_UMUTEX_UNLOCK(curthread, &cv->c_lock); - } - _mutex_cv_lock(info->mutex, info->count); + + _mutex_cv_lock(info->mutex, info->recurse, 1); } /* - * Cancellation behaivor: - * Thread may be canceled at start, if thread is canceled, it means it - * did not get a wakeup from pthread_cond_signal(), otherwise, it is - * not canceled. - * Thread cancellation never cause wakeup from pthread_cond_signal() - * to be lost. + * Wait on kernel based condition variable. 
  */
 static int
-cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+cond_wait_kernel(pthread_cond_t *cond, pthread_mutex_t *mutex,
 	const struct timespec *abstime, int cancel)
 {
 	struct pthread	*curthread = _get_curthread();
-	struct timespec ts, ts2, *tsp;
+	struct pthread_mutex *m;
+	struct timespec ts, ts2, *tsp;
 	struct cond_cancel_info info;
 	pthread_cond_t	cv;
-	int		ret;
+	int		error;
 
-	/*
-	 * If the condition variable is statically initialized,
-	 * perform the dynamic initialization:
-	 */
-	CHECK_AND_INIT_COND
 	cv = *cond;
-	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
-	ret = _mutex_cv_unlock(mutex, &info.count);
-	if (__predict_false(ret != 0)) {
-		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
-		return (ret);
-	}
+	m = *mutex;
+	error = _mutex_cv_detach(mutex, &info.recurse);
+	if (__predict_false(error != 0))
+		return (error);
 
 	info.mutex = mutex;
 	info.cond = cond;
@@ -217,19 +209,125 @@
 	if (cancel) {
 		THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
 		_thr_cancel_enter2(curthread, 0);
-		ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 1);
+		error = _thr_ucond_wait(&cv->c_kerncv, &m->m_lock, tsp, 1);
 		info.cond = NULL;
-		_thr_cancel_leave(curthread, (ret != 0));
+		_thr_cancel_leave(curthread, (error != 0));
 		THR_CLEANUP_POP(curthread, 0);
 	} else {
-		ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0);
+		error = _thr_ucond_wait(&cv->c_kerncv, &m->m_lock, tsp, 0);
 	}
-	if (ret == EINTR)
-		ret = 0;
-	_mutex_cv_lock(mutex, info.count);
-	return (ret);
+	if (error == EINTR)
+		error = 0;
+	_mutex_cv_lock(mutex, info.recurse, 1);
+	return (error);
 }
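Both the queue-based condition-variable wait below and the queued mutex path later in the patch park a thread on its private wake address rather than on the synchronization object itself: the waiter clears its flag before it is published on a wait queue, and a waker sets the flag before issuing the kernel wakeup, so a wakeup posted between the queue insertion and the sleep is never lost. A minimal sketch of that protocol, written against the public _umtx_op(2) interface instead of the _thr_umtx_* wrappers the patch uses (the wake_flag type and function names here are illustrative, not part of the patch):

#include <sys/types.h>
#include <sys/umtx.h>
#include <limits.h>

struct wake_flag {
	volatile u_int	value;		/* 0: may sleep, 1: wakeup pending */
};

/* Waiter: clear the flag before publishing itself on any wait queue. */
static void
wake_flag_clear(struct wake_flag *wf)
{
	wf->value = 0;
}

/*
 * Waiter: sleep only while the flag is still 0.  If the wakeup has already
 * been posted, UMTX_OP_WAIT_UINT returns immediately, so nothing is lost
 * between the queue insertion and the sleep.
 */
static int
wake_flag_sleep(struct wake_flag *wf)
{
	return (_umtx_op(__DEVOLATILE(void *, &wf->value),
	    UMTX_OP_WAIT_UINT, 0, NULL, NULL));
}

/* Waker: publish the wakeup first, then kick any thread already asleep. */
static void
wake_flag_wake(struct wake_flag *wf)
{
	wf->value = 1;
	_umtx_op(__DEVOLATILE(void *, &wf->value), UMTX_OP_WAKE,
	    INT_MAX, NULL, NULL);
}

In the patch itself the flag lives in the per-thread struct wake_addr handed out by _thr_alloc_wake_addr(), and _thr_sleep() adds the absolute-to-relative timeout conversion on top of the same wait call.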
+/*
+ * Cancellation behavior:
+ *   A thread may be canceled at the start of the wait; if it is canceled,
+ *   it did not consume a wakeup from pthread_cond_signal(), otherwise it
+ *   is not canceled.
+ *   Thread cancellation never causes a wakeup from pthread_cond_signal()
+ *   to be lost.
+ */
+static int
+cond_wait_queue(pthread_cond_t *cond, pthread_mutex_t *mutex,
+	const struct timespec *abstime, int cancel)
+{
+	struct pthread	*curthread = _get_curthread();
+	pthread_cond_t	cv;
+	int		recurse;
+	int		error;
+
+	cv = *cond;
+	_thr_umtx_lock_spin(&cv->c_qlock);
+	if (cv->c_mutex != NULL && cv->c_mutex != mutex) {
+		_thr_umtx_unlock(&cv->c_qlock);
+		return (EINVAL);
+	}
+	error = _mutex_cv_unlock(mutex, &recurse);
+	if (__predict_false(error != 0)) {
+		_thr_umtx_unlock(&cv->c_qlock);
+		return (error);
+	}
+	cv->c_mutex = mutex;
+	TAILQ_INSERT_TAIL(&cv->c_waitq, curthread, wle);
+	curthread->waitq_ptr = &cv->c_waitq;
+	_thr_clear_wake(curthread);
+	_thr_umtx_unlock(&cv->c_qlock);
+
+	for (;;) {
+		if (cancel) {
+			_thr_cancel_enter2(curthread, 0);
+			error = _thr_sleep(curthread, abstime, cv->c_clockid);
+			_thr_cancel_leave(curthread, 0);
+		} else {
+			error = _thr_sleep(curthread, abstime, cv->c_clockid);
+		}
+		_thr_clear_wake(curthread);
+		_thr_umtx_lock_spin(&cv->c_qlock);
+		if (curthread->waitq_ptr == NULL) {
+			/*
+			 * We were either signaled on the condition variable
+			 * or woken by a mutex unlock.
+			 */
+			error = 0;
+			break;
+		} else if (curthread->waitq_ptr != &cv->c_waitq) {
+			/* We were moved to the mutex wait queue. */
+			_thr_umtx_unlock(&cv->c_qlock);
+			error = _mutex_cv_lock(mutex, recurse, 0);
+			return (error);
+		}
+		if (abstime != NULL && error == ETIMEDOUT) {
+			TAILQ_REMOVE(&cv->c_waitq, curthread, wle);
+			curthread->waitq_ptr = NULL;
+			break;
+		} else if (SHOULD_CANCEL(curthread)) {
+			TAILQ_REMOVE(&cv->c_waitq, curthread, wle);
+			if (TAILQ_EMPTY(&cv->c_waitq))
+				cv->c_mutex = NULL;
+			_thr_umtx_unlock(&cv->c_qlock);
+			curthread->waitq_ptr = NULL;
+			(void)_mutex_cv_lock(mutex, recurse, 0);
+			_pthread_exit(PTHREAD_CANCELED);
+		}
+		_thr_umtx_unlock(&cv->c_qlock);
+	}
+	if (TAILQ_EMPTY(&cv->c_waitq))
+		cv->c_mutex = NULL;
+	_thr_umtx_unlock(&cv->c_qlock);
+	_mutex_cv_lock(mutex, recurse, 0);
+	return (error);
+}
+
+static int
+cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+	const struct timespec *abstime, int cancel)
+{
+	struct pthread	*curthread = _get_curthread();
+	pthread_cond_t	cv;
+	struct pthread_mutex	*m;
+
+	/*
+	 * If the condition variable is statically initialized,
+	 * perform the dynamic initialization:
+	 */
+	CHECK_AND_INIT_COND
+	if ((m = *mutex) == NULL || m < THR_MUTEX_DESTROYED)
+		return (EINVAL);
+	/*
+	 * If this is a real-time scheduling thread, the mutex is a complex
+	 * mutex, or the condition variable is process-shared, use the
+	 * kernel-based condition variable; otherwise use the user-mode
+	 * wait queue.
+	 */
+	if (curthread->attr.sched_policy == SCHED_OTHER &&
+	    IS_SIMPLE_MUTEX(m) && !cv->c_pshared)
+		return cond_wait_queue(cond, mutex, abstime, cancel);
+	else
+		return cond_wait_kernel(cond, mutex, abstime, cancel);
+}
+
 int
 _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
@@ -268,12 +366,27 @@
 	return (cond_wait_common(cond, mutex, abstime, 1));
 }
 
+static inline void
+wake_threads(unsigned int *addrs[], int len)
+{
+	int i;
+
+	for (i = 0; i < len; ++i) {
+		_thr_set_wake(addrs[i]);
+		_thr_umtx_wake(addrs[i], INT_MAX, 0);
+	}
+}
+
 static int
-cond_signal_common(pthread_cond_t *cond, int broadcast)
+cond_signal_common(pthread_cond_t *cond)
 {
 	struct pthread	*curthread = _get_curthread();
-	pthread_cond_t	cv;
-	int		ret = 0;
+	pthread_mutex_t *mutex;
+	struct pthread_mutex *m;
+	struct pthread	*td;
+	struct pthread_cond *cv;
+	int		move = 0;
+	unsigned int	*waddr = NULL;
 
 	/*
 	 * If the condition variable is statically initialized, perform dynamic
@@ -281,25 +394,123 @@
 	 */
 	CHECK_AND_INIT_COND
 
-	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
-	if (!broadcast)
-		ret = _thr_ucond_signal(&cv->c_kerncv);
-	else
-		ret = _thr_ucond_broadcast(&cv->c_kerncv);
-	THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
-	return (ret);
+	_thr_ucond_signal(&cv->c_kerncv);
+	if (cv->c_pshared)
+		return (0);
+	td = TAILQ_FIRST(&cv->c_waitq);
+	if (td == NULL)
+		return (0);
+	_thr_umtx_lock_spin(&cv->c_qlock);
+	/*
+	 * Check whether we own the mutex temporarily bound to this
+	 * condition variable.  If we do, the waiter can be moved to the
+	 * mutex wait queue without being woken here; it will be woken
+	 * when we unlock the mutex.
+ */ + if ((mutex = cv->c_mutex) != NULL) { + m = *mutex; + move = (m->m_owner == curthread); + } else { + m = NULL; + } + td = TAILQ_FIRST(&cv->c_waitq); + if (td != NULL) { + TAILQ_REMOVE(&cv->c_waitq, td, wle); + if (move) { + td->waitq_ptr = &m->m_waitq; + _thr_umtx_lock(&m->m_slock.qlock); + TAILQ_INSERT_TAIL(&m->m_waitq, td, wle); + _thr_umtx_unlock(&m->m_slock.qlock); + _mutex_set_contested(m); + } else { + td->waitq_ptr = NULL; + waddr = WAKE_ADDR(td); + } + if (TAILQ_EMPTY(&cv->c_waitq)) + cv->c_mutex = NULL; + } + _thr_umtx_unlock(&cv->c_qlock); + if (waddr != NULL) { + _thr_set_wake(waddr); + _thr_umtx_wake(waddr, INT_MAX, 0); + } + return (0); } +static int +cond_broadcast_common(pthread_cond_t *cond) +{ +#define MAX_TD 500 + struct pthread *curthread = _get_curthread(); + pthread_mutex_t *mutex; + struct pthread_mutex *m; + struct pthread *td, *td2; + struct pthread_cond *cv; + unsigned int *waddrs[MAX_TD]; + int move = 0; + int i = 0; + + /* + * If the condition variable is statically initialized, perform dynamic + * initialization. + */ + CHECK_AND_INIT_COND + + _thr_ucond_broadcast(&cv->c_kerncv); + if (cv->c_pshared) + return (0); + td = TAILQ_FIRST(&cv->c_waitq); + if (td == NULL) + return (0); + _thr_umtx_lock_spin(&cv->c_qlock); + /* + * Check if we owned the temporarily binding mutex, + * if we owned, we can migrate thread to mutex wait + * queue without waking up thread, after we unlock + * the mutex, we will wake up a waiter thread. + */ + if ((mutex = cv->c_mutex) != NULL) { + m = *mutex; + move = (m->m_owner == curthread); + } else { + m = NULL; + } + if (move) { + TAILQ_FOREACH(td, &cv->c_waitq, wle) + td->waitq_ptr = &m->m_waitq; + _thr_umtx_lock_spin(&m->m_slock.qlock); + TAILQ_CONCAT(&m->m_waitq, &cv->c_waitq, wle); + if (!TAILQ_EMPTY(&m->m_waitq)) + _mutex_set_contested(m); + _thr_umtx_unlock(&m->m_slock.qlock); + } else { + TAILQ_FOREACH_SAFE(td, &cv->c_waitq, wle, td2) { + TAILQ_REMOVE(&cv->c_waitq, td, wle); + td->waitq_ptr = NULL; + waddrs[i++] = WAKE_ADDR(td); + if (i >= MAX_TD) { + /* Too many threads, flush out. :-( */ + wake_threads(waddrs, i); + i = 0; + } + } + } + cv->c_mutex = NULL; + _thr_umtx_unlock(&cv->c_qlock); + wake_threads(waddrs, i); + return (0); +} + int _pthread_cond_signal(pthread_cond_t * cond) { - return (cond_signal_common(cond, 0)); + return (cond_signal_common(cond)); } int _pthread_cond_broadcast(pthread_cond_t * cond) { - return (cond_signal_common(cond, 1)); + return (cond_broadcast_common(cond)); } Index: thread/thr_kern.c =================================================================== --- thread/thr_kern.c (revision 214510) +++ thread/thr_kern.c (working copy) @@ -31,6 +31,7 @@ #include #include #include +#include #include "thr_private.h" @@ -41,6 +42,57 @@ #define DBG_MSG(x...) 
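The point of the signal/broadcast rework above is to avoid waking a thread that would immediately block on the mutex: when the signalling thread owns the mutex currently bound to the condition variable, waiters are spliced onto the mutex wait queue and the eventual unlock hands the lock to one of them directly. A stripped-down sketch of that decision for the single-waiter case (the struct names and the caller_owns_mutex flag are stand-ins for the patch's internals, not its actual types):

#include <sys/queue.h>
#include <stdbool.h>
#include <stddef.h>

struct waiter;
TAILQ_HEAD(waitqueue, waiter);

struct waiter {
	TAILQ_ENTRY(waiter)	link;
	struct waitqueue	*on_queue;	/* queue this waiter is parked on */
};

/*
 * Returns the waiter that must be woken now, or NULL if it was requeued
 * onto the mutex (in which case the eventual unlock wakes it with the
 * lock already available).
 */
static struct waiter *
signal_one(struct waitqueue *cv_queue, struct waitqueue *mtx_queue,
    bool caller_owns_mutex)
{
	struct waiter *w;

	if ((w = TAILQ_FIRST(cv_queue)) == NULL)
		return (NULL);
	TAILQ_REMOVE(cv_queue, w, link);
	if (caller_owns_mutex) {
		/* Hand the waiter straight to the mutex: no wakeup here. */
		TAILQ_INSERT_TAIL(mtx_queue, w, link);
		w->on_queue = mtx_queue;
		return (NULL);
	}
	w->on_queue = NULL;	/* the waiter sees it was signaled directly */
	return (w);
}

cond_broadcast_common() above does the same thing in bulk with TAILQ_CONCAT, which is why a broadcast issued while holding the mutex costs no wakeups at all.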
#endif +#define GOLDEN_RATIO_PRIME 2654404609U +#define POOL_SIZE 128 +#define POOL_SHIFTS (__WORD_BIT - 7) + +static struct umutex addr_lock; +static struct wake_addr *wake_addr_head; +static struct wake_addr default_wake_addr; + +struct wake_addr * +_thr_alloc_wake_addr(void) +{ + struct pthread *curthread; + struct wake_addr *p; + + if (_thr_initial == NULL) { + return &default_wake_addr; + } + + curthread = _get_curthread(); + + THR_UMUTEX_LOCK(curthread, &addr_lock); + if (wake_addr_head == NULL) { + unsigned i; + unsigned pagesize = getpagesize(); + struct wake_addr *pp = (struct wake_addr *)mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_ANON, -1, 0); + for (i = 1; i < pagesize/sizeof(struct wake_addr); ++i) + pp[i].link = &pp[i+1]; + pp[i-1].link = NULL; + wake_addr_head = &pp[1]; + p = &pp[0]; + } else { + p = wake_addr_head; + wake_addr_head = p->link; + } + THR_UMUTEX_UNLOCK(curthread, &addr_lock); + return (p); +} + +void +_thr_release_wake_addr(struct wake_addr *wa) +{ + struct pthread *curthread = _get_curthread(); + + if (wa == &default_wake_addr) + return; + THR_UMUTEX_LOCK(curthread, &addr_lock); + wa->link = wake_addr_head; + wake_addr_head = wa; + THR_UMUTEX_UNLOCK(curthread, &addr_lock); +} + /* * This is called when the first thread (other than the initial * thread) is created. @@ -130,3 +182,25 @@ _schedparam_to_rtp(policy, param, &rtp); return (rtprio_thread(RTP_SET, lwpid, &rtp)); } + +/* Sleep on thread wakeup address */ +int +_thr_sleep(struct pthread *curthread, const struct timespec *abstime, int clockid) +{ + struct timespec *tsp, ts, ts2; + int error; + + if (abstime != NULL) { + clock_gettime(clockid, &ts); + TIMESPEC_SUB(&ts2, abstime, &ts); + if (ts.tv_sec < 0 || ts.tv_nsec <= 0) + return (ETIMEDOUT); + tsp = &ts2; + } else { + tsp = NULL; + } + + error = _thr_umtx_wait_uint(&curthread->wake_addr->value, + 0, tsp, 0); + return (error); +} Index: thread/thr_mutex.c =================================================================== --- thread/thr_mutex.c (revision 214510) +++ thread/thr_mutex.c (working copy) @@ -55,8 +55,8 @@ PANIC("mutex is not on list"); \ } while (0) #define MUTEX_ASSERT_NOT_OWNED(m) do { \ - if (__predict_false((m)->m_qe.tqe_prev != NULL || \ - (m)->m_qe.tqe_next != NULL)) \ + if (__predict_false((m)->m_qe.tqe_prev != NULL ||\ + (m)->m_qe.tqe_next != NULL)) \ PANIC("mutex is on list"); \ } while (0) #else @@ -93,8 +93,6 @@ static int mutex_self_lock(pthread_mutex_t, const struct timespec *abstime); static int mutex_unlock_common(pthread_mutex_t *); -static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, - const struct timespec *); __weak_reference(__pthread_mutex_init, pthread_mutex_init); __strong_reference(__pthread_mutex_init, _pthread_mutex_init); @@ -147,7 +145,7 @@ pmutex->m_type = attr->m_type; pmutex->m_owner = NULL; - pmutex->m_count = 0; + pmutex->m_recurse = 0; pmutex->m_refcount = 0; pmutex->m_spinloops = 0; pmutex->m_yieldloops = 0; @@ -173,7 +171,7 @@ _thr_spinloops ? 
_thr_spinloops: MUTEX_ADAPTIVE_SPINS; pmutex->m_yieldloops = _thr_yieldloops; } - + TAILQ_INIT(&pmutex->m_waitq); *mutex = pmutex; return (0); } @@ -181,19 +179,19 @@ static int init_static(struct pthread *thread, pthread_mutex_t *mutex) { - int ret; + int error; THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); if (*mutex == THR_MUTEX_INITIALIZER) - ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc); + error = mutex_init(mutex, &_pthread_mutexattr_default, calloc); else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) - ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); + error = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); else - ret = 0; + error = 0; THR_LOCK_RELEASE(thread, &_mutex_static_lock); - return (ret); + return (error); } static void @@ -225,12 +223,12 @@ .m_protocol = PTHREAD_PRIO_NONE, .m_ceiling = 0 }; - int ret; + int error; - ret = mutex_init(mutex, &attr, calloc_cb); - if (ret == 0) + error = mutex_init(mutex, &attr, calloc_cb); + if (error == 0) (*mutex)->m_private = 1; - return (ret); + return (error); } void @@ -247,9 +245,17 @@ * process shared mutex is not supported, so I * am not worried. */ - - TAILQ_FOREACH(m, &curthread->mutexq, m_qe) + TAILQ_FOREACH(m, &curthread->mutexq, m_qe) { + if (IS_SIMPLE_MUTEX(m)) { + /* + * Just make pthread_mutex_unlock in child works, + * it is rahter odd, it depends on implementation. + */ + m->m_qlock = 0; + TAILQ_INIT(&m->m_waitq); + } m->m_lock.m_owner = TID(curthread); + } TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe) m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED; } @@ -284,20 +290,32 @@ (m)->m_owner = curthread; \ /* Add to the list of owned mutexes: */ \ MUTEX_ASSERT_NOT_OWNED((m)); \ - if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \ + if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \ TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\ else \ TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\ } while (0) +#define DEQUEUE_MUTEX(curthread, m) \ + (m)->m_owner = NULL; \ + /* Remove the mutex from the threads queue. */ \ + MUTEX_ASSERT_IS_OWNED(m); \ + if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \ + TAILQ_REMOVE(&curthread->mutexq, (m), m_qe); \ + else { \ + TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe); \ + set_inherited_priority(curthread, (m)); \ + } \ + MUTEX_INIT_LINK(m); + #define CHECK_AND_INIT_MUTEX \ if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \ if (m == THR_MUTEX_DESTROYED) \ return (EINVAL); \ - int ret; \ - ret = init_static(_get_curthread(), mutex); \ - if (ret) \ - return (ret); \ + int error; \ + error = init_static(_get_curthread(), mutex); \ + if (error) \ + return (error); \ m = *mutex; \ } @@ -307,20 +325,20 @@ struct pthread *curthread = _get_curthread(); struct pthread_mutex *m = *mutex; uint32_t id; - int ret; - + int error; + id = TID(curthread); if (m->m_private) THR_CRITICAL_ENTER(curthread); - ret = _thr_umutex_trylock(&m->m_lock, id); - if (__predict_true(ret == 0)) { + error = _thr_umutex_trylock(&m->m_lock, id); + if (__predict_true(error == 0)) { ENQUEUE_MUTEX(curthread, m); } else if (m->m_owner == curthread) { - ret = mutex_self_trylock(m); + error = mutex_self_trylock(m); } /* else {} */ - if (ret && m->m_private) + if (error != 0 && m->m_private) THR_CRITICAL_LEAVE(curthread); - return (ret); + return (error); } int @@ -333,94 +351,154 @@ return (mutex_trylock_common(mutex)); } +/* Lock user-mode queue based mutex. 
*/ static int -mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, +mutex_lock_queued(struct pthread_mutex *m, const struct timespec *abstime) { - uint32_t id, owner; - int count; - int ret; + struct pthread *curthread = _get_curthread(); + uint32_t owner, tid; + int error = 0; + int spin; - if (m->m_owner == curthread) - return mutex_self_lock(m, abstime); + spin = m->m_spinloops; + tid = TID(curthread); + for (;;) { + if (!_thr_is_smp) + goto sleep; - id = TID(curthread); - /* - * For adaptive mutexes, spin for a bit in the expectation - * that if the application requests this mutex type then - * the lock is likely to be released quickly and it is - * faster than entering the kernel - */ - if (__predict_false( - (m->m_lock.m_flags & - (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) - goto sleep_in_kernel; - - if (!_thr_is_smp) - goto yield_loop; - - count = m->m_spinloops; - while (count--) { - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { - ret = 0; - goto done; + while (spin-- > 0) { + /* + * For adaptive mutexes, spin for a bit in the expectation + * that if the application requests this mutex type then + * the lock is likely to be released quickly and it is + * faster than entering the kernel + */ + owner = m->m_lockword; + if ((owner & UMUTEX_TIDMASK) == 0) { + if (atomic_cmpset_acq_32(&m->m_lockword, owner, owner|tid)) { + ENQUEUE_MUTEX(curthread, m); + error = 0; + goto out; + } } + CPU_SPINWAIT; } - CPU_SPINWAIT; - } +sleep: + _thr_clear_wake(curthread); -yield_loop: - count = m->m_yieldloops; - while (count--) { - _sched_yield(); - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { - ret = 0; - goto done; + _thr_umtx_lock_spin(&m->m_qlock); + if (curthread->waitq_ptr == NULL) { + curthread->waitq_ptr = &m->m_waitq; + TAILQ_INSERT_TAIL(&m->m_waitq, curthread, wle); + } + _thr_umtx_unlock(&m->m_qlock); + owner = m->m_lockword; + /* Set contested bit. 
*/ + while ((owner & UMUTEX_TIDMASK) != 0 && (owner & UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_32(&m->m_lockword, + owner, owner|UMUTEX_CONTESTED)) + break; + owner = m->m_lockword; + } + if ((owner & UMUTEX_TIDMASK) != 0) { + error = _thr_sleep(curthread, abstime, CLOCK_REALTIME); + if (error != EINTR) { + if (curthread->waitq_ptr != NULL) { + _thr_umtx_lock_spin(&m->m_qlock); + if (curthread->waitq_ptr != NULL) + TAILQ_REMOVE(&m->m_waitq, curthread, wle); + _thr_umtx_unlock(&m->m_qlock); + curthread->waitq_ptr = NULL; + } + } else + error = 0; + owner = m->m_lockword; + } + if ((owner & UMUTEX_TIDMASK) == 0) { + if (atomic_cmpset_acq_32(&m->m_lockword, owner, owner|tid)) { + ENQUEUE_MUTEX(curthread, m); + error = 0; + break; } } + if (error != 0) + break; + spin = m->m_spinloops; } +out: + if (curthread->waitq_ptr != NULL) { + _thr_umtx_lock_spin(&m->m_qlock); + if (curthread->waitq_ptr != NULL) + TAILQ_REMOVE(&m->m_waitq, curthread, wle); + _thr_umtx_unlock(&m->m_qlock); + curthread->waitq_ptr = NULL; + } + return (error); +} -sleep_in_kernel: +/* Enter kernel and lock mutex */ +static int +mutex_lock_kernel(struct pthread_mutex *m, + const struct timespec *abstime) +{ + struct pthread *curthread = _get_curthread(); + uint32_t id; + int error; + + id = TID(curthread); if (abstime == NULL) { - ret = __thr_umutex_lock(&m->m_lock, id); - } else if (__predict_false( - abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000)) { - ret = EINVAL; + error = __thr_umutex_lock(&m->m_lock, id); } else { - ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); + error = __thr_umutex_timedlock(&m->m_lock, id, abstime); } -done: - if (ret == 0) + if (error == 0) ENQUEUE_MUTEX(curthread, m); - - return (ret); + return (error); } static inline int -mutex_lock_common(struct pthread_mutex *m, - const struct timespec *abstime) +_mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime) { - struct pthread *curthread = _get_curthread(); - int ret; + struct pthread *curthread = _get_curthread(); + int error; - if (m->m_private) - THR_CRITICAL_ENTER(curthread); if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { ENQUEUE_MUTEX(curthread, m); - ret = 0; + error = 0; + } else if (m->m_owner == curthread) { + error = mutex_self_lock(m, abstime); } else { - ret = mutex_lock_sleep(curthread, m, abstime); + if (__predict_false(abstime != NULL && + (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || + abstime->tv_nsec >= 1000000000))) + error = EINVAL; + else if (__predict_true(IS_SIMPLE_MUTEX(m))) + error = mutex_lock_queued(m, abstime); + else + error = mutex_lock_kernel(m, abstime); } - if (ret && m->m_private) - THR_CRITICAL_LEAVE(curthread); - return (ret); + return (error); } +static inline int +mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime) +{ + int error; + + if (!m->m_private) { + error = _mutex_lock_common(m, abstime); + } else { + struct pthread *curthread = _get_curthread(); + + THR_CRITICAL_ENTER(curthread); + error = _mutex_lock_common(m, abstime); + if (error != 0) + THR_CRITICAL_LEAVE(curthread); + } + return (error); +} + int __pthread_mutex_lock(pthread_mutex_t *mutex) { @@ -451,54 +529,39 @@ return (mutex_unlock_common(m)); } -int -_mutex_cv_lock(pthread_mutex_t *mutex, int count) -{ - struct pthread_mutex *m; - int ret; - - m = *mutex; - ret = mutex_lock_common(m, NULL); - if (ret == 0) { - m->m_refcount--; - m->m_count += count; - } - return (ret); -} - static int mutex_self_trylock(struct pthread_mutex *m) { - int ret; + int error; 
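mutex_lock_queued() above keeps ownership and the waiters-present hint in a single 32-bit word and only touches the wait queue on the slow path: spin briefly, enqueue, set the contested bit so the owner cannot unlock without noticing the queue, re-check the word, and only then sleep on the wake address. A condensed sketch of just the lock-word transitions (queue handling and sleeping are elided; CONTESTED and TIDMASK are stand-ins for the real UMUTEX_* flags):

#include <sys/types.h>
#include <stdint.h>
#include <machine/atomic.h>

#define	CONTESTED	0x80000000u	/* stand-in for UMUTEX_CONTESTED */
#define	TIDMASK		(~CONTESTED)	/* stand-in for UMUTEX_TIDMASK */

/* Try to become the owner; a contested bit already set is preserved. */
static int
lockword_try_grab(volatile uint32_t *lockword, uint32_t tid)
{
	uint32_t owner = *lockword;

	if ((owner & TIDMASK) != 0)
		return (0);		/* someone else owns it */
	return (atomic_cmpset_acq_32(lockword, owner, owner | tid));
}

/* Before sleeping, make sure the owner will see that waiters exist. */
static void
lockword_mark_contested(volatile uint32_t *lockword)
{
	uint32_t owner;

	while ((owner = *lockword) != 0 && (owner & CONTESTED) == 0) {
		if (atomic_cmpset_acq_32(lockword, owner, owner | CONTESTED))
			break;
	}
}

/*
 * Unlock fast path: only valid when no waiter was ever recorded.  When it
 * fails, the caller dequeues one waiter, stores CONTESTED or 0 depending
 * on whether the queue is now empty, and wakes the dequeued thread, which
 * is what mutex_unlock_queued() above does.
 */
static int
lockword_fast_unlock(volatile uint32_t *lockword, uint32_t tid)
{
	return (atomic_cmpset_rel_32(lockword, tid, 0));
}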
switch (m->m_type) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: - ret = EBUSY; + error = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ - if (m->m_count + 1 > 0) { - m->m_count++; - ret = 0; + if (m->m_recurse + 1 > 0) { + m->m_recurse++; + error = 0; } else - ret = EAGAIN; + error = EAGAIN; break; default: /* Trap invalid mutex types; */ - ret = EINVAL; + error = EINVAL; } - return (ret); + return (error); } static int mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) { struct timespec ts1, ts2; - int ret; + int error; switch (m->m_type) { case PTHREAD_MUTEX_ERRORCHECK: @@ -506,19 +569,19 @@ if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { - ret = EINVAL; + error = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); - ret = ETIMEDOUT; + error = ETIMEDOUT; } } else { /* * POSIX specifies that mutexes should return * EDEADLK if a recursive lock is detected. */ - ret = EDEADLK; + error = EDEADLK; } break; @@ -527,48 +590,76 @@ * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ - ret = 0; + error = 0; if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { - ret = EINVAL; + error = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); - ret = ETIMEDOUT; + error = ETIMEDOUT; } } else { - ts1.tv_sec = 30; - ts1.tv_nsec = 0; for (;;) - __sys_nanosleep(&ts1, NULL); + thr_suspend(NULL); } break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ - if (m->m_count + 1 > 0) { - m->m_count++; - ret = 0; + if (m->m_recurse + 1 > 0) { + m->m_recurse++; + error = 0; } else - ret = EAGAIN; + error = EAGAIN; break; default: /* Trap invalid mutex types; */ - ret = EINVAL; + error = EINVAL; } - return (ret); + return (error); } static int -mutex_unlock_common(pthread_mutex_t *mutex) +mutex_unlock_queued(struct pthread *curthread, struct pthread_mutex *m) { - struct pthread *curthread = _get_curthread(); + struct pthread *td; + unsigned int *wake_addr = NULL; + uint32_t tid = TID(curthread); + int32_t newval; + + if (!atomic_cmpset_rel_32(&m->m_lock.m_owner, tid, UMUTEX_UNOWNED)) { + _thr_umtx_lock_spin(&m->m_qlock); + if ((td = TAILQ_FIRST(&m->m_waitq)) != NULL) { + TAILQ_REMOVE(&m->m_waitq, td, wle); + td->waitq_ptr = NULL; + if (TAILQ_FIRST(&m->m_waitq) == NULL) + newval = UMUTEX_UNOWNED; + else + newval = UMUTEX_CONTESTED; + wake_addr = WAKE_ADDR(td); + } else { + newval = UMUTEX_UNOWNED; + wake_addr = NULL; + } + atomic_store_rel_32(&m->m_lockword, newval); + _thr_umtx_unlock(&m->m_qlock); + if (wake_addr != NULL) { + _thr_set_wake(wake_addr); + _thr_umtx_wake(wake_addr, INT_MAX, 0); + } + } + return (0); +} + +static inline int +mutex_owned(struct pthread *curthread, const pthread_mutex_t *mutex) +{ struct pthread_mutex *m; - uint32_t id; m = *mutex; if (__predict_false(m <= THR_MUTEX_DESTROYED)) { @@ -576,75 +667,105 @@ return (EINVAL); return (EPERM); } - /* * Check if the running thread is not the owner of the mutex. 
*/ if (__predict_false(m->m_owner != curthread)) return (EPERM); + return (0); +} - id = TID(curthread); +static inline int +_mutex_unlock_common(struct pthread *curthread, struct pthread_mutex *m) +{ + uint32_t tid; + + tid = TID(curthread); if (__predict_false( m->m_type == PTHREAD_MUTEX_RECURSIVE && - m->m_count > 0)) { - m->m_count--; - } else { - m->m_owner = NULL; - /* Remove the mutex from the threads queue. */ - MUTEX_ASSERT_IS_OWNED(m); - if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) - TAILQ_REMOVE(&curthread->mutexq, m, m_qe); - else { - TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); - set_inherited_priority(curthread, m); - } - MUTEX_INIT_LINK(m); - _thr_umutex_unlock(&m->m_lock, id); + m->m_recurse > 0)) { + m->m_recurse--; + return (0); } + DEQUEUE_MUTEX(curthread, m); + if (__predict_true(IS_SIMPLE_MUTEX(m))) + mutex_unlock_queued(curthread, m); + else + _thr_umutex_unlock(&m->m_lock, tid); + return (0); +} + +static int +mutex_unlock_common(pthread_mutex_t *mutex) +{ + struct pthread *curthread = _get_curthread(); + struct pthread_mutex *m; + int error; + + if ((error = mutex_owned(curthread, mutex)) != 0) + return (error); + m = *mutex; + _mutex_unlock_common(curthread, m); if (m->m_private) THR_CRITICAL_LEAVE(curthread); return (0); } int -_mutex_cv_unlock(pthread_mutex_t *mutex, int *count) +_mutex_cv_lock(pthread_mutex_t *mutex, int recurse, int detach) { struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m; + struct pthread_mutex *m; + int error; m = *mutex; - if (__predict_false(m <= THR_MUTEX_DESTROYED)) { - if (m == THR_MUTEX_DESTROYED) - return (EINVAL); - return (EPERM); + error = _mutex_lock_common(m, NULL); + if (error == 0) { + m->m_refcount--; + m->m_recurse += recurse; + if (detach == 0 && m->m_private) + THR_CRITICAL_ENTER(curthread); } + return (error); +} - /* - * Check if the running thread is not the owner of the mutex. - */ - if (__predict_false(m->m_owner != curthread)) - return (EPERM); +int +_mutex_cv_unlock(pthread_mutex_t *mutex, int *recurse) +{ + struct pthread *curthread = _get_curthread(); + struct pthread_mutex *m; + int error; + if ((error = mutex_owned(curthread, mutex)) != 0) + return (error); + m = *mutex; + *recurse = m->m_recurse; + m->m_recurse = 0; + m->m_refcount++; + _mutex_unlock_common(curthread, m); + if (m->m_private) + THR_CRITICAL_LEAVE(curthread); + return (0); +} + +int +_mutex_cv_detach(pthread_mutex_t *mutex, int *recurse) +{ + struct pthread *curthread = _get_curthread(); + struct pthread_mutex *m; + int error; + + if ((error = mutex_owned(curthread, mutex)) != 0) + return (error); + + m = *mutex; /* * Clear the count in case this is a recursive mutex. */ - *count = m->m_count; + *recurse = m->m_recurse; + m->m_recurse = 0; m->m_refcount++; - m->m_count = 0; - m->m_owner = NULL; - /* Remove the mutex from the threads queue. 
*/ - MUTEX_ASSERT_IS_OWNED(m); - if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) - TAILQ_REMOVE(&curthread->mutexq, m, m_qe); - else { - TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); - set_inherited_priority(curthread, m); - } - MUTEX_INIT_LINK(m); - _thr_umutex_unlock(&m->m_lock, TID(curthread)); - - if (m->m_private) - THR_CRITICAL_LEAVE(curthread); + DEQUEUE_MUTEX(curthread, m); return (0); } Index: thread/thr_list.c =================================================================== --- thread/thr_list.c (revision 214510) +++ thread/thr_list.c (working copy) @@ -193,6 +193,7 @@ thread->tcb = NULL; if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) { thr_destroy(curthread, thread); + _thr_release_wake_addr(thread->wake_addr); atomic_fetchadd_int(&total_threads, -1); } else { /* Index: thread/thr_private.h =================================================================== --- thread/thr_private.h (revision 214510) +++ thread/thr_private.h (working copy) @@ -135,14 +135,25 @@ #define THR_RWLOCK_INITIALIZER ((struct pthread_rwlock *)NULL) #define THR_RWLOCK_DESTROYED ((struct pthread_rwlock *)1) +typedef TAILQ_HEAD(waitqueue, pthread) waitqueue_t; + struct pthread_mutex { /* * Lock for accesses to this structure. */ - struct umutex m_lock; + union { + struct umutex klock; + struct { + volatile uint32_t owner; + uint32_t flags; + uint32_t qlock; + waitqueue_t waitq; + } slock; + } m_lockdata; + enum pthread_mutextype m_type; - struct pthread *m_owner; - int m_count; + struct pthread *volatile m_owner; + int m_recurse; int m_refcount; int m_spinloops; int m_yieldloops; @@ -153,6 +164,21 @@ TAILQ_ENTRY(pthread_mutex) m_qe; }; +#define m_lock m_lockdata.klock +#define m_slock m_lockdata.slock +#define m_lockflags m_lockdata.klock.m_flags +#define m_lockword m_lockdata.slock.owner +#define m_waitq m_lockdata.slock.waitq +#define m_qlock m_lockdata.slock.qlock + +#define IS_SIMPLE_MUTEX(m) \ + (((m)->m_lockflags & \ + (UMUTEX_PRIO_INHERIT|UMUTEX_PRIO_PROTECT|USYNC_PROCESS_SHARED)) == 0) + +#ifndef UMUTEX_TIDMASK +#define UMUTEX_TIDMASK (~UMUTEX_CONTESTED) +#endif + struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; @@ -163,12 +189,20 @@ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } struct pthread_cond { - struct umutex c_lock; - struct ucond c_kerncv; - int c_pshared; - int c_clockid; + struct ucond c_kerncv; + struct { + umtx_t qlock; + waitqueue_t waitq; + pthread_mutex_t *mutex; + } c_usercv; + int c_pshared; + int c_clockid; }; +#define c_qlock c_usercv.qlock +#define c_waitq c_usercv.waitq +#define c_mutex c_usercv.mutex + struct pthread_cond_attr { int c_pshared; int c_clockid; @@ -245,6 +279,11 @@ size_t cpusetsize; }; +struct wake_addr { + struct wake_addr *link; + unsigned value; +}; + /* * Thread creation state attributes. */ @@ -356,6 +395,12 @@ /* Hash queue entry. */ LIST_ENTRY(pthread) hle; + TAILQ_ENTRY(pthread) wle; /* link for all threads in process */ + waitqueue_t *waitq_ptr; + struct wake_addr *wake_addr; + +#define WAKE_ADDR(td) (&(td)->wake_addr->value) + /* Threads reference count. 
*/ int refcount; @@ -507,6 +552,9 @@ #define THR_UMUTEX_LOCK(thrd, lck) \ _thr_umutex_lock((lck), TID(thrd)) +#define THR_UMUTEX_LOCK_SPIN(thrd, lck) \ + _thr_umutex_lock_spin((lck), TID(thrd)) + #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ _thr_umutex_timedlock((lck), TID(thrd), (timo)) @@ -671,8 +719,9 @@ */ __BEGIN_DECLS int _thr_setthreaded(int) __hidden; -int _mutex_cv_lock(pthread_mutex_t *, int count) __hidden; -int _mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden; +int _mutex_cv_lock(pthread_mutex_t *, int count, int); +int _mutex_cv_unlock(pthread_mutex_t *, int *count); +int _mutex_cv_detach(pthread_mutex_t *, int *count); int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; void _libpthread_init(struct pthread *) __hidden; @@ -719,6 +768,9 @@ void _thr_signal_postfork(void) __hidden; void _thr_signal_postfork_child(void) __hidden; void _thr_try_gc(struct pthread *, struct pthread *) __hidden; +struct wake_addr *_thr_alloc_wake_addr(void); +void _thr_release_wake_addr(struct wake_addr *); +int _thr_sleep(struct pthread *, const struct timespec *, int); int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) __hidden; int _schedparam_to_rtp(int policy, const struct sched_param *param, @@ -797,6 +849,24 @@ _libpthread_init(NULL); } +static inline void +_mutex_set_contested(struct pthread_mutex *m) +{ + atomic_set_32(&m->m_lockword, UMUTEX_CONTESTED); +} + +static inline void +_thr_clear_wake(struct pthread *curthread) +{ + curthread->wake_addr->value = 0; +} + +static inline void +_thr_set_wake(unsigned int *addr) +{ + *addr = 1; +} + struct dl_phdr_info; void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info); void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden; Index: thread/thr_umtx.c =================================================================== --- thread/thr_umtx.c (revision 214510) +++ thread/thr_umtx.c (working copy) @@ -74,7 +74,40 @@ return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); } +#define SPINLOOPS 2000 + int +__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) +{ + uint32_t owner; + + if (!_thr_is_smp) + return __thr_umutex_lock(mtx, id); + + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { + for (;;) { + int count = SPINLOOPS; + while (count--) { + owner = mtx->m_owner; + if ((owner & ~UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_32( + &mtx->m_owner, + owner, id|owner)) { + return (0); + } + } + CPU_SPINWAIT; + } + + /* wait in kernel */ + _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); + } + } + + return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); +} + +int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *ets) { @@ -263,3 +296,57 @@ if (_thr_rwlock_unlock(rwlock)) PANIC("unlock error"); } + +int +__thr_umtx_lock(volatile umtx_t *mtx) +{ + int v; + + do { + v = *mtx; + if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) + _thr_umtx_wait_uint(mtx, 2, NULL, 0); + } while (!atomic_cmpset_acq_int(mtx, 0, 2)); + return (0); +} + +#define LOOPS 500 + +int +__thr_umtx_lock_spin(volatile umtx_t *mtx) +{ + int v; + int i; + + if (!_thr_is_smp) + return _thr_umtx_lock(mtx); + + do { + i = LOOPS; + while (i-- > 0) { + if (*mtx == 0) + break; + CPU_SPINWAIT; + } + v = *mtx; + if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) + _thr_umtx_wait_uint(mtx, 2, NULL, 0); + } while (!atomic_cmpset_acq_int(mtx, 0, 2)); + return (0); +} +void +__thr_umtx_unlock(volatile umtx_t *mtx) +{ + int v; + + for (;;) { + v 
= *mtx; + if (atomic_cmpset_acq_int(mtx, v, v-1)) { + if (v != 1) { + *mtx = 0; + _thr_umtx_wake(mtx, 1, 0); + } + break; + } + } +} Index: thread/thr_printf.c =================================================================== --- thread/thr_printf.c (revision 214510) +++ thread/thr_printf.c (working copy) @@ -58,8 +58,14 @@ int c; long d; int islong; + static struct umutex m; + struct pthread *curtd = _get_curthread(); + if (curtd) + THR_UMUTEX_LOCK(curtd, &m); + va_start(ap, fmt); + while ((c = *fmt++)) { islong = 0; if (c == '%') { @@ -111,6 +117,8 @@ } out: va_end(ap); + if (curtd) + THR_UMUTEX_UNLOCK(curtd, &m); } /* Index: thread/thr_umtx.h =================================================================== --- thread/thr_umtx.h (revision 214510) +++ thread/thr_umtx.h (working copy) @@ -35,7 +35,10 @@ #define DEFAULT_UMUTEX {0,0,{0,0},{0,0,0,0}} #define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}} +typedef uint32_t umtx_t; + int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden; +int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden; int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *timeout) __hidden; int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden; @@ -66,6 +69,10 @@ void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden; void _thr_rwl_unlock(struct urwlock *rwlock) __hidden; +int __thr_umtx_lock(volatile umtx_t *mtx); +int __thr_umtx_lock_spin(volatile umtx_t *mtx); +void __thr_umtx_unlock(volatile umtx_t *mtx); + static inline int _thr_umutex_trylock(struct umutex *mtx, uint32_t id) { @@ -97,6 +104,14 @@ } static inline int +_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) +{ + if (_thr_umutex_trylock2(mtx, id) == 0) + return (0); + return (__thr_umutex_lock_spin(mtx, id)); +} + +static inline int _thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *timeout) { @@ -193,4 +208,43 @@ } return (__thr_rwlock_unlock(rwlock)); } + +static inline void +_thr_umtx_init(volatile umtx_t *mtx) +{ + *mtx = 0; +} + +static inline int +_thr_umtx_trylock(volatile umtx_t *mtx) +{ + if (atomic_cmpset_acq_int(mtx, 0, 1)) + return (0); + return (EBUSY); +} + +static inline int +_thr_umtx_lock(volatile umtx_t *mtx) +{ + if (atomic_cmpset_acq_int(mtx, 0, 1)) + return (0); + return (__thr_umtx_lock(mtx)); +} + +static inline int +_thr_umtx_lock_spin(volatile umtx_t *mtx) +{ + if (atomic_cmpset_acq_int(mtx, 0, 1)) + return (0); + return (__thr_umtx_lock_spin(mtx)); +} + +static inline void +_thr_umtx_unlock(volatile umtx_t *mtx) +{ + if (atomic_cmpset_acq_int(mtx, 1, 0)) + return; + __thr_umtx_unlock(mtx); +} + #endif Index: thread/thr_init.c =================================================================== --- thread/thr_init.c (revision 214510) +++ thread/thr_init.c (working copy) @@ -387,6 +387,8 @@ -1, 0) == MAP_FAILED) PANIC("Cannot allocate red zone for initial thread"); + thread->wake_addr = _thr_alloc_wake_addr(); + /* * Mark the stack as an application supplied stack so that it * isn't deallocated. Index: thread/thr_create.c =================================================================== --- thread/thr_create.c (revision 214510) +++ thread/thr_create.c (working copy) @@ -166,6 +166,8 @@ SIGDELSET(new_thread->sigmask, SIGCANCEL); } + new_thread->wake_addr = _thr_alloc_wake_addr(); + ret = thr_new(¶m, sizeof(param)); if (ret != 0) {
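The umtx_t routines added to thr_umtx.c and thr_umtx.h above implement the internal queue locks (c_qlock, m_qlock) as the familiar three-state futex lock: 0 unlocked, 1 locked, 2 locked with possible waiters. The same protocol as a self-contained sketch against the public _umtx_op(2) interface (illustrative names; the patch's versions add the inline trylock fast paths and an adaptive-spin variant):

#include <sys/types.h>
#include <sys/umtx.h>
#include <machine/atomic.h>

typedef volatile u_int	qlock_t;	/* 0: unlocked, 1: locked, 2: locked + waiters */

static void
qlock_lock(qlock_t *lk)
{
	u_int v;

	if (atomic_cmpset_acq_int(lk, 0, 1))
		return;				/* uncontended fast path */
	do {
		v = *lk;
		/* Advertise a waiter (1 -> 2), then sleep while it stays 2. */
		if (v == 2 || atomic_cmpset_acq_int(lk, 1, 2))
			_umtx_op(__DEVOLATILE(void *, lk), UMTX_OP_WAIT_UINT,
			    2, NULL, NULL);
	} while (!atomic_cmpset_acq_int(lk, 0, 2));	/* keep "waiters" asserted */
}

static void
qlock_unlock(qlock_t *lk)
{
	u_int v;

	for (;;) {
		v = *lk;
		if (atomic_cmpset_rel_int(lk, v, 0)) {
			if (v != 1)	/* someone may be sleeping */
				_umtx_op(__DEVOLATILE(void *, lk),
				    UMTX_OP_WAKE, 1, NULL, NULL);
			return;
		}
	}
}

The patch's __thr_umtx_unlock() reaches the same end state in two steps (decrement, then store 0 and wake); collapsing that into a single release store, as above, is the conventional form of the same protocol.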