Index: libc/i386/gen/makecontext.c =================================================================== RCS file: /usr/cvs/src/lib/libc/i386/gen/makecontext.c,v retrieving revision 1.4 diff -u -r1.4 makecontext.c --- libc/i386/gen/makecontext.c 2 Jul 2004 14:19:44 -0000 1.4 +++ libc/i386/gen/makecontext.c 25 Jan 2005 15:36:31 -0000 @@ -93,7 +93,7 @@ * * _ctx_start() - context start wrapper * start() - user start routine - * arg1 + * arg1 - first argument, aligned(16) * ... * argn * ucp - this context, %ebp points here @@ -110,15 +110,17 @@ * (uc_link != 0) or exit the program (uc_link == 0). */ stack_top = (char *)(ucp->uc_stack.ss_sp + - ucp->uc_stack.ss_size - sizeof(double)); - stack_top = (char *)ALIGN(stack_top); + ucp->uc_stack.ss_size - sizeof(intptr_t)); /* * Adjust top of stack to allow for 3 pointers (return * address, _ctx_start, and ucp) and argc arguments. - * We allow the arguments to be pointers also. + * We allow the arguments to be pointers also. The first + * argument to the user function must be properly aligned. 
*/ - stack_top = stack_top - (sizeof(intptr_t) * (3 + argc)); + stack_top = stack_top - (sizeof(intptr_t) * (1 + argc)); + stack_top = (char *)((unsigned)stack_top & ~15); + stack_top = stack_top - (2 * sizeof(intptr_t)); argp = (intptr_t *)stack_top; /* Index: libpthread/pthread.map =================================================================== RCS file: /usr/cvs/src/lib/libpthread/pthread.map,v retrieving revision 1.12.2.1 diff -u -r1.12.2.1 pthread.map --- libpthread/pthread.map 11 Oct 2004 20:04:58 -0000 1.12.2.1 +++ libpthread/pthread.map 25 Jan 2005 15:47:26 -0000 @@ -14,6 +14,7 @@ __poll; __pthread_cond_timedwait; __pthread_cond_wait; + __pthread_mutex_init; __pthread_mutex_lock; __pthread_mutex_trylock; __pthread_mutex_timedlock; Index: libpthread/arch/amd64/amd64/pthread_md.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/arch/amd64/amd64/pthread_md.c,v retrieving revision 1.3 diff -u -r1.3 pthread_md.c --- libpthread/arch/amd64/amd64/pthread_md.c 15 Aug 2004 16:28:04 -0000 1.3 +++ libpthread/arch/amd64/amd64/pthread_md.c 25 Jan 2005 15:47:26 -0000 @@ -66,10 +66,11 @@ { struct kcb *kcb; - if ((kcb = malloc(sizeof(struct kcb))) != NULL) { + kcb = malloc(sizeof(struct kcb)); + if (kcb != NULL) { bzero(kcb, sizeof(struct kcb)); - kcb->kcb_kse = kse; kcb->kcb_self = kcb; + kcb->kcb_kse = kse; } return (kcb); } Index: libpthread/arch/amd64/include/pthread_md.h =================================================================== RCS file: /usr/cvs/src/lib/libpthread/arch/amd64/include/pthread_md.h,v retrieving revision 1.9 diff -u -r1.9 pthread_md.h --- libpthread/arch/amd64/include/pthread_md.h 16 Aug 2004 03:27:28 -0000 1.9 +++ libpthread/arch/amd64/include/pthread_md.h 25 Jan 2005 15:47:26 -0000 @@ -91,7 +91,7 @@ __asm __volatile("movq %%fs:%1, %0" \ : "=r" (__i) \ : "m" (*(u_long *)(__kcb_offset(name)))); \ - __result = *(__kcb_type(name) *)&__i; \ + __result = (__kcb_type(name))__i; \ \ 
__result; \ }) @@ -103,7 +103,7 @@ __kcb_type(name) __val = (val); \ \ u_long __i; \ - __i = *(u_long *)&__val; \ + __i = (u_long)__val; \ __asm __volatile("movq %1,%%fs:%0" \ : "=m" (*(u_long *)(__kcb_offset(name))) \ : "r" (__i)); \ Index: libpthread/arch/i386/i386/pthread_md.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/arch/i386/i386/pthread_md.c,v retrieving revision 1.3 diff -u -r1.3 pthread_md.c --- libpthread/arch/i386/i386/pthread_md.c 15 Aug 2004 16:28:05 -0000 1.3 +++ libpthread/arch/i386/i386/pthread_md.c 25 Jan 2005 15:47:26 -0000 @@ -76,7 +76,9 @@ struct kcb * _kcb_ctor(struct kse *kse) { +#ifndef COMPAT_32BIT union descriptor ldt; +#endif struct kcb *kcb; kcb = malloc(sizeof(struct kcb)); @@ -84,6 +86,7 @@ bzero(kcb, sizeof(struct kcb)); kcb->kcb_self = kcb; kcb->kcb_kse = kse; +#ifndef COMPAT_32BIT ldt.sd.sd_hibase = (unsigned int)kcb >> 24; ldt.sd.sd_lobase = (unsigned int)kcb & 0xFFFFFF; ldt.sd.sd_hilimit = (sizeof(struct kcb) >> 16) & 0xF; @@ -99,6 +102,7 @@ free(kcb); return (NULL); } +#endif } return (kcb); } @@ -106,9 +110,11 @@ void _kcb_dtor(struct kcb *kcb) { +#ifndef COMPAT_32BIT if (kcb->kcb_ldt >= 0) { i386_set_ldt(kcb->kcb_ldt, NULL, 1); kcb->kcb_ldt = -1; /* just in case */ } +#endif free(kcb); } Index: libpthread/arch/i386/include/pthread_md.h =================================================================== RCS file: /usr/cvs/src/lib/libpthread/arch/i386/include/pthread_md.h,v retrieving revision 1.10 diff -u -r1.10 pthread_md.h --- libpthread/arch/i386/include/pthread_md.h 16 Aug 2004 03:27:29 -0000 1.10 +++ libpthread/arch/i386/include/pthread_md.h 25 Jan 2005 15:47:26 -0000 @@ -32,7 +32,9 @@ #define _PTHREAD_MD_H_ #include +#include #include +#include #include extern int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *); @@ -89,7 +91,7 @@ __asm __volatile("movl %%gs:%1, %0" \ : "=r" (__i) \ : "m" (*(u_int *)(__kcb_offset(name)))); \ - __result = *(__kcb_type(name) 
*)&__i; \ + __result = (__kcb_type(name))__i; \ \ __result; \ }) @@ -101,7 +103,7 @@ __kcb_type(name) __val = (val); \ \ u_int __i; \ - __i = *(u_int *)&__val; \ + __i = (u_int)__val; \ __asm __volatile("movl %1,%%gs:%0" \ : "=m" (*(u_int *)(__kcb_offset(name))) \ : "r" (__i)); \ @@ -150,10 +152,15 @@ static __inline void _kcb_set(struct kcb *kcb) { +#ifndef COMPAT_32BIT int val; val = (kcb->kcb_ldt << 3) | 7; __asm __volatile("movl %0, %%gs" : : "r" (val)); +#else + _amd64_set_gsbase(kcb); +#endif + } /* Get the current kcb. */ Index: libpthread/sys/thr_error.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/sys/thr_error.c,v retrieving revision 1.7 diff -u -r1.7 thr_error.c --- libpthread/sys/thr_error.c 23 Apr 2003 21:46:50 -0000 1.7 +++ libpthread/sys/thr_error.c 25 Jan 2005 15:47:26 -0000 @@ -37,6 +37,7 @@ #include "libc_private.h" #include "thr_private.h" +#undef errno extern int errno; int * Index: libpthread/thread/thr_cancel.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_cancel.c,v retrieving revision 1.31 diff -u -r1.31 thr_cancel.c --- libpthread/thread/thr_cancel.c 9 Dec 2003 02:20:56 -0000 1.31 +++ libpthread/thread/thr_cancel.c 25 Jan 2005 15:47:26 -0000 @@ -14,18 +14,26 @@ static inline int checkcancel(struct pthread *curthread) { - if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) && - ((curthread->cancelflags & THR_CANCELLING) != 0)) { + if ((curthread->cancelflags & THR_CANCELLING) != 0) { /* * It is possible for this thread to be swapped out * while performing cancellation; do not allow it * to be cancelled again. */ - curthread->cancelflags &= ~THR_CANCELLING; - return (1); + if ((curthread->flags & THR_FLAGS_EXITING) != 0) { + /* + * This may happen once, but after this, it + * shouldn't happen again. 
+ */ + curthread->cancelflags &= ~THR_CANCELLING; + return (0); + } + if ((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) { + curthread->cancelflags &= ~THR_CANCELLING; + return (1); + } } - else - return (0); + return (0); } static inline void Index: libpthread/thread/thr_clean.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_clean.c,v retrieving revision 1.8 diff -u -r1.8 thr_clean.c --- libpthread/thread/thr_clean.c 18 Apr 2003 05:04:15 -0000 1.8 +++ libpthread/thread/thr_clean.c 25 Jan 2005 15:47:26 -0000 @@ -50,6 +50,7 @@ malloc(sizeof(struct pthread_cleanup))) != NULL) { new->routine = routine; new->routine_arg = routine_arg; + new->onstack = 0; new->next = curthread->cleanup; curthread->cleanup = new; @@ -67,6 +68,7 @@ if (execute) { old->routine(old->routine_arg); } - free(old); + if (old->onstack == 0) + free(old); } } Index: libpthread/thread/thr_concurrency.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_concurrency.c,v retrieving revision 1.8 diff -u -r1.8 thr_concurrency.c --- libpthread/thread/thr_concurrency.c 14 Mar 2004 05:24:27 -0000 1.8 +++ libpthread/thread/thr_concurrency.c 25 Jan 2005 15:47:26 -0000 @@ -84,6 +84,13 @@ int i; int ret; + /* + * Turn on threaded mode, if failed, it is unnecessary to + * do further work. + */ + if (_kse_isthreaded() == 0 && _kse_setthreaded(1)) + return (EAGAIN); + ret = 0; curthread = _get_curthread(); /* Race condition, but so what. 
*/ Index: libpthread/thread/thr_cond.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_cond.c,v retrieving revision 1.51 diff -u -r1.51 thr_cond.c --- libpthread/thread/thr_cond.c 9 Dec 2003 02:20:56 -0000 1.51 +++ libpthread/thread/thr_cond.c 25 Jan 2005 15:47:26 -0000 @@ -47,6 +47,9 @@ static inline struct pthread *cond_queue_deq(pthread_cond_t); static inline void cond_queue_remove(pthread_cond_t, pthread_t); static inline void cond_queue_enq(pthread_cond_t, pthread_t); +static void cond_wait_backout(void *); +static inline void check_continuation(struct pthread *, + struct pthread_cond *, pthread_mutex_t *); /* * Double underscore versions are cancellation points. Single underscore @@ -171,8 +174,7 @@ struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; - int interrupted = 0; - int unlock_mutex = 1; + int mutex_locked = 1; int seqno; if (cond == NULL) @@ -198,10 +200,11 @@ * and backed out of the waiting queue prior to executing the * signal handler. */ - do { - /* Lock the condition variable structure: */ - THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock); + /* Lock the condition variable structure: */ + THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock); + seqno = (*cond)->c_seqno; + do { /* * If the condvar was statically allocated, properly * initialize the tail queue. 
@@ -217,9 +220,6 @@ case COND_TYPE_FAST: if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && ((*cond)->c_mutex != *mutex))) { - /* Unlock the condition variable structure: */ - THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); - /* Return invalid argument error: */ rval = EINVAL; } else { @@ -233,15 +233,11 @@ */ cond_queue_enq(*cond, curthread); - /* Remember the mutex and sequence number: */ - (*cond)->c_mutex = *mutex; - seqno = (*cond)->c_seqno; - /* Wait forever: */ curthread->wakeup_time.tv_sec = -1; /* Unlock the mutex: */ - if ((unlock_mutex != 0) && + if (mutex_locked && ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex, so remove @@ -249,22 +245,18 @@ * variable queue: */ cond_queue_remove(*cond, curthread); - - /* Check for no more waiters: */ - if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) - (*cond)->c_mutex = NULL; - - /* Unlock the condition variable structure: */ - THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); } else { + /* Remember the mutex: */ + (*cond)->c_mutex = *mutex; + /* * Don't unlock the mutex the next * time through the loop (if the * thread has to be requeued after * handling a signal). */ - unlock_mutex = 0; + mutex_locked = 0; /* * This thread is active and is in a @@ -277,6 +269,7 @@ /* Remember the CV: */ curthread->data.cond = *cond; + curthread->sigbackout = cond_wait_backout; THR_SCHED_UNLOCK(curthread, curthread); /* Unlock the CV structure: */ @@ -286,8 +279,6 @@ /* Schedule the next thread: */ _thr_sched_switch(curthread); - curthread->data.cond = NULL; - /* * XXX - This really isn't a good check * since there can be more than one @@ -299,41 +290,39 @@ * should be sent "as soon as possible". */ done = (seqno != (*cond)->c_seqno); - - if (THR_IN_SYNCQ(curthread)) { + if (done && !THR_IN_CONDQ(curthread)) { /* - * Lock the condition variable - * while removing the thread. + * The thread is dequeued, so + * it is safe to clear these. 
*/ - THR_LOCK_ACQUIRE(curthread, - &(*cond)->c_lock); + curthread->data.cond = NULL; + curthread->sigbackout = NULL; + check_continuation(curthread, + NULL, mutex); + return (_mutex_cv_lock(mutex)); + } + + /* Relock the CV structure: */ + THR_LOCK_ACQUIRE(curthread, + &(*cond)->c_lock); + + /* + * Clear these after taking the lock to + * prevent a race condition where a + * signal can arrive before dequeueing + * the thread. + */ + curthread->data.cond = NULL; + curthread->sigbackout = NULL; + done = (seqno != (*cond)->c_seqno); + if (THR_IN_CONDQ(curthread)) { cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; - - THR_LOCK_RELEASE(curthread, - &(*cond)->c_lock); - } - - /* - * Save the interrupted flag; locking - * the mutex may destroy it. - */ - interrupted = curthread->interrupted; - - /* - * Note that even though this thread may - * have been canceled, POSIX requires - * that the mutex be reaquired prior to - * cancellation. - */ - if (done || interrupted) { - rval = _mutex_cv_lock(mutex); - unlock_mutex = 1; } } } @@ -341,18 +330,21 @@ /* Trap invalid condition variable types: */ default: - /* Unlock the condition variable structure: */ - THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); - /* Return an invalid argument error: */ rval = EINVAL; break; } - if ((interrupted != 0) && (curthread->continuation != NULL)) - curthread->continuation((void *) curthread); + check_continuation(curthread, *cond, + mutex_locked ? 
NULL : mutex); } while ((done == 0) && (rval == 0)); + /* Unlock the condition variable structure: */ + THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); + + if (mutex_locked == 0) + _mutex_cv_lock(mutex); + /* Return the completion status: */ return (rval); } @@ -378,8 +370,7 @@ struct pthread *curthread = _get_curthread(); int rval = 0; int done = 0; - int interrupted = 0; - int unlock_mutex = 1; + int mutex_locked = 1; int seqno; THR_ASSERT(curthread->locklevel == 0, @@ -407,10 +398,11 @@ * and backed out of the waiting queue prior to executing the * signal handler. */ - do { - /* Lock the condition variable structure: */ - THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock); + /* Lock the condition variable structure: */ + THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock); + seqno = (*cond)->c_seqno; + do { /* * If the condvar was statically allocated, properly * initialize the tail queue. @@ -428,9 +420,6 @@ ((*cond)->c_mutex != *mutex))) { /* Return invalid argument error: */ rval = EINVAL; - - /* Unlock the condition variable structure: */ - THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); } else { /* Set the wakeup time: */ curthread->wakeup_time.tv_sec = abstime->tv_sec; @@ -447,12 +436,8 @@ */ cond_queue_enq(*cond, curthread); - /* Remember the mutex and sequence number: */ - (*cond)->c_mutex = *mutex; - seqno = (*cond)->c_seqno; - /* Unlock the mutex: */ - if ((unlock_mutex != 0) && + if (mutex_locked && ((rval = _mutex_cv_unlock(mutex)) != 0)) { /* * Cannot unlock the mutex; remove the @@ -460,21 +445,17 @@ * variable queue: */ cond_queue_remove(*cond, curthread); - - /* Check for no more waiters: */ - if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) - (*cond)->c_mutex = NULL; - - /* Unlock the condition variable structure: */ - THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); } else { + /* Remember the mutex: */ + (*cond)->c_mutex = *mutex; + /* * Don't unlock the mutex the next * time through the loop (if the * thread has to be requeued after * handling a signal). 
*/ - unlock_mutex = 0; + mutex_locked = 0; /* * This thread is active and is in a @@ -487,6 +468,7 @@ /* Remember the CV: */ curthread->data.cond = *cond; + curthread->sigbackout = cond_wait_backout; THR_SCHED_UNLOCK(curthread, curthread); /* Unlock the CV structure: */ @@ -496,8 +478,6 @@ /* Schedule the next thread: */ _thr_sched_switch(curthread); - curthread->data.cond = NULL; - /* * XXX - This really isn't a good check * since there can be more than one @@ -509,38 +489,45 @@ * should be sent "as soon as possible". */ done = (seqno != (*cond)->c_seqno); - - if (THR_IN_CONDQ(curthread)) { + if (done && !THR_IN_CONDQ(curthread)) { /* - * Lock the condition variable - * while removing the thread. + * The thread is dequeued, so + * it is safe to clear these. */ - THR_LOCK_ACQUIRE(curthread, - &(*cond)->c_lock); + curthread->data.cond = NULL; + curthread->sigbackout = NULL; + check_continuation(curthread, + NULL, mutex); + return (_mutex_cv_lock(mutex)); + } + /* Relock the CV structure: */ + THR_LOCK_ACQUIRE(curthread, + &(*cond)->c_lock); + + /* + * Clear these after taking the lock to + * prevent a race condition where a + * signal can arrive before dequeueing + * the thread. + */ + curthread->data.cond = NULL; + curthread->sigbackout = NULL; + + done = (seqno != (*cond)->c_seqno); + + if (THR_IN_CONDQ(curthread)) { cond_queue_remove(*cond, curthread); /* Check for no more waiters: */ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) (*cond)->c_mutex = NULL; - - THR_LOCK_RELEASE(curthread, - &(*cond)->c_lock); } - /* - * Save the interrupted flag; locking - * the mutex may destroy it. - */ - interrupted = curthread->interrupted; if (curthread->timeout != 0) { /* The wait timedout. 
*/ rval = ETIMEDOUT; - (void)_mutex_cv_lock(mutex); - } else if (interrupted || done) { - rval = _mutex_cv_lock(mutex); - unlock_mutex = 1; } } } @@ -548,18 +535,21 @@ /* Trap invalid condition variable types: */ default: - /* Unlock the condition variable structure: */ - THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); - /* Return an invalid argument error: */ rval = EINVAL; break; } - if ((interrupted != 0) && (curthread->continuation != NULL)) - curthread->continuation((void *)curthread); + check_continuation(curthread, *cond, + mutex_locked ? NULL : mutex); } while ((done == 0) && (rval == 0)); + /* Unlock the condition variable structure: */ + THR_LOCK_RELEASE(curthread, &(*cond)->c_lock); + + if (mutex_locked == 0) + _mutex_cv_lock(mutex); + /* Return the completion status: */ return (rval); } @@ -615,6 +605,7 @@ != NULL) { THR_SCHED_LOCK(curthread, pthread); cond_queue_remove(*cond, pthread); + pthread->sigbackout = NULL; if ((pthread->kseg == curthread->kseg) && (pthread->active_priority > curthread->active_priority)) @@ -681,6 +672,7 @@ != NULL) { THR_SCHED_LOCK(curthread, pthread); cond_queue_remove(*cond, pthread); + pthread->sigbackout = NULL; if ((pthread->kseg == curthread->kseg) && (pthread->active_priority > curthread->active_priority)) @@ -712,9 +704,31 @@ __strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast); -void -_cond_wait_backout(struct pthread *curthread) +static inline void +check_continuation(struct pthread *curthread, struct pthread_cond *cond, + pthread_mutex_t *mutex) +{ + if ((curthread->interrupted != 0) && + (curthread->continuation != NULL)) { + if (cond != NULL) + /* Unlock the condition variable structure: */ + THR_LOCK_RELEASE(curthread, &cond->c_lock); + /* + * Note that even though this thread may have been + * canceled, POSIX requires that the mutex be + * reacquired prior to cancellation. 
+ */ + if (mutex != NULL) + _mutex_cv_lock(mutex); + curthread->continuation((void *) curthread); + PANIC("continuation returned in pthread_cond_wait.\n"); + } +} + +static void +cond_wait_backout(void *arg) { + struct pthread *curthread = (struct pthread *)arg; pthread_cond_t cond; cond = curthread->data.cond; @@ -740,6 +754,8 @@ /* Unlock the condition variable structure: */ THR_LOCK_RELEASE(curthread, &cond->c_lock); } + /* No need to call this again. */ + curthread->sigbackout = NULL; } /* Index: libpthread/thread/thr_create.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_create.c,v retrieving revision 1.57.2.1 diff -u -r1.57.2.1 thr_create.c --- libpthread/thread/thr_create.c 31 Oct 2004 04:21:43 -0000 1.57.2.1 +++ libpthread/thread/thr_create.c 25 Jan 2005 15:47:26 -0000 @@ -171,9 +171,6 @@ /* No thread is wanting to join to this one: */ new_thread->joiner = NULL; - /* Initialize the signal frame: */ - new_thread->curframe = NULL; - /* * Initialize the machine context. * Enter a critical region to get consistent context. @@ -235,6 +232,7 @@ new_thread->cleanup = NULL; new_thread->flags = 0; new_thread->tlflags = 0; + new_thread->sigbackout = NULL; new_thread->continuation = NULL; new_thread->wakeup_time.tv_sec = -1; new_thread->lock_switch = 0; Index: libpthread/thread/thr_exit.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_exit.c,v retrieving revision 1.38 diff -u -r1.38 thr_exit.c --- libpthread/thread/thr_exit.c 12 Aug 2004 12:12:12 -0000 1.38 +++ libpthread/thread/thr_exit.c 25 Jan 2005 15:47:26 -0000 @@ -105,7 +105,20 @@ THR_SCHED_LOCK(curthread, curthread); curthread->flags |= THR_FLAGS_EXITING; THR_SCHED_UNLOCK(curthread, curthread); - + + /* + * To avoid signal-lost problem, if signals had already been + * delivered to us, handle it. 
We have already set EXITING flag + * so no new signals should be delivered to us. + * XXX this is not enough if signal was delivered just before + * thread called sigprocmask and masked it! In this case, we + * might have to re-post the signal by kill() if the signal + * is targeting the process (not for a specified thread). + * Kernel has same signal-lost problem, a signal may be delivered + * to a thread which is on the way to call sigprocmask or thr_exit()! + */ + if (curthread->check_pending) + _thr_sig_check_pending(curthread); /* Save the return value: */ curthread->ret = status; while (curthread->cleanup != NULL) { Index: libpthread/thread/thr_fork.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_fork.c,v retrieving revision 1.34 diff -u -r1.34 thr_fork.c --- libpthread/thread/thr_fork.c 5 Nov 2003 18:18:45 -0000 1.34 +++ libpthread/thread/thr_fork.c 25 Jan 2005 15:47:26 -0000 @@ -43,12 +43,6 @@ #include "libc_private.h" #include "thr_private.h" -/* - * For a while, allow libpthread to work with a libc that doesn't - * export the malloc lock. 
- */ -#pragma weak __malloc_lock - __weak_reference(_fork, fork); pid_t @@ -60,11 +54,21 @@ pid_t ret; int errsave; - if (!_kse_isthreaded()) - return (__sys_fork()); - curthread = _get_curthread(); + if (!_kse_isthreaded()) { + SIGFILLSET(sigset); + __sys_sigprocmask(SIG_SETMASK, &sigset, &oldset); + ret = __sys_fork(); + if (ret == 0) + /* Child */ + __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, + NULL); + else + __sys_sigprocmask(SIG_SETMASK, &oldset, NULL); + return (ret); + } + /* * Masks all signals until we reach a safe point in * _kse_single_thread, and the signal masks will be @@ -86,7 +90,7 @@ } /* Fork a new process: */ - if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) { + if (_kse_isthreaded() != 0) { _spinlock(__malloc_lock); } if ((ret = __sys_fork()) == 0) { Index: libpthread/thread/thr_init.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_init.c,v retrieving revision 1.65 diff -u -r1.65 thr_init.c --- libpthread/thread/thr_init.c 12 Aug 2004 12:12:12 -0000 1.65 +++ libpthread/thread/thr_init.c 25 Jan 2005 15:47:26 -0000 @@ -73,6 +73,7 @@ int __pthread_mutex_lock(pthread_mutex_t *); int __pthread_mutex_trylock(pthread_mutex_t *); void _thread_init_hack(void); +extern int _thread_state_running; static void init_private(void); static void init_main_thread(struct pthread *thread); @@ -224,6 +225,9 @@ if ((references[0] == NULL) || (libgcc_references[0] == NULL)) PANIC("Failed loading mandatory references in _thread_init"); + /* Pull debug symbols in for static binary */ + _thread_state_running = PS_RUNNING; + /* * Check the size of the jump table to make sure it is preset * with the correct number of entries. 
@@ -387,6 +391,7 @@ thread->specific = NULL; thread->cleanup = NULL; thread->flags = 0; + thread->sigbackout = NULL; thread->continuation = NULL; thread->state = PS_RUNNING; Index: libpthread/thread/thr_kern.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_kern.c,v retrieving revision 1.112.2.1 diff -u -r1.112.2.1 thr_kern.c --- libpthread/thread/thr_kern.c 31 Oct 2004 04:21:43 -0000 1.112.2.1 +++ libpthread/thread/thr_kern.c 25 Jan 2005 15:47:26 -0000 @@ -56,7 +56,7 @@ #include "thr_private.h" #include "libc_private.h" -/*#define DEBUG_THREAD_KERN */ +/* #define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN #define DBG_MSG stdout_debug #else @@ -165,8 +165,7 @@ static void thr_cleanup(struct kse *kse, struct pthread *curthread); static void thr_link(struct pthread *thread); static void thr_resume_wrapper(int sig, siginfo_t *, ucontext_t *); -static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, - struct pthread_sigframe *psf); +static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp); static int thr_timedout(struct pthread *thread, struct timespec *curtime); static void thr_unlink(struct pthread *thread); static void thr_destroy(struct pthread *curthread, struct pthread *thread); @@ -352,6 +351,9 @@ curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL; curthread->attr.flags |= PTHREAD_SCOPE_SYSTEM; + /* After a fork(), the child should have no pending signals. */ + sigemptyset(&curthread->sigpend); + /* * Restore signal mask early, so any memory problems could * dump core. 
@@ -443,6 +445,7 @@ _kse_initial->k_kcb->kcb_kmbx.km_lwp; _thread_activated = 1; +#ifndef SYSTEM_SCOPE_ONLY if (_thread_scope_system <= 0) { /* Set current thread to initial thread */ _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb); @@ -450,10 +453,10 @@ _thr_start_sig_daemon(); _thr_setmaxconcurrency(); } - else { + else +#endif __sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask, NULL); - } } return (0); } @@ -614,13 +617,12 @@ void _thr_sched_switch_unlocked(struct pthread *curthread) { - struct pthread_sigframe psf; struct kse *curkse; volatile int resume_once = 0; ucontext_t *uc; /* We're in the scheduler, 5 by 5: */ - curkse = _get_curkse(); + curkse = curthread->kse; curthread->need_switchout = 1; /* The thread yielded on its own. */ curthread->critical_yield = 0; /* No need to yield anymore. */ @@ -628,14 +630,6 @@ /* Thread can unlock the scheduler lock. */ curthread->lock_switch = 1; - /* - * The signal frame is allocated off the stack because - * a thread can be interrupted by other signals while - * it is running down pending signals. - */ - psf.psf_valid = 0; - curthread->curframe = &psf; - if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) kse_sched_single(&curkse->k_kcb->kcb_kmbx); else { @@ -657,18 +651,11 @@ } /* - * It is ugly we must increase critical count, because we - * have a frame saved, we must backout state in psf - * before we can process signals. - */ - curthread->critical_count += psf.psf_valid; - - /* * Unlock the scheduling queue and leave the * critical region. */ /* Don't trust this after a switch! */ - curkse = _get_curkse(); + curkse = curthread->kse; curthread->lock_switch = 0; KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); @@ -677,16 +664,14 @@ /* * This thread is being resumed; check for cancellations. 
*/ - if ((psf.psf_valid || - ((curthread->check_pending || THR_NEED_ASYNC_CANCEL(curthread)) - && !THR_IN_CRITICAL(curthread)))) { + if (THR_NEED_ASYNC_CANCEL(curthread) && !THR_IN_CRITICAL(curthread)) { uc = alloca(sizeof(ucontext_t)); resume_once = 0; THR_GETCONTEXT(uc); if (resume_once == 0) { resume_once = 1; curthread->check_pending = 0; - thr_resume_check(curthread, uc, &psf); + thr_resume_check(curthread, uc); } } THR_ACTIVATE_LAST_LOCK(curthread); @@ -876,18 +861,17 @@ THR_DEACTIVATE_LAST_LOCK(curthread); kse_wait(curkse, curthread, sigseqno); THR_ACTIVATE_LAST_LOCK(curthread); - KSE_GET_TOD(curkse, &ts); - if (thr_timedout(curthread, &ts)) { - /* Indicate the thread timedout: */ - curthread->timeout = 1; - /* Make the thread runnable. */ - THR_SET_STATE(curthread, PS_RUNNING); + if (curthread->wakeup_time.tv_sec >= 0) { + KSE_GET_TOD(curkse, &ts); + if (thr_timedout(curthread, &ts)) { + /* Indicate the thread timedout: */ + curthread->timeout = 1; + /* Make the thread runnable. */ + THR_SET_STATE(curthread, PS_RUNNING); + } } } - /* Remove the frame reference. */ - curthread->curframe = NULL; - if (curthread->lock_switch == 0) { /* Unlock the scheduling queue. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); @@ -922,7 +906,6 @@ { struct kse *curkse; struct pthread *curthread, *td_wait; - struct pthread_sigframe *curframe; int ret; curkse = (struct kse *)kmbx->km_udata; @@ -977,6 +960,8 @@ * will be cleared. */ curthread->blocked = 1; + DBG_MSG("Running thread %p is now blocked in kernel.\n", + curthread); } /* Check for any unblocked threads in the kernel. */ @@ -1082,10 +1067,6 @@ /* Mark the thread active. */ curthread->active = 1; - /* Remove the frame reference. */ - curframe = curthread->curframe; - curthread->curframe = NULL; - /* * The thread's current signal frame will only be NULL if it * is being resumed after being blocked in the kernel. 
In @@ -1093,7 +1074,7 @@ * signals or needs a cancellation check, we need to add a * signal frame to the thread's context. */ - if ((curframe == NULL) && (curthread->state == PS_RUNNING) && + if (curthread->lock_switch == 0 && curthread->state == PS_RUNNING && (curthread->check_pending != 0 || THR_NEED_ASYNC_CANCEL(curthread)) && !THR_IN_CRITICAL(curthread)) { @@ -1133,10 +1114,10 @@ DBG_MSG(">>> sig wrapper\n"); if (curthread->lock_switch) PANIC("thr_resume_wrapper, lock_switch != 0\n"); - thr_resume_check(curthread, ucp, NULL); + thr_resume_check(curthread, ucp); errno = err_save; _kse_critical_enter(); - curkse = _get_curkse(); + curkse = curthread->kse; curthread->tcb->tcb_tmbx.tm_context = *ucp; ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1); if (ret != 0) @@ -1146,10 +1127,9 @@ } static void -thr_resume_check(struct pthread *curthread, ucontext_t *ucp, - struct pthread_sigframe *psf) +thr_resume_check(struct pthread *curthread, ucontext_t *ucp) { - _thr_sig_rundown(curthread, ucp, psf); + _thr_sig_rundown(curthread, ucp); if (THR_NEED_ASYNC_CANCEL(curthread)) pthread_testcancel(); @@ -1814,13 +1794,12 @@ struct timespec ts, ts_sleep; int saved_flags; - KSE_GET_TOD(kse, &ts); - if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) { /* Limit sleep to no more than 1 minute. 
*/ ts_sleep.tv_sec = 60; ts_sleep.tv_nsec = 0; } else { + KSE_GET_TOD(kse, &ts); TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, &ts); if (ts_sleep.tv_sec > 60) { ts_sleep.tv_sec = 60; Index: libpthread/thread/thr_mutex.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_mutex.c,v retrieving revision 1.45 diff -u -r1.45 thr_mutex.c --- libpthread/thread/thr_mutex.c 17 Jan 2004 03:09:57 -0000 1.45 +++ libpthread/thread/thr_mutex.c 25 Jan 2005 15:47:26 -0000 @@ -85,26 +85,26 @@ static inline pthread_t mutex_queue_deq(pthread_mutex_t); static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); - +static void mutex_lock_backout(void *arg); static struct pthread_mutex_attr static_mutex_attr = PTHREAD_MUTEXATTR_STATIC_INITIALIZER; static pthread_mutexattr_t static_mattr = &static_mutex_attr; /* Single underscore versions provided for libc internal usage: */ +__weak_reference(__pthread_mutex_init, pthread_mutex_init); __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); /* No difference between libc and application usage of these: */ -__weak_reference(_pthread_mutex_init, pthread_mutex_init); __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); int -_pthread_mutex_init(pthread_mutex_t *mutex, +__pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr) { struct pthread_mutex *pmutex; @@ -206,6 +206,22 @@ return (ret); } +int +_pthread_mutex_init(pthread_mutex_t *mutex, + const pthread_mutexattr_t *mutex_attr) +{ + struct pthread_mutex_attr mattr, *mattrp; + + if ((mutex_attr == NULL) || (*mutex_attr == NULL)) + return (__pthread_mutex_init(mutex, &static_mattr)); + else { + mattr = **mutex_attr; + 
mattr.m_flags |= MUTEX_FLAGS_PRIVATE; + mattrp = &mattr; + return (__pthread_mutex_init(mutex, &mattrp)); + } +} + void _thr_mutex_reinit(pthread_mutex_t *mutex) { @@ -303,6 +319,7 @@ static int mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex) { + int private; int ret = 0; THR_ASSERT((mutex != NULL) && (*mutex != NULL), @@ -310,6 +327,7 @@ /* Lock the mutex structure: */ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock); + private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE; /* * If the mutex was statically allocated, properly @@ -417,6 +435,9 @@ break; } + if (ret == 0 && private) + THR_CRITICAL_ENTER(curthread); + /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock); @@ -468,6 +489,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m, const struct timespec * abstime) { + int private; int ret = 0; THR_ASSERT((m != NULL) && (*m != NULL), @@ -482,6 +504,8 @@ curthread->timeout = 0; curthread->wakeup_time.tv_sec = -1; + private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE; + /* * Enter a loop waiting to become the mutex owner. We need a * loop in case the waiting thread is interrupted by a signal @@ -516,6 +540,8 @@ MUTEX_ASSERT_NOT_OWNED(*m); TAILQ_INSERT_TAIL(&curthread->mutexq, (*m), m_qe); + if (private) + THR_CRITICAL_ENTER(curthread); /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*m)->m_lock); @@ -539,6 +565,7 @@ */ mutex_queue_enq(*m, curthread); curthread->data.mutex = *m; + curthread->sigbackout = mutex_lock_backout; /* * This thread is active and is in a critical * region (holding the mutex lock); we should @@ -554,12 +581,17 @@ /* Schedule the next thread: */ _thr_sched_switch(curthread); - curthread->data.mutex = NULL; if (THR_IN_MUTEXQ(curthread)) { THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); mutex_queue_remove(*m, curthread); THR_LOCK_RELEASE(curthread, &(*m)->m_lock); } + /* + * Only clear these after assuring the + * thread is dequeued. 
+ */ + curthread->data.mutex = NULL; + curthread->sigbackout = NULL; } break; @@ -590,6 +622,8 @@ MUTEX_ASSERT_NOT_OWNED(*m); TAILQ_INSERT_TAIL(&curthread->mutexq, (*m), m_qe); + if (private) + THR_CRITICAL_ENTER(curthread); /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*m)->m_lock); @@ -613,6 +647,7 @@ */ mutex_queue_enq(*m, curthread); curthread->data.mutex = *m; + curthread->sigbackout = mutex_lock_backout; /* * This thread is active and is in a critical @@ -633,12 +668,17 @@ /* Schedule the next thread: */ _thr_sched_switch(curthread); - curthread->data.mutex = NULL; if (THR_IN_MUTEXQ(curthread)) { THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); mutex_queue_remove(*m, curthread); THR_LOCK_RELEASE(curthread, &(*m)->m_lock); } + /* + * Only clear these after assuring the + * thread is dequeued. + */ + curthread->data.mutex = NULL; + curthread->sigbackout = NULL; } break; @@ -679,6 +719,8 @@ MUTEX_ASSERT_NOT_OWNED(*m); TAILQ_INSERT_TAIL(&curthread->mutexq, (*m), m_qe); + if (private) + THR_CRITICAL_ENTER(curthread); /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*m)->m_lock); @@ -702,6 +744,7 @@ */ mutex_queue_enq(*m, curthread); curthread->data.mutex = *m; + curthread->sigbackout = mutex_lock_backout; /* Clear any previous error: */ curthread->error = 0; @@ -722,12 +765,17 @@ /* Schedule the next thread: */ _thr_sched_switch(curthread); - curthread->data.mutex = NULL; if (THR_IN_MUTEXQ(curthread)) { THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock); mutex_queue_remove(*m, curthread); THR_LOCK_RELEASE(curthread, &(*m)->m_lock); } + /* + * Only clear these after assuring the + * thread is dequeued. + */ + curthread->data.mutex = NULL; + curthread->sigbackout = NULL; /* * The threads priority may have changed while @@ -911,14 +959,7 @@ /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: - /* - * POSIX specifies that mutexes should return EDEADLK if a - * recursive lock is detected. 
- */ - if (m->m_owner == curthread) - ret = EDEADLK; - else - ret = EBUSY; + ret = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: @@ -939,6 +980,13 @@ { int ret = 0; + /* + * Don't allow evil recursive mutexes for private use + * in libc and libpthread. + */ + if (m->m_flags & MUTEX_FLAGS_PRIVATE) + PANIC("Recurse on a private mutex."); + switch (m->m_type) { /* case PTHREAD_MUTEX_DEFAULT: */ case PTHREAD_MUTEX_ERRORCHECK: @@ -1142,8 +1190,13 @@ /* Increment the reference count: */ (*m)->m_refcount++; + /* Leave the critical region if this is a private mutex. */ + if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE)) + THR_CRITICAL_LEAVE(curthread); + /* Unlock the mutex structure: */ THR_LOCK_RELEASE(curthread, &(*m)->m_lock); + if (kmbx != NULL) kse_wakeup(kmbx); } @@ -1518,9 +1571,10 @@ * This is called by the current thread when it wants to back out of a * mutex_lock in order to run a signal handler. */ -void -_mutex_lock_backout(struct pthread *curthread) +static void +mutex_lock_backout(void *arg) { + struct pthread *curthread = (struct pthread *)arg; struct pthread_mutex *m; if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) { @@ -1561,6 +1615,8 @@ THR_LOCK_RELEASE(curthread, &m->m_lock); } } + /* No need to call this again. */ + curthread->sigbackout = NULL; } /* @@ -1681,13 +1737,16 @@ (pthread->active_priority > curthread->active_priority)) curthread->critical_yield = 1; - THR_SCHED_UNLOCK(curthread, pthread); - if (mutex->m_owner == pthread) + if (mutex->m_owner == pthread) { /* We're done; a valid owner was found. 
*/ + if (mutex->m_flags & MUTEX_FLAGS_PRIVATE) + THR_CRITICAL_ENTER(pthread); + THR_SCHED_UNLOCK(curthread, pthread); break; - else - /* Get the next thread from the waiting queue: */ - pthread = TAILQ_NEXT(pthread, sqe); + } + THR_SCHED_UNLOCK(curthread, pthread); + /* Get the next thread from the waiting queue: */ + pthread = TAILQ_NEXT(pthread, sqe); } if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT)) Index: libpthread/thread/thr_once.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_once.c,v retrieving revision 1.9 diff -u -r1.9 thr_once.c --- libpthread/thread/thr_once.c 9 Sep 2003 22:38:12 -0000 1.9 +++ libpthread/thread/thr_once.c 25 Jan 2005 15:47:26 -0000 @@ -67,6 +67,7 @@ int _pthread_once(pthread_once_t *once_control, void (*init_routine) (void)) { + struct pthread *curthread; int wakeup = 0; if (once_control->state == ONCE_DONE) @@ -81,9 +82,10 @@ if (*(volatile int *)&(once_control->state) == ONCE_NEVER_DONE) { once_control->state = ONCE_IN_PROGRESS; _pthread_mutex_unlock(&once_lock); - _pthread_cleanup_push(once_cancel_handler, once_control); + curthread = _get_curthread(); + THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control); init_routine(); - _pthread_cleanup_pop(0); + THR_CLEANUP_POP(curthread, 0); _pthread_mutex_lock(&once_lock); once_control->state = ONCE_DONE; wakeup = 1; Index: libpthread/thread/thr_private.h =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_private.h,v retrieving revision 1.118.2.1 diff -u -r1.118.2.1 thr_private.h --- libpthread/thread/thr_private.h 31 Oct 2004 04:21:43 -0000 1.118.2.1 +++ libpthread/thread/thr_private.h 25 Jan 2005 15:47:26 -0000 @@ -416,8 +416,24 @@ struct pthread_cleanup *next; void (*routine) (); void *routine_arg; + int onstack; }; +#define THR_CLEANUP_PUSH(td, func, arg) { \ + struct pthread_cleanup __cup; \ + \ + __cup.routine = func; \ 
+ __cup.routine_arg = arg; \ + __cup.onstack = 1; \ + __cup.next = (td)->cleanup; \ + (td)->cleanup = &__cup; + +#define THR_CLEANUP_POP(td, exec) \ + (td)->cleanup = __cup.next; \ + if ((exec) != 0) \ + __cup.routine(__cup.routine_arg); \ +} + struct pthread_atfork { TAILQ_ENTRY(pthread_atfork) qe; void (*prepare)(void); @@ -563,6 +579,7 @@ struct pthread_sigframe { int psf_valid; int psf_flags; + int psf_cancelflags; int psf_interrupted; int psf_timeout; int psf_signo; @@ -572,6 +589,7 @@ sigset_t psf_sigset; sigset_t psf_sigmask; int psf_seqno; + thread_continuation_t psf_continuation; }; struct join_status { @@ -644,8 +662,8 @@ /* * Used for tracking delivery of signal handlers. */ - struct pthread_sigframe *curframe; siginfo_t *siginfo; + thread_continuation_t sigbackout; /* * Cancelability flags - the lower 2 bits are used by cancel @@ -1069,7 +1087,6 @@ */ __BEGIN_DECLS int _cond_reinit(pthread_cond_t *); -void _cond_wait_backout(struct pthread *); struct kse *_kse_alloc(struct pthread *, int sys_scope); kse_critical_t _kse_critical_enter(void); void _kse_critical_leave(kse_critical_t); @@ -1084,7 +1101,6 @@ void _kseg_free(struct kse_group *); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); -void _mutex_lock_backout(struct pthread *); void _mutex_notify_priochange(struct pthread *, struct pthread *, int); int _mutex_reinit(struct pthread_mutex *); void _mutex_unlock_private(struct pthread *); @@ -1147,8 +1163,7 @@ void _thr_seterrno(struct pthread *, int); void _thr_sig_handler(int, siginfo_t *, ucontext_t *); void _thr_sig_check_pending(struct pthread *); -void _thr_sig_rundown(struct pthread *, ucontext_t *, - struct pthread_sigframe *); +void _thr_sig_rundown(struct pthread *, ucontext_t *); void _thr_sig_send(struct pthread *pthread, int sig); void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); void _thr_spinlock_init(void); Index: libpthread/thread/thr_sem.c 
=================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_sem.c,v retrieving revision 1.15 diff -u -r1.15 thr_sem.c --- libpthread/thread/thr_sem.c 6 Feb 2004 15:20:56 -0000 1.15 +++ libpthread/thread/thr_sem.c 25 Jan 2005 15:47:26 -0000 @@ -123,7 +123,7 @@ { semid_t semid; - semid = SEM_USER; + semid = (semid_t)SEM_USER; if ((pshared != 0) && (ksem_init(&semid, value) != 0)) return (-1); @@ -145,8 +145,8 @@ if (sem_check_validity(sem) != 0) return (-1); + curthread = _get_curthread(); if ((*sem)->syssem != 0) { - curthread = _get_curthread(); _thr_cancel_enter(curthread); retval = ksem_wait((*sem)->semid); _thr_cancel_leave(curthread, retval != 0); @@ -157,9 +157,9 @@ while ((*sem)->count <= 0) { (*sem)->nwaiters++; - pthread_cleanup_push(decrease_nwaiters, sem); + THR_CLEANUP_PUSH(curthread, decrease_nwaiters, sem); pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock); - pthread_cleanup_pop(0); + THR_CLEANUP_POP(curthread, 0); (*sem)->nwaiters--; } (*sem)->count--; Index: libpthread/thread/thr_sig.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_sig.c,v retrieving revision 1.79.2.1 diff -u -r1.79.2.1 thr_sig.c --- libpthread/thread/thr_sig.c 31 Oct 2004 04:21:43 -0000 1.79.2.1 +++ libpthread/thread/thr_sig.c 25 Jan 2005 15:47:26 -0000 @@ -43,17 +43,15 @@ #include "thr_private.h" /* Prototypes: */ -static void build_siginfo(siginfo_t *info, int signo); +static inline void build_siginfo(siginfo_t *info, int signo); #ifndef SYSTEM_SCOPE_ONLY static struct pthread *thr_sig_find(struct kse *curkse, int sig, siginfo_t *info); -static void handle_special_signals(struct kse *curkse, int sig); #endif -static void thr_sigframe_add(struct pthread *thread); -static void thr_sigframe_restore(struct pthread *thread, - struct pthread_sigframe *psf); -static void thr_sigframe_save(struct pthread *thread, - struct pthread_sigframe *psf); +static inline 
void thr_sigframe_restore(struct pthread *thread, + struct pthread_sigframe *psf); +static inline void thr_sigframe_save(struct pthread *thread, + struct pthread_sigframe *psf); #define SA_KILL 0x01 /* terminates process by default */ #define SA_STOP 0x02 @@ -254,9 +252,6 @@ DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig); - /* Some signals need special handling: */ - handle_special_signals(curkse, sig); - /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ @@ -306,11 +301,14 @@ void _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) { + struct pthread_sigframe psf; __siginfohandler_t *sigfunc; struct pthread *curthread; struct kse *curkse; struct sigaction act; - int sa_flags, err_save, intr_save, timeout_save; + int sa_flags, err_save; + + err_save = errno; DBG_MSG(">>> _thr_sig_handler(%d)\n", sig); @@ -319,15 +317,18 @@ PANIC("No current thread.\n"); if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) PANIC("Thread is not system scope.\n"); - if (curthread->flags & THR_FLAGS_EXITING) + if (curthread->flags & THR_FLAGS_EXITING) { + errno = err_save; return; + } + curkse = _get_curkse(); /* * If thread is in critical region or if thread is on * the way of state transition, then latch signal into buffer. 
*/ if (_kse_in_critical() || THR_IN_CRITICAL(curthread) || - (curthread->state != PS_RUNNING && curthread->curframe == NULL)) { + curthread->state != PS_RUNNING) { DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig); curthread->siginfo[sig-1] = *info; curthread->check_pending = 1; @@ -341,18 +342,24 @@ */ if (KSE_IS_IDLE(curkse)) kse_wakeup(&curkse->k_kcb->kcb_kmbx); + errno = err_save; return; } - /* It is now safe to invoke signal handler */ - err_save = errno; - timeout_save = curthread->timeout; - intr_save = curthread->interrupted; /* Check if the signal requires a dump of thread information: */ if (sig == SIGINFO) { /* Dump thread information to file: */ _thread_dump_info(); } + + /* Check the threads previous state: */ + curthread->critical_count++; + if (curthread->sigbackout != NULL) + curthread->sigbackout((void *)curthread); + curthread->critical_count--; + thr_sigframe_save(curthread, &psf); + THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared."); + _kse_critical_enter(); /* Get a fresh copy of signal mask */ __sys_sigprocmask(SIG_BLOCK, NULL, &curthread->sigmask); @@ -395,14 +402,16 @@ #endif } } - errno = err_save; - curthread->timeout = timeout_save; - curthread->interrupted = intr_save; _kse_critical_enter(); curthread->sigmask = ucp->uc_sigmask; SIG_CANTMASK(curthread->sigmask); _kse_critical_leave(&curthread->tcb->tcb_tmbx); + + thr_sigframe_restore(curthread, &psf); + DBG_MSG("<<< _thr_sig_handler(%d)\n", sig); + + errno = err_save; } struct sighandle_info { @@ -439,7 +448,7 @@ if (!_kse_in_critical()) PANIC("thr_sig_invoke_handler without in critical\n"); - curkse = _get_curkse(); + curkse = curthread->kse; /* * Check that a custom handler is installed and if * the signal is not blocked: @@ -491,7 +500,7 @@ _kse_critical_enter(); /* Don't trust after critical leave/enter */ - curkse = _get_curkse(); + curkse = curthread->kse; /* * Restore the thread's signal mask. 
@@ -705,6 +714,10 @@ KSE_LOCK_RELEASE(curkse, &_thread_list_lock); if (kmbx != NULL) kse_wakeup(kmbx); + if (suspended_thread != NULL) + _thr_ref_delete(NULL, suspended_thread); + if (signaled_thread != NULL) + _thr_ref_delete(NULL, signaled_thread); return (NULL); } else if (!SIGISMEMBER(pthread->sigmask, sig)) { /* @@ -748,7 +761,7 @@ } #endif /* ! SYSTEM_SCOPE_ONLY */ -static void +static inline void build_siginfo(siginfo_t *info, int signo) { bzero(info, sizeof(*info)); @@ -761,54 +774,35 @@ * It should only be called from the context of the thread. */ void -_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp, - struct pthread_sigframe *psf) +_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp) { - int interrupted = curthread->interrupted; - int timeout = curthread->timeout; + struct pthread_sigframe psf; siginfo_t siginfo; - int i; + int i, err_save; kse_critical_t crit; struct kse *curkse; sigset_t sigmask; + err_save = errno; + DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread); + /* Check the threads previous state: */ - if ((psf != NULL) && (psf->psf_valid != 0)) { - /* - * Do a little cleanup handling for those threads in - * queues before calling the signal handler. Signals - * for these threads are temporarily blocked until - * after cleanup handling. 
- */ - switch (psf->psf_state) { - case PS_COND_WAIT: - _cond_wait_backout(curthread); - psf->psf_state = PS_RUNNING; - break; - - case PS_MUTEX_WAIT: - _mutex_lock_backout(curthread); - psf->psf_state = PS_RUNNING; - break; - - case PS_RUNNING: - break; + curthread->critical_count++; + if (curthread->sigbackout != NULL) + curthread->sigbackout((void *)curthread); + curthread->critical_count--; - default: - psf->psf_state = PS_RUNNING; - break; - } - /* XXX see comment in thr_sched_switch_unlocked */ - curthread->critical_count--; - } + THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared."); + THR_ASSERT((curthread->state == PS_RUNNING), "state is not PS_RUNNING"); + thr_sigframe_save(curthread, &psf); /* * Lower the priority before calling the handler in case * it never returns (longjmps back): */ crit = _kse_critical_enter(); - curkse = _get_curkse(); + curkse = curthread->kse; KSE_SCHED_LOCK(curkse, curkse->k_kseg); KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); curthread->active_priority &= ~THR_SIGNAL_PRIORITY; @@ -847,9 +841,8 @@ } } - if (psf != NULL && psf->psf_valid != 0) - thr_sigframe_restore(curthread, psf); - curkse = _get_curkse(); + /* Don't trust after signal handling */ + curkse = curthread->kse; KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); _kse_critical_leave(&curthread->tcb->tcb_tmbx); @@ -871,10 +864,10 @@ } __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); } - curthread->interrupted = interrupted; - curthread->timeout = timeout; - DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread); + + thr_sigframe_restore(curthread, &psf); + errno = err_save; } /* @@ -893,7 +886,15 @@ volatile int once; int errsave; - if (THR_IN_CRITICAL(curthread)) + /* + * If the thread is in critical region, delay processing signals. 
+ * If the thread state is not PS_RUNNING, it might be switching + * into UTS and but a THR_LOCK_RELEASE saw check_pending, and it + * goes here, in the case we delay processing signals, lets UTS + * process complicated things, normally UTS will call _thr_sig_add + * to resume the thread, so we needn't repeat doing it here. + */ + if (THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING) return; errsave = errno; @@ -902,42 +903,11 @@ if (once == 0) { once = 1; curthread->check_pending = 0; - _thr_sig_rundown(curthread, &uc, NULL); + _thr_sig_rundown(curthread, &uc); } errno = errsave; } -#ifndef SYSTEM_SCOPE_ONLY -/* - * This must be called with upcalls disabled. - */ -static void -handle_special_signals(struct kse *curkse, int sig) -{ - switch (sig) { - /* - * POSIX says that pending SIGCONT signals are - * discarded when one of these signals occurs. - */ - case SIGTSTP: - case SIGTTIN: - case SIGTTOU: - KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); - SIGDELSET(_thr_proc_sigpending, SIGCONT); - KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); - break; - case SIGCONT: - KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock); - SIGDELSET(_thr_proc_sigpending, SIGTSTP); - SIGDELSET(_thr_proc_sigpending, SIGTTIN); - SIGDELSET(_thr_proc_sigpending, SIGTTOU); - KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); - default: - break; - } -} -#endif /* ! SYSTEM_SCOPE_ONLY */ - /* * Perform thread specific actions in response to a signal. * This function is only called if there is a handler installed @@ -975,11 +945,9 @@ return (NULL); } - if (pthread->curframe == NULL || - (pthread->state != PS_SIGWAIT && - SIGISMEMBER(pthread->sigmask, sig)) || - THR_IN_CRITICAL(pthread)) { - /* thread is running or signal was being masked */ + if (pthread->state != PS_SIGWAIT && + SIGISMEMBER(pthread->sigmask, sig)) { + /* signal is masked, just add signal to thread. 
*/ if (!fromproc) { SIGADDSET(pthread->sigpend, sig); if (info == NULL) @@ -992,19 +960,6 @@ return (NULL); SIGADDSET(pthread->sigpend, sig); } - if (!SIGISMEMBER(pthread->sigmask, sig)) { - /* A quick path to exit process */ - if (sigfunc == SIG_DFL && sigprop(sig) & SA_KILL) { - kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig); - /* Never reach */ - } - pthread->check_pending = 1; - if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && - (pthread->blocked != 0) && - !THR_IN_CRITICAL(pthread)) - kse_thr_interrupt(&pthread->tcb->tcb_tmbx, - restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0); - } } else { /* if process signal not exists, just return */ @@ -1045,7 +1000,6 @@ /* Possible not in RUNQ and has curframe ? */ pthread->active_priority |= THR_SIGNAL_PRIORITY; } - suppress_handler = 1; break; /* * States which cannot be interrupted but still require the @@ -1111,19 +1065,22 @@ build_siginfo(&pthread->siginfo[sig-1], sig); else if (info != &pthread->siginfo[sig-1]) memcpy(&pthread->siginfo[sig-1], info, sizeof(*info)); - + pthread->check_pending = 1; + if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && + (pthread->blocked != 0) && !THR_IN_CRITICAL(pthread)) + kse_thr_interrupt(&pthread->tcb->tcb_tmbx, + restart ? 
KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0); if (suppress_handler == 0) { /* * Setup a signal frame and save the current threads * state: */ - thr_sigframe_add(pthread); - if (pthread->flags & THR_FLAGS_IN_RUNQ) - THR_RUNQ_REMOVE(pthread); - pthread->active_priority |= THR_SIGNAL_PRIORITY; - kmbx = _thr_setrunnable_unlocked(pthread); - } else { - pthread->check_pending = 1; + if (pthread->state != PS_RUNNING) { + if (pthread->flags & THR_FLAGS_IN_RUNQ) + THR_RUNQ_REMOVE(pthread); + pthread->active_priority |= THR_SIGNAL_PRIORITY; + kmbx = _thr_setrunnable_unlocked(pthread); + } } } return (kmbx); @@ -1147,6 +1104,10 @@ THR_SCHED_LOCK(curthread, pthread); if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) { kmbx = _thr_sig_add(pthread, sig, NULL); + /* Add a preemption point. */ + if (kmbx == NULL && (curthread->kseg == pthread->kseg) && + (pthread->active_priority > curthread->active_priority)) + curthread->critical_yield = 1; THR_SCHED_UNLOCK(curthread, pthread); if (kmbx != NULL) kse_wakeup(kmbx); @@ -1157,50 +1118,55 @@ */ if (pthread == curthread && curthread->check_pending) _thr_sig_check_pending(curthread); + } else { THR_SCHED_UNLOCK(curthread, pthread); } } -static void -thr_sigframe_add(struct pthread *thread) +static inline void +thr_sigframe_restore(struct pthread *curthread, struct pthread_sigframe *psf) { - if (thread->curframe == NULL) - PANIC("Thread doesn't have signal frame "); + kse_critical_t crit; + struct kse *curkse; - if (thread->curframe->psf_valid == 0) { - thread->curframe->psf_valid = 1; - /* - * Multiple signals can be added to the same signal - * frame. Only save the thread's state the first time. 
- */ - thr_sigframe_save(thread, thread->curframe); - } + THR_THREAD_LOCK(curthread, curthread); + curthread->cancelflags = psf->psf_cancelflags; + crit = _kse_critical_enter(); + curkse = curthread->kse; + KSE_SCHED_LOCK(curkse, curthread->kseg); + curthread->flags = psf->psf_flags; + curthread->interrupted = psf->psf_interrupted; + curthread->timeout = psf->psf_timeout; + curthread->data = psf->psf_wait_data; + curthread->wakeup_time = psf->psf_wakeup_time; + curthread->continuation = psf->psf_continuation; + KSE_SCHED_UNLOCK(curkse, curthread->kseg); + _kse_critical_leave(crit); + THR_THREAD_UNLOCK(curthread, curthread); } -static void -thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf) +static inline void +thr_sigframe_save(struct pthread *curthread, struct pthread_sigframe *psf) { - if (psf->psf_valid == 0) - PANIC("invalid pthread_sigframe\n"); - thread->flags = psf->psf_flags; - thread->interrupted = psf->psf_interrupted; - thread->timeout = psf->psf_timeout; - thread->state = psf->psf_state; - thread->data = psf->psf_wait_data; - thread->wakeup_time = psf->psf_wakeup_time; -} + kse_critical_t crit; + struct kse *curkse; -static void -thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf) -{ + THR_THREAD_LOCK(curthread, curthread); + psf->psf_cancelflags = curthread->cancelflags; + crit = _kse_critical_enter(); + curkse = curthread->kse; + KSE_SCHED_LOCK(curkse, curthread->kseg); /* This has to initialize all members of the sigframe. 
*/ - psf->psf_flags = thread->flags & THR_FLAGS_PRIVATE; - psf->psf_interrupted = thread->interrupted; - psf->psf_timeout = thread->timeout; - psf->psf_state = thread->state; - psf->psf_wait_data = thread->data; - psf->psf_wakeup_time = thread->wakeup_time; + psf->psf_flags = (curthread->flags & (THR_FLAGS_PRIVATE | THR_FLAGS_EXITING)); + psf->psf_interrupted = curthread->interrupted; + psf->psf_timeout = curthread->timeout; + psf->psf_wait_data = curthread->data; + psf->psf_wakeup_time = curthread->wakeup_time; + psf->psf_continuation = curthread->continuation; + KSE_SCHED_UNLOCK(curkse, curthread->kseg); + _kse_critical_leave(crit); + THR_THREAD_UNLOCK(curthread, curthread); } void @@ -1260,6 +1226,9 @@ int i; struct pthread *curthread = _get_curthread(); + /* Clear process pending signals. */ + sigemptyset(&_thr_proc_sigpending); + /* Enter a loop to get the existing signal status: */ for (i = 1; i <= _SIG_MAXSIG; i++) { /* Check for signals which cannot be trapped: */ Index: libpthread/thread/thr_sigsuspend.c =================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_sigsuspend.c,v retrieving revision 1.24 diff -u -r1.24 thr_sigsuspend.c --- libpthread/thread/thr_sigsuspend.c 12 Jun 2004 07:40:01 -0000 1.24 +++ libpthread/thread/thr_sigsuspend.c 25 Jan 2005 15:47:26 -0000 @@ -69,12 +69,18 @@ /* Wait for a signal: */ _thr_sched_switch_unlocked(curthread); } else { + curthread->check_pending = 1; THR_UNLOCK_SWITCH(curthread); /* check pending signal I can handle: */ _thr_sig_check_pending(curthread); } - THR_ASSERT(curthread->oldsigmask == NULL, - "oldsigmask is not cleared"); + if ((curthread->cancelflags & THR_CANCELLING) != 0) + curthread->oldsigmask = NULL; + else { + THR_ASSERT(curthread->oldsigmask == NULL, + "oldsigmask is not cleared"); + } + /* Always return an interrupted error: */ errno = EINTR; } else { Index: libpthread/thread/thr_spinlock.c 
=================================================================== RCS file: /usr/cvs/src/lib/libpthread/thread/thr_spinlock.c,v retrieving revision 1.21 diff -u -r1.21 thr_spinlock.c --- libpthread/thread/thr_spinlock.c 9 Dec 2003 02:37:40 -0000 1.21 +++ libpthread/thread/thr_spinlock.c 25 Jan 2005 15:47:26 -0000 @@ -49,6 +49,10 @@ static void init_spinlock(spinlock_t *lck); +static struct pthread_mutex_attr static_mutex_attr = + PTHREAD_MUTEXATTR_STATIC_INITIALIZER; +static pthread_mutexattr_t static_mattr = &static_mutex_attr; + static pthread_mutex_t spinlock_static_lock; static struct spinlock_extra extra[MAX_SPINLOCKS]; static int spinlock_count = 0; @@ -65,7 +69,7 @@ struct spinlock_extra *extra; extra = (struct spinlock_extra *)lck->fname; - pthread_mutex_unlock(&extra->lock); + _pthread_mutex_unlock(&extra->lock); } /* @@ -90,7 +94,7 @@ if (lck->fname == NULL) init_spinlock(lck); extra = (struct spinlock_extra *)lck->fname; - pthread_mutex_lock(&extra->lock); + _pthread_mutex_lock(&extra->lock); } /* @@ -112,13 +116,13 @@ static void init_spinlock(spinlock_t *lck) { - pthread_mutex_lock(&spinlock_static_lock); + _pthread_mutex_lock(&spinlock_static_lock); if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) { lck->fname = (char *)&extra[spinlock_count]; extra[spinlock_count].owner = lck; spinlock_count++; } - pthread_mutex_unlock(&spinlock_static_lock); + _pthread_mutex_unlock(&spinlock_static_lock); if (lck->fname == NULL) PANIC("Exceeded max spinlocks"); } @@ -133,10 +137,10 @@ for (i = 0; i < spinlock_count; i++) _thr_mutex_reinit(&extra[i].lock); } else { - if (pthread_mutex_init(&spinlock_static_lock, NULL)) + if (_pthread_mutex_init(&spinlock_static_lock, &static_mattr)) PANIC("Cannot initialize spinlock_static_lock"); for (i = 0; i < MAX_SPINLOCKS; i++) { - if (pthread_mutex_init(&extra[i].lock, NULL)) + if (_pthread_mutex_init(&extra[i].lock, &static_mattr)) PANIC("Cannot initialize spinlock extra"); } initialized = 1;