Index: lib/libthr/thread/thr_kern.c =================================================================== RCS file: /home/ncvs/src/lib/libthr/thread/thr_kern.c,v retrieving revision 1.3 diff -u -r1.3 thr_kern.c --- lib/libthr/thread/thr_kern.c 20 Apr 2003 02:58:30 -0000 1.3 +++ lib/libthr/thread/thr_kern.c 8 May 2003 11:29:56 -0000 @@ -52,6 +52,56 @@ static sigset_t restore; void +_thread_critical_enter(pthread_t pthread) +{ + sigset_t set; + sigset_t sav; + + /* + * Block all signals. + */ + SIGFILLSET(set); + + /* + * We can not use the global 'restore' set until after we have + * acquired the giant lock. + */ + _SPINLOCK(&pthread->lock); + if (__sys_sigprocmask(SIG_SETMASK, &set, &sav)) { + _thread_printf(STDERR_FILENO, "Critical Enter: sig err %d\n", + errno); + abort(); + } + + restore = sav; +} + +void +_thread_critical_exit(pthread_t pthread) +{ + sigset_t set; + int error; + + /* + * restore is protected by giant. We could restore our signal state + * incorrectly if someone else set restore between unlocking giant + * and restoring the signal mask. To avoid this we cache a copy prior + * to the unlock. + */ + set = restore; + + /* + * Restore signals. 
+ */ + if (__sys_sigprocmask(SIG_SETMASK, &set, NULL)) { + _thread_printf(STDERR_FILENO, "Critical Exit: sig err %d\n", + errno); + abort(); + } + _SPINUNLOCK(&pthread->lock); +} + +void GIANT_LOCK(pthread_t pthread) { sigset_t set; Index: lib/libthr/thread/thr_mutex.c =================================================================== RCS file: /home/ncvs/src/lib/libthr/thread/thr_mutex.c,v retrieving revision 1.4 diff -u -r1.4 thr_mutex.c --- lib/libthr/thread/thr_mutex.c 6 May 2003 02:30:52 -0000 1.4 +++ lib/libthr/thread/thr_mutex.c 8 May 2003 08:50:44 -0000 @@ -62,6 +62,8 @@ /* * Prototypes */ +static int get_muncontested(pthread_mutex_t, int); +static void get_mcontested(pthread_mutex_t); static inline int mutex_self_trylock(pthread_mutex_t); static inline int mutex_self_lock(pthread_mutex_t); static inline int mutex_unlock_common(pthread_mutex_t *, int); @@ -297,138 +299,6 @@ return (ret); } -static int -mutex_trylock_common(pthread_mutex_t *mutex) -{ - int ret = 0; - - PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), - "Uninitialized mutex in pthread_mutex_trylock_basic"); - - /* - * Defer signals to protect the scheduling queues from - * access by the signal handler: - */ - /* _thread_kern_sig_defer(); XXXThr */ - - /* Lock the mutex structure: */ - _SPINLOCK(&(*mutex)->lock); - - /* - * If the mutex was statically allocated, properly - * initialize the tail queue. 
- */ - if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { - TAILQ_INIT(&(*mutex)->m_queue); - _MUTEX_INIT_LINK(*mutex); - (*mutex)->m_flags |= MUTEX_FLAGS_INITED; - } - - /* Process according to mutex type: */ - switch ((*mutex)->m_protocol) { - /* Default POSIX mutex: */ - case PTHREAD_PRIO_NONE: - /* Check if this mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for the running thread: */ - (*mutex)->m_owner = curthread; - - /* Add to the list of owned mutexes: */ - _MUTEX_ASSERT_NOT_OWNED(*mutex); - TAILQ_INSERT_TAIL(&curthread->mutexq, - (*mutex), m_qe); - } else if ((*mutex)->m_owner == curthread) - ret = mutex_self_trylock(*mutex); - else - /* Return a busy error: */ - ret = EBUSY; - break; - - /* POSIX priority inheritence mutex: */ - case PTHREAD_PRIO_INHERIT: - /* Check if this mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for the running thread: */ - (*mutex)->m_owner = curthread; - - /* Track number of priority mutexes owned: */ - curthread->priority_mutex_count++; - - /* - * The mutex takes on the attributes of the - * running thread when there are no waiters. 
- */ - (*mutex)->m_prio = curthread->active_priority; - (*mutex)->m_saved_prio = - curthread->inherited_priority; - - /* Add to the list of owned mutexes: */ - _MUTEX_ASSERT_NOT_OWNED(*mutex); - TAILQ_INSERT_TAIL(&curthread->mutexq, - (*mutex), m_qe); - } else if ((*mutex)->m_owner == curthread) - ret = mutex_self_trylock(*mutex); - else - /* Return a busy error: */ - ret = EBUSY; - break; - - /* POSIX priority protection mutex: */ - case PTHREAD_PRIO_PROTECT: - /* Check for a priority ceiling violation: */ - if (curthread->active_priority > (*mutex)->m_prio) - ret = EINVAL; - - /* Check if this mutex is not locked: */ - else if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for the running thread: */ - (*mutex)->m_owner = curthread; - - /* Track number of priority mutexes owned: */ - curthread->priority_mutex_count++; - - /* - * The running thread inherits the ceiling - * priority of the mutex and executes at that - * priority. - */ - curthread->active_priority = (*mutex)->m_prio; - (*mutex)->m_saved_prio = - curthread->inherited_priority; - curthread->inherited_priority = - (*mutex)->m_prio; - - /* Add to the list of owned mutexes: */ - _MUTEX_ASSERT_NOT_OWNED(*mutex); - TAILQ_INSERT_TAIL(&curthread->mutexq, - (*mutex), m_qe); - } else if ((*mutex)->m_owner == curthread) - ret = mutex_self_trylock(*mutex); - else - /* Return a busy error: */ - ret = EBUSY; - break; - - /* Trap invalid mutex types: */ - default: - /* Return an invalid argument error: */ - ret = EINVAL; - break; - } - - /* Unlock the mutex structure: */ - _SPINUNLOCK(&(*mutex)->lock); - - /* - * Undefer and handle pending signals, yielding if - * necessary: - */ - /* _thread_kern_sig_undefer(); */ - - /* Return the completion status: */ - return (ret); -} - int __pthread_mutex_trylock(pthread_mutex_t *mutex) { @@ -442,7 +312,7 @@ * initialization: */ else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0) - ret = mutex_trylock_common(mutex); + ret = mutex_lock_common(mutex, 1); return 
(ret); } @@ -460,15 +330,15 @@ * initialization marking the mutex private (delete safe): */ else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0) - ret = mutex_trylock_common(mutex); + ret = mutex_lock_common(mutex, 1); return (ret); } static int -mutex_lock_common(pthread_mutex_t * mutex) +mutex_lock_common(pthread_mutex_t * mutex, int nonblock) { - int ret = 0; + int ret = 0, inCancel = 0; PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), "Uninitialized mutex in mutex_lock_common"); @@ -505,51 +375,18 @@ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ case PTHREAD_PRIO_NONE: - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for this thread: */ - (*mutex)->m_owner = curthread; - - /* Add to the list of owned mutexes: */ - _MUTEX_ASSERT_NOT_OWNED(*mutex); - TAILQ_INSERT_TAIL(&curthread->mutexq, - (*mutex), m_qe); - - } else if ((*mutex)->m_owner == curthread) - ret = mutex_self_lock(*mutex); - else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - mutex_queue_enq(*mutex, curthread); - - /* - * Keep a pointer to the mutex this thread - * is waiting on: - */ - curthread->data.mutex = *mutex; - - /* - * Unlock the mutex structure and schedule the - * next thread: - */ - /* XXX Sched lock. 
*/ - PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT); - _SPINUNLOCK(&(*mutex)->lock); - _thread_suspend(curthread, NULL); - - /* Lock the mutex structure again: */ - _SPINLOCK(&(*mutex)->lock); - } + if ((ret = get_muncontested(*mutex, nonblock)) == -1) + if (nonblock) { + ret = EBUSY; + break; + } else { + get_mcontested(*mutex); + } break; /* POSIX priority inheritence mutex: */ case PTHREAD_PRIO_INHERIT: - /* Check if this mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for this thread: */ - (*mutex)->m_owner = curthread; - + if ((ret = get_muncontested(*mutex, nonblock)) == 0) { /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -562,43 +399,18 @@ curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; - - /* Add to the list of owned mutexes: */ - _MUTEX_ASSERT_NOT_OWNED(*mutex); - TAILQ_INSERT_TAIL(&curthread->mutexq, - (*mutex), m_qe); - - } else if ((*mutex)->m_owner == curthread) - ret = mutex_self_lock(*mutex); - else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - mutex_queue_enq(*mutex, curthread); - - /* - * Keep a pointer to the mutex this thread - * is waiting on: - */ - curthread->data.mutex = *mutex; + } else if (ret == -1) { + if (nonblock) { + ret = EBUSY; + break; + } else { + get_mcontested(*mutex); + } if (curthread->active_priority > (*mutex)->m_prio) /* Adjust priorities: */ mutex_priority_adjust(*mutex); - - /* - * Unlock the mutex structure and schedule the - * next thread: - */ - /* XXX Sched lock. 
*/ - PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT); - _SPINUNLOCK(&(*mutex)->lock); - _thread_suspend(curthread, NULL); - - /* Lock the mutex structure again: */ - _SPINLOCK(&(*mutex)->lock); } break; @@ -608,14 +420,7 @@ if (curthread->active_priority > (*mutex)->m_prio) ret = EINVAL; - /* Check if this mutex is not locked: */ - else if ((*mutex)->m_owner == NULL) { - /* - * Lock the mutex for the running - * thread: - */ - (*mutex)->m_owner = curthread; - + if ((ret = get_muncontested(*mutex, nonblock)) == 0) { /* Track number of priority mutexes owned: */ curthread->priority_mutex_count++; @@ -629,40 +434,16 @@ curthread->inherited_priority; curthread->inherited_priority = (*mutex)->m_prio; - - /* Add to the list of owned mutexes: */ - _MUTEX_ASSERT_NOT_OWNED(*mutex); - TAILQ_INSERT_TAIL(&curthread->mutexq, - (*mutex), m_qe); - } else if ((*mutex)->m_owner == curthread) - ret = mutex_self_lock(*mutex); - else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - mutex_queue_enq(*mutex, curthread); - - /* - * Keep a pointer to the mutex this thread - * is waiting on: - */ - curthread->data.mutex = *mutex; + } else if (ret == -1) { + if (nonblock) { + ret = EBUSY; + break; + } /* Clear any previous error: */ curthread->error = 0; - /* - * Unlock the mutex structure and schedule the - * next thread: - */ - /* XXX Sched lock. 
*/ - PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT); - _SPINUNLOCK(&(*mutex)->lock); - _thread_suspend(curthread, NULL); - - /* Lock the mutex structure again: */ - _SPINLOCK(&(*mutex)->lock); + get_mcontested(*mutex); /* * The threads priority may have changed while @@ -685,8 +466,11 @@ * Check to see if this thread was interrupted and * is still in the mutex queue of waiting threads: */ - if (curthread->cancelflags & PTHREAD_CANCELLING) - mutex_queue_remove(*mutex, curthread); + if (curthread->cancelflags & PTHREAD_CANCELLING) { + if (!nonblock) + mutex_queue_remove(*mutex, curthread); + inCancel=1; + } /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); @@ -696,7 +480,7 @@ * necessary: */ /* _thread_kern_sig_undefer(); */ - if (curthread->cancelflags & PTHREAD_CANCELLING) { + if (inCancel) { pthread_testcancel(); PANIC("Canceled thread came back.\n"); } @@ -722,7 +506,7 @@ * initialization: */ else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0)) - ret = mutex_lock_common(mutex); + ret = mutex_lock_common(mutex, 0); return (ret); } @@ -743,7 +527,7 @@ * initialization marking it private (delete safe): */ else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0)) - ret = mutex_lock_common(mutex); + ret = mutex_lock_common(mutex, 0); return (ret); } @@ -890,7 +674,9 @@ /* * Get the next thread from the queue of - * threads waiting on the mutex: + * threads waiting on the mutex. The deq + * function will have already locked it + * for us. */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) { @@ -911,6 +697,7 @@ * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; + _thread_critical_exit((*mutex)->m_owner); } } break; @@ -964,7 +751,8 @@ /* * Get the next thread from the queue of threads - * waiting on the mutex: + * waiting on the mutex. It will already be + * locked for us. */ if (((*mutex)->m_owner = mutex_queue_deq(*mutex)) == NULL) @@ -1020,6 +808,8 @@ /* XXXTHR sched lock. 
*/ PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); + + _thread_critical_exit((*mutex)->m_owner); } } break; @@ -1074,7 +864,7 @@ /* * Enter a loop to find a waiting thread whose * active priority will not cause a ceiling - * violation: + * violation. It will already be locked for us. */ while ((((*mutex)->m_owner = mutex_queue_deq(*mutex)) != NULL) && @@ -1095,6 +885,8 @@ * this mutex: */ (*mutex)->m_owner->data.mutex = NULL; + + _thread_critical_exit((*mutex)->m_owner); } /* Check for a new owner: */ @@ -1140,6 +932,8 @@ /* XXXTHR sched lock. */ PTHREAD_NEW_STATE((*mutex)->m_owner, PS_RUNNING); + + _thread_critical_exit((*mutex)->m_owner); } } break; @@ -1496,7 +1290,7 @@ /* * Dequeue a waiting thread from the head of a mutex queue in descending - * priority order. + * priority order. This function will return with the thread locked. */ static inline pthread_t mutex_queue_deq(pthread_mutex_t mutex) @@ -1504,6 +1298,7 @@ pthread_t pthread; while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { + _thread_critical_enter(pthread); TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; @@ -1514,6 +1309,8 @@ if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 && pthread->state == PS_MUTEX_WAIT) break; + else + _thread_critical_exit(pthread); } return (pthread); @@ -1556,3 +1353,59 @@ pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } +/* + * Returns with the lock owned and on the thread's mutexq if + * it is currently unowned. Returns -1 if it is owned by another thread. + */ +static int +get_muncontested(pthread_mutex_t mutexp, int nonblock) +{ + if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) + return (-1); + else if (mutexp->m_owner == curthread) + if (nonblock) + return (mutex_self_trylock(mutexp)); + else + return (mutex_self_lock(mutexp)); + + /* + * The mutex belongs to this thread now. Mark it as + * such. Add it to the list of mutexes owned by this + * thread. 
*/ + mutexp->m_owner = curthread; + _MUTEX_ASSERT_NOT_OWNED(mutexp); + TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe); + return (0); +} + +/* + * Returns with the lock owned and on the thread's mutexq. If + * the mutex is currently owned by another thread it will sleep + * until it is available. + */ +static void +get_mcontested(pthread_mutex_t mutexp) +{ + _thread_critical_enter(curthread); + + /* + * Put this thread on the mutex's list of waiting threads. + * The lock on the thread ensures atomic (as far as other + * threads are concerned) setting of the thread state with + * its status on the mutex queue. + */ + do { + mutex_queue_enq(mutexp, curthread); + PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT); + curthread->data.mutex = mutexp; + _thread_critical_exit(curthread); + _SPINUNLOCK(&mutexp->lock); + _thread_suspend(curthread, NULL); + + _SPINLOCK(&mutexp->lock); + _thread_critical_enter(curthread); + } while (curthread->state != PS_MUTEX_WAIT); + + _thread_critical_exit(curthread); +} Index: lib/libthr/thread/thr_private.h =================================================================== RCS file: /home/ncvs/src/lib/libthr/thread/thr_private.h,v retrieving revision 1.4 diff -u -r1.4 thr_private.h --- lib/libthr/thread/thr_private.h 3 Apr 2003 03:34:50 -0000 1.4 +++ lib/libthr/thread/thr_private.h 8 May 2003 11:17:45 -0000 @@ -727,6 +727,8 @@ void _thread_leave_cancellation_point(void); void _thread_cancellation_point(void); int _thread_suspend(pthread_t thread, struct timespec *abstime); +void _thread_critical_enter(pthread_t); +void _thread_critical_exit(pthread_t); /* #include */ #ifdef _SYS_AIO_H_