Index: sys/sys/syscall.h =================================================================== --- sys/sys/syscall.h (版本 213714) +++ sys/sys/syscall.h (工作副本) @@ -431,4 +431,6 @@ #define SYS_shmctl 512 #define SYS_lpathconf 513 #define SYS_pselect 522 -#define SYS_MAXSYSCALL 523 +#define SYS_thr_set_robust_list 523 +#define SYS_thr_get_robust_list 524 +#define SYS_MAXSYSCALL 525 Index: sys/sys/_umtx.h =================================================================== --- sys/sys/_umtx.h (版本 213714) +++ sys/sys/_umtx.h (工作副本) @@ -40,7 +40,8 @@ volatile __lwpid_t m_owner; /* Owner of the mutex */ __uint32_t m_flags; /* Flags of the mutex */ __uint32_t m_ceilings[2]; /* Priority protect ceiling */ - __uint32_t m_spare[4]; + __uint32_t m_robust_state; + __uint32_t m_spare[3]; }; struct ucond { @@ -63,4 +64,15 @@ __uint32_t _flags; }; +struct _robust_list_entry { + struct _robust_list_entry *next; + struct _robust_list_entry *prev; +}; + +struct _robust_list_head { + struct _robust_list_entry list; /* circle list */ + struct _robust_list_entry *list_op_pending; + int offset; +}; + #endif /* !_SYS__UMTX_H_ */ Index: sys/sys/sysproto.h =================================================================== --- sys/sys/sysproto.h (版本 213714) +++ sys/sys/sysproto.h (工作副本) @@ -1665,6 +1665,14 @@ char ts_l_[PADL_(const struct timespec *)]; const struct timespec * ts; char ts_r_[PADR_(const struct timespec *)]; char sm_l_[PADL_(const sigset_t *)]; const sigset_t * sm; char sm_r_[PADR_(const sigset_t *)]; }; +struct thr_set_robust_list_args { + char head_l_[PADL_(struct _robust_list_head *)]; struct _robust_list_head * head; char head_r_[PADR_(struct _robust_list_head *)]; + char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; +}; +struct thr_get_robust_list_args { + char head_ptr_l_[PADL_(struct _robust_list_head **)]; struct _robust_list_head ** head_ptr; char head_ptr_r_[PADR_(struct _robust_list_head **)]; + char len_ptr_l_[PADL_(size_t *)]; size_t * len_ptr; char 
len_ptr_r_[PADR_(size_t *)]; +}; int nosys(struct thread *, struct nosys_args *); void sys_exit(struct thread *, struct sys_exit_args *); int fork(struct thread *, struct fork_args *); @@ -2026,6 +2034,8 @@ int shmctl(struct thread *, struct shmctl_args *); int lpathconf(struct thread *, struct lpathconf_args *); int pselect(struct thread *, struct pselect_args *); +int thr_set_robust_list(struct thread *, struct thr_set_robust_list_args *); +int thr_get_robust_list(struct thread *, struct thr_get_robust_list_args *); #ifdef COMPAT_43 @@ -2701,6 +2711,8 @@ #define SYS_AUE_shmctl AUE_SHMCTL #define SYS_AUE_lpathconf AUE_LPATHCONF #define SYS_AUE_pselect AUE_SELECT +#define SYS_AUE_thr_set_robust_list AUE_NULL +#define SYS_AUE_thr_get_robust_list AUE_NULL #undef PAD_ #undef PADL_ Index: sys/sys/thr.h =================================================================== --- sys/sys/thr.h (版本 213714) +++ sys/sys/thr.h (工作副本) @@ -58,6 +58,8 @@ void *spare[3]; /* TODO: cpu affinity mask etc. */ }; +struct _robust_list_head; + /* * See pthread_* */ @@ -79,6 +81,9 @@ int thr_suspend(const struct timespec *timeout); int thr_wake(long id); int thr_set_name(long id, const char *name); +int thr_set_robust_list(struct _robust_list_head *head, size_t len); +int thr_get_robust_list(struct _robust_list_head **head_ptr, + size_t *len_ptr); __END_DECLS #endif /* !_KERNEL */ Index: sys/sys/umtx.h =================================================================== --- sys/sys/umtx.h (版本 213714) +++ sys/sys/umtx.h (工作副本) @@ -40,11 +40,19 @@ #define UMUTEX_UNOWNED 0x0 #define UMUTEX_CONTESTED 0x80000000U +#define UMUTEX_TIDMASK 0x7fffffffU +/* umutex flags */ #define UMUTEX_ERROR_CHECK 0x0002 /* Error-checking mutex */ #define UMUTEX_PRIO_INHERIT 0x0004 /* Priority inherited mutex */ #define UMUTEX_PRIO_PROTECT 0x0008 /* Priority protect mutex */ +#define UMUTEX_ROBUST 0x0010 /* Mutex is robust mutex */ +/* robust mutex states */ +#define UMUTEX_ROBUST_NORMAL 0x0 +#define 
UMUTEX_ROBUST_OWNERDEAD 0x1 +#define UMUTEX_ROBUST_NOTRECOVERABLE 0x2 + /* urwlock flags */ #define URWLOCK_PREFER_READER 0x0002 Index: sys/sys/proc.h =================================================================== --- sys/sys/proc.h (版本 213714) +++ sys/sys/proc.h (工作副本) @@ -210,7 +210,7 @@ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ - struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ + struct umtx_q *td_umtxq; /* (*) Link for when we're blocked. */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals @@ -264,6 +264,8 @@ int td_ng_outbound; /* (k) Thread entered ng from above. */ struct osd td_osd; /* (k) Object specific data. */ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ + struct _robust_list_head + *td_robust_head;/* (*) Link for robust mutex. */ #define td_endzero td_rqindex /* Copied during fork1() or thread_sched_upcall(). 
*/ Index: sys/sys/errno.h =================================================================== --- sys/sys/errno.h (版本 213714) +++ sys/sys/errno.h (工作副本) @@ -181,6 +181,9 @@ #define ELAST 93 /* Must be equal largest errno */ #endif /* _POSIX_SOURCE */ +#define EOWNERDEAD 94 +#define ENOTRECOVERABLE 95 + #ifdef _KERNEL /* pseudo-errors returned inside kernel to modify return to process */ #define ERESTART (-1) /* restart syscall */ Index: sys/sys/syscall.mk =================================================================== --- sys/sys/syscall.mk (版本 213714) +++ sys/sys/syscall.mk (工作副本) @@ -379,4 +379,6 @@ msgctl.o \ shmctl.o \ lpathconf.o \ - pselect.o + pselect.o \ + thr_set_robust_list.o \ + thr_get_robust_list.o Index: sys/kern/kern_thread.c =================================================================== --- sys/kern/kern_thread.c (版本 213714) +++ sys/kern/kern_thread.c (工作副本) @@ -373,7 +373,6 @@ #ifdef AUDIT AUDIT_SYSCALL_EXIT(0, td); #endif - umtx_thread_exit(td); /* * drop FPU & debug register state storage, or any other * architecture specific resources that @@ -761,6 +760,7 @@ PROC_SUNLOCK(p); PROC_UNLOCK(p); tidhash_remove(td); + umtx_thread_exit(td); PROC_LOCK(p); PROC_SLOCK(p); thread_exit(); Index: sys/kern/kern_umtx.c =================================================================== --- sys/kern/kern_umtx.c (版本 213714) +++ sys/kern/kern_umtx.c (工作副本) @@ -225,6 +225,12 @@ SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD, &umtx_pi_allocated, 0, "Allocated umtx_pi"); +static int maxrobustmutex = 10000; +TUNABLE_INT("kern.umtx.maxrobustmutex", &maxrobustmutex); +SYSCTL_NODE(_kern, OID_AUTO, umtx, CTLFLAG_RD, 0, "umtx"); +SYSCTL_INT(_kern_umtx, OID_AUTO, maxrobustmutex, CTLFLAG_RD, &maxrobustmutex, + 0, "maximum robust mutex a thread can have"); + static void umtxq_sysinit(void *); static void umtxq_hash(struct umtx_key *key); static struct umtxq_chain *umtxq_getchain(struct umtx_key *key); @@ -243,7 +249,6 @@ static struct umtx_pi 
*umtx_pi_alloc(int); static void umtx_pi_free(struct umtx_pi *pi); static void umtx_pi_adjust_locked(struct thread *td, u_char oldpri); -static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags); static void umtx_thread_cleanup(struct thread *td); static void umtx_exec_hook(void *arg __unused, struct proc *p __unused, struct image_params *imgp __unused); @@ -1105,6 +1110,69 @@ return (0); } +/* Update robust mutex state while unlocking. */ +static __inline void +update_robust_state(struct umutex *m, int exiting) +{ + uint32_t robust; + + robust = fuword32(&m->m_robust_state); + if (exiting) { + if (robust == UMUTEX_ROBUST_NORMAL) { + /* Mark dead state. */ + suword32(&m->m_robust_state, UMUTEX_ROBUST_OWNERDEAD); + } + } + if (robust == UMUTEX_ROBUST_OWNERDEAD) { + /* Make the mutex not usable. */ + suword32(&m->m_robust_state, UMUTEX_ROBUST_NOTRECOVERABLE); + } +} + +/* Get error code after successfully locking a robust mutex. */ +static __inline int +calc_robust_error_code(struct umutex *m) +{ + uint32_t robust = fuword32(&m->m_robust_state); + + if (robust == UMUTEX_ROBUST_OWNERDEAD) + return (EOWNERDEAD); + if (robust == UMUTEX_ROBUST_NOTRECOVERABLE) + return (ENOTRECOVERABLE); + return (0); +} + +static inline struct umutex * +entry_to_umutex(struct _robust_list_entry *ent, int offset) +{ + return (struct umutex *)((char *)ent - offset); +} + +static inline struct _robust_list_entry * +umutex_to_entry(struct umutex *m, int offset) +{ + return (struct _robust_list_entry *)((char *)m + offset); +} + +static __inline int +clear_list_op_pending(struct thread *td, struct umutex *m) +{ + struct _robust_list_head roblist; + struct _robust_list_entry *ent; + int error; + + /* XXX 32-bit ?*/ + if (td->td_robust_head == NULL) + return (ENXIO); + error = copyin(td->td_robust_head, &roblist, sizeof(roblist)); + if (error) + return (error); + ent = umutex_to_entry(m, roblist.offset); + if (ent == roblist.list_op_pending) + return 
suword(&td->td_robust_head->list_op_pending, 0); + return (0); +} + /* * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. */ @@ -1135,8 +1203,10 @@ owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); /* The acquire succeeded. */ - if (owner == UMUTEX_UNOWNED) - return (0); + if (owner == UMUTEX_UNOWNED) { + error = 0; + break; + } /* The address was invalid. */ if (owner == -1) @@ -1147,8 +1217,10 @@ owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - if (owner == UMUTEX_CONTESTED) - return (0); + if (owner == UMUTEX_CONTESTED) { + error = 0; + break; + } /* The address was invalid. */ if (owner == -1) @@ -1214,7 +1286,9 @@ umtx_key_release(&uq->uq_key); } - return (0); + if (error == 0 && (flags & UMUTEX_ROBUST) != 0) + return calc_robust_error_code(m); + return (error); } /* @@ -1224,7 +1298,7 @@ * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. */ static int -do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, int exiting) { struct umtx_key key; uint32_t owner, old, id; @@ -1239,9 +1313,16 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & ~UMUTEX_CONTESTED) != id) { + clear_list_op_pending(td, m); return (EPERM); + } + if ((flags & UMUTEX_ROBUST) != 0) { + update_robust_state(m, exiting); + clear_list_op_pending(td, m); + } + if ((owner & UMUTEX_CONTESTED) == 0) { old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); if (old == -1) @@ -1269,7 +1350,7 @@ old = casuword32(&m->m_owner, owner, count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); umtxq_lock(&key); - umtxq_signal(&key,1); + umtxq_signal(&key, !exiting ? 1 : INT_MAX); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); @@ -1285,7 +1366,7 @@ * only for simple mutex. 
*/ static int -do_wake_umutex(struct thread *td, struct umutex *m) +do_wake_umutex(struct thread *td, struct umutex *m, int exiting) { struct umtx_key key; uint32_t owner; @@ -1317,7 +1398,7 @@ umtxq_lock(&key); if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0) - umtxq_signal(&key, 1); + umtxq_signal(&key, !exiting ? 1 : INT_MAX); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); @@ -1858,6 +1939,9 @@ umtxq_unlock(&uq->uq_key); umtx_key_release(&uq->uq_key); + + if (error == 0 && (flags & UMUTEX_ROBUST) != 0) + return calc_robust_error_code(m); return (error); } @@ -1865,10 +1949,10 @@ * Unlock a PI mutex. */ static int -do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, int exiting) { struct umtx_key key; - struct umtx_q *uq_first, *uq_first2, *uq_me; + struct umtx_q *uq_temp, *uq_temp2, *uq_me; struct umtx_pi *pi, *pi2; uint32_t owner, old, id; int error; @@ -1883,9 +1967,16 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & ~UMUTEX_CONTESTED) != id) { + clear_list_op_pending(td, m); return (EPERM); + } + if ((flags & UMUTEX_ROBUST) != 0) { + update_robust_state(m, exiting); + clear_list_op_pending(td, m); + } + /* This should be done in userland */ if ((owner & UMUTEX_CONTESTED) == 0) { old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); @@ -1903,10 +1994,10 @@ umtxq_lock(&key); umtxq_busy(&key); - count = umtxq_count_pi(&key, &uq_first); - if (uq_first != NULL) { + count = umtxq_count_pi(&key, &uq_temp); + if (uq_temp != NULL) { mtx_lock_spin(&umtx_lock); - pi = uq_first->uq_pi_blocked; + pi = uq_temp->uq_pi_blocked; KASSERT(pi != NULL, ("pi == NULL?")); if (pi->pi_owner != curthread) { mtx_unlock_spin(&umtx_lock); @@ -1920,25 +2011,25 @@ pi->pi_owner = NULL; TAILQ_REMOVE(&uq_me->uq_pi_contested, pi, pi_link); /* get highest priority thread which is still sleeping. 
*/ - uq_first = TAILQ_FIRST(&pi->pi_blocked); - while (uq_first != NULL && - (uq_first->uq_flags & UQF_UMTXQ) == 0) { - uq_first = TAILQ_NEXT(uq_first, uq_lockq); + TAILQ_FOREACH_SAFE(uq_temp, &pi->pi_blocked, uq_lockq, uq_temp2) { + if ((uq_temp->uq_flags & UQF_UMTXQ) != 0) { + umtxq_signal_thread(uq_temp); + if (!exiting) + break; + } } pri = PRI_MAX; TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) { - uq_first2 = TAILQ_FIRST(&pi2->pi_blocked); - if (uq_first2 != NULL) { - if (pri > UPRI(uq_first2->uq_thread)) - pri = UPRI(uq_first2->uq_thread); + uq_temp2 = TAILQ_FIRST(&pi2->pi_blocked); + if (uq_temp2 != NULL) { + if (pri > UPRI(uq_temp2->uq_thread)) + pri = UPRI(uq_temp2->uq_thread); } } thread_lock(curthread); sched_unlend_user_prio(curthread, pri); thread_unlock(curthread); mtx_unlock_spin(&umtx_lock); - if (uq_first) - umtxq_signal_thread(uq_first); } umtxq_unlock(&key); @@ -2088,6 +2179,9 @@ umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); umtx_key_release(&uq->uq_key); + + if (error == 0 && (flags & UMUTEX_ROBUST) != 0) + return calc_robust_error_code(m); return (error); } @@ -2095,7 +2189,7 @@ * Unlock a PP mutex. 
*/ static int -do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, int exiting) { struct umtx_key key; struct umtx_q *uq, *uq2; @@ -2115,14 +2209,21 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & ~UMUTEX_CONTESTED) != id) { + clear_list_op_pending(td, m); return (EPERM); + } + if ((flags & UMUTEX_ROBUST) != 0) { + update_robust_state(m, exiting); + clear_list_op_pending(td, m); + } + error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t)); if (error != 0) return (error); - if (rceiling == -1) + if (exiting || rceiling == -1) new_inherited_pri = PRI_MAX; else { rceiling = RTP_PRIO_MAX - rceiling; @@ -2148,7 +2249,7 @@ umtxq_lock(&key); if (error == 0) - umtxq_signal(&key, 1); + umtxq_signal(&key, !exiting ? 1 : INT_MAX); umtxq_unbusy(&key); umtxq_unlock(&key); @@ -2321,7 +2422,7 @@ * Unlock a userland POSIX mutex. */ static int -do_unlock_umutex(struct thread *td, struct umutex *m) +do_unlock_umutex(struct thread *td, struct umutex *m, int exiting) { uint32_t flags; @@ -2331,11 +2432,11 @@ switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: - return (do_unlock_normal(td, m, flags)); + return (do_unlock_normal(td, m, flags, exiting)); case UMUTEX_PRIO_INHERIT: - return (do_unlock_pi(td, m, flags)); + return (do_unlock_pi(td, m, flags, exiting)); case UMUTEX_PRIO_PROTECT: - return (do_unlock_pp(td, m, flags)); + return (do_unlock_pp(td, m, flags, exiting)); } return (EINVAL); @@ -2365,13 +2466,14 @@ * The magic thing is we should set c_has_waiters to 1 before * releasing user mutex. 
*/ - suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1); + if (fuword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters)) == 0) + suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1); umtxq_lock(&uq->uq_key); umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - error = do_unlock_umutex(td, m); + error = do_unlock_umutex(td, m, 0); umtxq_lock(&uq->uq_key); if (error == 0) { @@ -2839,7 +2941,8 @@ umtxq_insert(uq); umtxq_unlock(&uq->uq_key); - suword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 1); + if (fuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters)) == 0) + suword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 1); count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count)); if (count != 0) { @@ -3087,13 +3190,13 @@ static int __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap) { - return do_wake_umutex(td, uap->obj); + return do_wake_umutex(td, uap->obj, 0); } static int __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap) { - return do_unlock_umutex(td, uap->obj); + return do_unlock_umutex(td, uap->obj, 0); } static int @@ -3568,6 +3671,68 @@ umtx_thread_cleanup(td); } +static int cleanup_robust_umutex(struct thread *td, struct umutex *m) +{ + int error; + uint32_t owner; + + error = copyin(__DEVOLATILE(uint32_t *, &m->m_owner), &owner, + sizeof(uint32_t)); + if (error) + return (error); + owner &= UMUTEX_TIDMASK; + if (owner == td->td_tid) { + /* + * Mutex does not work in hand-off, pass 1 to wake up + * all threads. 
+ */ + (void) do_unlock_umutex(td, m, 1); + } else if (owner == UMUTEX_UNOWNED) { + /* may not necessary */ + (void) do_wake_umutex(td, m, 1); + } + return (error); +} + +static void +cleanup_robust_mutex_list(struct thread *td) +{ + struct proc *p = td->td_proc; + static struct timeval lasttime; + static int curpps; + + struct _robust_list_entry *ent, *next; + struct _robust_list_head roblist; + int error; + int loops = 0; + + if (td->td_robust_head == NULL) + return; + + error = copyin(td->td_robust_head, &roblist, sizeof(roblist)); + if (error) + return; + for (ent = roblist.list.next; + error == 0 && ent != &td->td_robust_head->list; + ent = next) { + error = copyin(&ent->next, &next, sizeof(next)); + if (ent == roblist.list_op_pending) + roblist.list_op_pending = NULL; + cleanup_robust_umutex(td, entry_to_umutex(ent, roblist.offset)); + + if (++loops > maxrobustmutex && + ppsratecheck(&lasttime, &curpps, 1)) { + printf("pid %d (%s), uid %d, %s\n", p->p_pid, p->p_comm, + p->p_ucred ? p->p_ucred->cr_uid : -1, + "has too many robust mutexes."); + } + } + if (roblist.list_op_pending != NULL) { + cleanup_robust_umutex(td, + entry_to_umutex(roblist.list_op_pending, roblist.offset)); + } +} + /* * clean up umtx data. */ @@ -3590,4 +3755,6 @@ td->td_flags &= ~TDF_UBORROWING; thread_unlock(td); mtx_unlock_spin(&umtx_lock); + + cleanup_robust_mutex_list(td); } Index: sys/kern/kern_thr.c =================================================================== --- sys/kern/kern_thr.c (版本 213714) +++ sys/kern/kern_thr.c (工作副本) @@ -279,7 +279,10 @@ p = td->td_proc; - /* Signal userland that it can free the stack. */ + /* Clean up umtx states before declaring our disappearance. */ + umtx_thread_exit(td); + + /* Signal userland that the thread is exited. 
*/ if ((void *)uap->state != NULL) { suword_lwpid(uap->state, 1); kern_umtx_wake(td, uap->state, INT_MAX, 0); @@ -522,3 +525,32 @@ PROC_UNLOCK(p); return (error); } + +int +thr_set_robust_list(struct thread *td, struct thr_set_robust_list_args *uap) +{ + if (uap->len != sizeof(struct _robust_list_head)) + return (EINVAL); + td->td_robust_head = uap->head; + return (0); +} + +int +thr_get_robust_list(struct thread *td, struct thr_get_robust_list_args *uap) +{ + size_t size = sizeof(struct _robust_list_head); + +#ifdef COMPAT_FREEBSD32 + if (SV_CURPROC_FLAG(SV_ILP32)) { + if (suword32(uap->head_ptr, (intptr_t)td->td_robust_head) || + suword32(uap->len_ptr, size)) + return (EFAULT); + return (0); + } +#endif + + if (suword(uap->head_ptr, (intptr_t)td->td_robust_head) || + suword(uap->len_ptr, size)) + return (EFAULT); + return (0); +} Index: sys/kern/kern_exit.c =================================================================== --- sys/kern/kern_exit.c (版本 213714) +++ sys/kern/kern_exit.c (工作副本) @@ -72,6 +72,7 @@ #ifdef KTRACE #include #endif +#include #include #include @@ -208,6 +209,8 @@ /* Drain the limit callout while we don't have the proc locked */ callout_drain(&p->p_limco); + umtx_thread_exit(td); + #ifdef AUDIT /* * The Sun BSM exit token contains two components: an exit status as Index: sys/kern/init_sysent.c =================================================================== --- sys/kern/init_sysent.c (版本 213714) +++ sys/kern/init_sysent.c (工作副本) @@ -557,4 +557,6 @@ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 520 = pdgetpid */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 521 = pdwait */ { AS(pselect_args), (sy_call_t *)pselect, AUE_SELECT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 522 = pselect */ + { AS(thr_set_robust_list_args), (sy_call_t *)thr_set_robust_list, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 523 = thr_set_robust_list */ + { AS(thr_get_robust_list_args), (sy_call_t *)thr_get_robust_list, AUE_NULL, 
NULL, 0, 0, 0, SY_THR_STATIC }, /* 524 = thr_get_robust_list */ }; Index: sys/kern/syscalls.c =================================================================== --- sys/kern/syscalls.c (版本 213714) +++ sys/kern/syscalls.c (工作副本) @@ -530,4 +530,6 @@ "#520", /* 520 = pdgetpid */ "#521", /* 521 = pdwait */ "pselect", /* 522 = pselect */ + "thr_set_robust_list", /* 523 = thr_set_robust_list */ + "thr_get_robust_list", /* 524 = thr_get_robust_list */ }; Index: sys/kern/syscalls.master =================================================================== --- sys/kern/syscalls.master (版本 213714) +++ sys/kern/syscalls.master (工作副本) @@ -926,5 +926,12 @@ fd_set *ou, fd_set *ex, \ const struct timespec *ts, \ const sigset_t *sm); } +523 AUE_NULL STD { int thr_set_robust_list( \ + struct _robust_list_head *head, \ + size_t len); } +524 AUE_NULL STD { int thr_get_robust_list( \ + struct _robust_list_head **head_ptr, \ + size_t *len_ptr); } + ; Please copy any additions and changes to the following compatability tables: ; sys/compat/freebsd32/syscalls.master Index: sys/kern/systrace_args.c =================================================================== --- sys/kern/systrace_args.c (版本 213714) +++ sys/kern/systrace_args.c (工作副本) @@ -3108,6 +3108,22 @@ *n_args = 6; break; } + /* thr_set_robust_list */ + case 523: { + struct thr_set_robust_list_args *p = params; + uarg[0] = (intptr_t) p->head; /* struct _robust_list_head * */ + uarg[1] = p->len; /* size_t */ + *n_args = 2; + break; + } + /* thr_get_robust_list */ + case 524: { + struct thr_get_robust_list_args *p = params; + uarg[0] = (intptr_t) p->head_ptr; /* struct _robust_list_head ** */ + uarg[1] = (intptr_t) p->len_ptr; /* size_t * */ + *n_args = 2; + break; + } default: *n_args = 0; break; @@ -8265,6 +8281,32 @@ break; }; break; + /* thr_set_robust_list */ + case 523: + switch(ndx) { + case 0: + p = "struct _robust_list_head *"; + break; + case 1: + p = "size_t"; + break; + default: + break; + }; + break; + /* 
thr_get_robust_list */ + case 524: + switch(ndx) { + case 0: + p = "struct _robust_list_head **"; + break; + case 1: + p = "size_t *"; + break; + default: + break; + }; + break; default: break; }; Index: lib/libthr/pthread.map =================================================================== --- lib/libthr/pthread.map (版本 213561) +++ lib/libthr/pthread.map (工作副本) @@ -278,6 +278,7 @@ _pthread_kill; _pthread_main_np; _pthread_multi_np; + _pthread_mutex_consistent; _pthread_mutex_destroy; _pthread_mutex_getprioceiling; _pthread_mutex_getspinloops_np; @@ -297,12 +298,14 @@ _pthread_mutexattr_getprioceiling; _pthread_mutexattr_getprotocol; _pthread_mutexattr_getpshared; + _pthread_mutexattr_getrobust; _pthread_mutexattr_gettype; _pthread_mutexattr_init; _pthread_mutexattr_setkind_np; _pthread_mutexattr_setprioceiling; _pthread_mutexattr_setprotocol; _pthread_mutexattr_setpshared; + _pthread_mutexattr_setrobust; _pthread_mutexattr_settype; _pthread_once; _pthread_resume_all_np; @@ -403,4 +406,7 @@ openat; setcontext; swapcontext; + pthread_mutexattr_getrobust; + pthread_mutexattr_setrobust; + pthread_mutex_consistent; }; Index: lib/libthr/thread/thr_mutex.c =================================================================== --- lib/libthr/thread/thr_mutex.c (版本 213561) +++ lib/libthr/thread/thr_mutex.c (工作副本) @@ -45,6 +45,7 @@ #include "thr_private.h" +#undef _PTHREADS_INVARIANTS #if defined(_PTHREADS_INVARIANTS) #define MUTEX_INIT_LINK(m) do { \ (m)->m_qe.tqe_prev = NULL; \ @@ -65,6 +66,10 @@ #define MUTEX_ASSERT_NOT_OWNED(m) #endif +#define IS_ROBUST_MUTEX(m) (((m)->m_lock.m_flags & UMUTEX_ROBUST) != 0) +#define IS_PRI_PROTECT_MUTEX(m) (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0) +#define MUTEX_OWNER(m) ((m)->m_lock.m_owner & UMUTEX_TIDMASK) + /* * For adaptive mutexes, how many times to spin doing trylock2 * before entering the kernel to block @@ -93,8 +98,7 @@ static int mutex_self_lock(pthread_mutex_t, const struct timespec *abstime); static int 
mutex_unlock_common(pthread_mutex_t *); -static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, - const struct timespec *); +static int mutex_lock_sleep(pthread_mutex_t, const struct timespec *); __weak_reference(__pthread_mutex_init, pthread_mutex_init); __strong_reference(__pthread_mutex_init, _pthread_mutex_init); @@ -121,6 +125,7 @@ __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); +__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent); static int mutex_init(pthread_mutex_t *mutex, @@ -167,13 +172,13 @@ pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; break; } - + if (attr->m_robust == PTHREAD_MUTEX_ROBUST) + pmutex->m_lock.m_flags |= UMUTEX_ROBUST; if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) { pmutex->m_spinloops = _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS; pmutex->m_yieldloops = _thr_yieldloops; } - *mutex = pmutex; return (0); } @@ -208,6 +213,23 @@ m->m_lock.m_ceilings[1] = -1; } +static inline void +enqueue_robust_mutex(struct pthread *td, struct pthread_mutex *m) +{ + m->m_roblink.next = td->robust.list.next; + m->m_roblink.prev = &td->robust.list; + compiler_memory_barrier(); + td->robust.list.next = &m->m_roblink; + m->m_roblink.next->prev = &m->m_roblink; +} + +static inline void +dequeue_robust_mutex(struct pthread *td, struct pthread_mutex *m) +{ + m->m_roblink.prev->next = m->m_roblink.next; + m->m_roblink.next->prev = m->m_roblink.prev; +} + int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr) @@ -274,25 +296,31 @@ * Try to lock the mutex structure, we only need to * try once, if failed, the mutex is in used. 
*/ + if (__predict_false(IS_ROBUST_MUTEX(m))) + curthread->robust.list_op_pending = &m->m_roblink; ret = _thr_umutex_trylock(&m->m_lock, id); - if (ret) + if (ret) { + curthread->robust.list_op_pending = NULL; return (ret); + } /* * Check mutex other fields to see if this mutex is * in use. Mostly for prority mutex types, or there * are condition variables referencing it. */ if (m->m_owner != NULL || m->m_refcount != 0) { - if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) + if (IS_PRI_PROTECT_MUTEX(m)) set_inherited_priority(curthread, m); _thr_umutex_unlock(&m->m_lock, id); + curthread->robust.list_op_pending = NULL; ret = EBUSY; } else { *mutex = THR_MUTEX_DESTROYED; - if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) + if (IS_PRI_PROTECT_MUTEX(m)) set_inherited_priority(curthread, m); _thr_umutex_unlock(&m->m_lock, id); + curthread->robust.list_op_pending = NULL; MUTEX_ASSERT_NOT_OWNED(m); free(m); @@ -307,7 +335,7 @@ (m)->m_owner = curthread; \ /* Add to the list of owned mutexes: */ \ MUTEX_ASSERT_NOT_OWNED((m)); \ - if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \ + if (!IS_PRI_PROTECT_MUTEX(m)) \ TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\ else \ TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\ @@ -324,19 +352,66 @@ m = *mutex; \ } +static inline int +robust_mutex_lock_final(struct pthread *curthread, struct pthread_mutex *m, + int everlocked) +{ + int ret = 0; + uint32_t id; + + id = TID(curthread); + if (m->m_lock.m_robust_state == UMUTEX_ROBUST_NOTRECOVERABLE) { + _thr_umutex_unlock(&m->m_lock, id); + curthread->robust.list_op_pending = NULL; + if (m->m_private) + THR_CRITICAL_LEAVE(curthread); + return (ENOTRECOVERABLE); + } + if (m->m_lock.m_robust_state == UMUTEX_ROBUST_OWNERDEAD) + ret = EOWNERDEAD; + if (!everlocked) { + enqueue_robust_mutex(curthread, m); + ENQUEUE_MUTEX(curthread, m); + } + curthread->robust.list_op_pending = NULL; + return (ret); +} + static int -mutex_trylock_common(pthread_mutex_t *mutex) 
+robust_mutex_trylock_common(struct pthread_mutex *m) { struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m = *mutex; uint32_t id; - int ret; + int ret, locked; id = TID(curthread); if (m->m_private) THR_CRITICAL_ENTER(curthread); + curthread->robust.list_op_pending = &m->m_roblink; + locked = (MUTEX_OWNER(m) == id); ret = _thr_umutex_trylock(&m->m_lock, id); if (__predict_true(ret == 0)) { + return robust_mutex_lock_final(curthread, m, locked); + } else if (locked) { + ret = mutex_self_trylock(m); + } /* else {} */ + + curthread->robust.list_op_pending = NULL; + if (ret && m->m_private) + THR_CRITICAL_LEAVE(curthread); + return (ret); +} + +static int +stalled_mutex_trylock_common(struct pthread_mutex *m) +{ + struct pthread *curthread = _get_curthread(); + int ret; + + if (m->m_private) + THR_CRITICAL_ENTER(curthread); + ret = _thr_umutex_trylock(&m->m_lock, TID(curthread)); + if (__predict_true(ret == 0)) { ENQUEUE_MUTEX(curthread, m); } else if (m->m_owner == curthread) { ret = mutex_self_trylock(m); @@ -346,6 +421,15 @@ return (ret); } +static int +mutex_trylock_common(struct pthread_mutex *m) +{ + if (__predict_false(IS_ROBUST_MUTEX(m))) + return (robust_mutex_trylock_common(m)); + else + return (stalled_mutex_trylock_common(m)); +} + int __pthread_mutex_trylock(pthread_mutex_t *mutex) { @@ -353,20 +437,17 @@ CHECK_AND_INIT_MUTEX - return (mutex_trylock_common(mutex)); + return (mutex_trylock_common(m)); } static int -mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, - const struct timespec *abstime) +mutex_lock_sleep(struct pthread_mutex *m, const struct timespec *abstime) { + struct pthread *curthread = _get_curthread(); uint32_t id, owner; int count; int ret; - if (m->m_owner == curthread) - return mutex_self_lock(m, abstime); - id = TID(curthread); /* * For adaptive mutexes, spin for a bit in the expectation @@ -418,14 +499,41 @@ ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); } done: - if (ret == 0) - 
ENQUEUE_MUTEX(curthread, m); + return (ret); +} +static inline int +robust_mutex_lock_common(struct pthread_mutex *m, + const struct timespec *abstime) +{ + struct pthread *curthread = _get_curthread(); + uint32_t id; + int ret, locked; + + id = TID(curthread); + curthread->robust.list_op_pending = &m->m_roblink; + locked = (MUTEX_OWNER(m) == id); + if (m->m_private) + THR_CRITICAL_ENTER(curthread); + if (__predict_true( + (ret = _thr_umutex_trylock2(&m->m_lock, id)) == 0)) { + goto success; + } else if (locked) { + ret = mutex_self_lock(m, abstime); + } else if (__predict_true((ret = mutex_lock_sleep(m, abstime)) == 0)) { + goto success; + } + if (ret && m->m_private) + THR_CRITICAL_LEAVE(curthread); + curthread->robust.list_op_pending = NULL; return (ret); + +success: + return robust_mutex_lock_final(curthread, m, locked); } static inline int -mutex_lock_common(struct pthread_mutex *m, +stalled_mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); @@ -434,16 +542,30 @@ if (m->m_private) THR_CRITICAL_ENTER(curthread); if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { + ret = 0; ENQUEUE_MUTEX(curthread, m); - ret = 0; + } else if (m->m_owner == curthread) { + ret = mutex_self_lock(m, abstime); } else { - ret = mutex_lock_sleep(curthread, m, abstime); + ret = mutex_lock_sleep(m, abstime); + if (ret == 0) + ENQUEUE_MUTEX(curthread, m); } if (ret && m->m_private) THR_CRITICAL_LEAVE(curthread); return (ret); } +static inline int +mutex_lock_common(struct pthread_mutex *m, + const struct timespec *abstime) +{ + if (__predict_false(IS_ROBUST_MUTEX(m))) + return (robust_mutex_lock_common(m, abstime)); + else + return (stalled_mutex_lock_common(m, abstime)); +} + int __pthread_mutex_lock(pthread_mutex_t *mutex) { @@ -615,14 +737,21 @@ m->m_owner = NULL; /* Remove the mutex from the threads queue. 
*/ MUTEX_ASSERT_IS_OWNED(m); - if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) + if (__predict_true(!IS_PRI_PROTECT_MUTEX(m))) TAILQ_REMOVE(&curthread->mutexq, m, m_qe); else { TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); set_inherited_priority(curthread, m); } MUTEX_INIT_LINK(m); + if (__predict_false(IS_ROBUST_MUTEX(m))) { + curthread->robust.list_op_pending = &m->m_roblink; + dequeue_robust_mutex(curthread, m); + if (m->m_lock.m_robust_state == UMUTEX_ROBUST_OWNERDEAD) + m->m_lock.m_robust_state = UMUTEX_ROBUST_NOTRECOVERABLE; + } _thr_umutex_unlock(&m->m_lock, id); + curthread->robust.list_op_pending = NULL; } if (m->m_private) THR_CRITICAL_LEAVE(curthread); @@ -657,14 +786,19 @@ m->m_owner = NULL; /* Remove the mutex from the threads queue. */ MUTEX_ASSERT_IS_OWNED(m); - if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) + if (__predict_true(!IS_PRI_PROTECT_MUTEX(m))) TAILQ_REMOVE(&curthread->mutexq, m, m_qe); else { TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); set_inherited_priority(curthread, m); } MUTEX_INIT_LINK(m); + if (__predict_false(IS_ROBUST_MUTEX(m))) { + curthread->robust.list_op_pending = &m->m_roblink; + dequeue_robust_mutex(curthread, m); + } _thr_umutex_unlock(&m->m_lock, TID(curthread)); + curthread->robust.list_op_pending = NULL; if (m->m_private) THR_CRITICAL_LEAVE(curthread); @@ -679,8 +813,7 @@ int ret; m = *mutex; - if ((m <= THR_MUTEX_DESTROYED) || - (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) + if ((m <= THR_MUTEX_DESTROYED) || !IS_PRI_PROTECT_MUTEX(m)) ret = EINVAL; else { *prioceiling = m->m_lock.m_ceilings[0]; @@ -699,8 +832,7 @@ int ret; m = *mutex; - if ((m <= THR_MUTEX_DESTROYED) || - (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) + if ((m <= THR_MUTEX_DESTROYED) || !IS_PRI_PROTECT_MUTEX(m)) return (EINVAL); ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); @@ -780,3 +912,15 @@ return (0); return (m->m_owner == _get_curthread()); } + +int 
+_pthread_mutex_consistent(pthread_mutex_t *mutex) +{ + struct pthread_mutex *m; + + CHECK_AND_INIT_MUTEX + + m->m_lock.m_robust_state = UMUTEX_ROBUST_NORMAL; + m->m_count = 0; + return (0); +} Index: lib/libthr/thread/thr_private.h =================================================================== --- lib/libthr/thread/thr_private.h (版本 213561) +++ lib/libthr/thread/thr_private.h (工作副本) @@ -147,6 +147,7 @@ int m_spinloops; int m_yieldloops; int m_private; + struct _robust_list_entry m_roblink; /* * Link for all mutexes a thread currently owns. */ @@ -157,6 +158,7 @@ enum pthread_mutextype m_type; int m_protocol; int m_ceiling; + int m_robust; }; #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ @@ -465,6 +467,8 @@ int unwind_disabled; #endif + struct _robust_list_head robust; + /* * Magic value to help recognize a valid thread structure * from an invalid one: @@ -717,6 +721,7 @@ void _thr_signal_postfork(void) __hidden; void _thr_signal_postfork_child(void) __hidden; void _thr_try_gc(struct pthread *, struct pthread *) __hidden; +void _thr_init_robustlist(struct pthread *curthread) __hidden; int _rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param) __hidden; int _schedparam_to_rtp(int policy, const struct sched_param *param, @@ -795,6 +800,10 @@ _libpthread_init(NULL); } +static __inline void compiler_memory_barrier(void) { + __asm __volatile("":::"memory"); +} + struct dl_phdr_info; void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info); void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden; Index: lib/libthr/thread/thr_mutexattr.c =================================================================== --- lib/libthr/thread/thr_mutexattr.c (版本 213561) +++ lib/libthr/thread/thr_mutexattr.c (工作副本) @@ -81,6 +81,8 @@ __weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol); __weak_reference(_pthread_mutexattr_getprioceiling, pthread_mutexattr_getprioceiling); 
__weak_reference(_pthread_mutexattr_setprioceiling, pthread_mutexattr_setprioceiling); +__weak_reference(_pthread_mutexattr_setrobust, pthread_mutexattr_setrobust); +__weak_reference(_pthread_mutexattr_getrobust, pthread_mutexattr_getrobust); int _pthread_mutexattr_init(pthread_mutexattr_t *attr) @@ -253,3 +255,29 @@ return(ret); } +int +_pthread_mutexattr_getrobust(const pthread_mutexattr_t *mattr, int *robust) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else + *robust = (*mattr)->m_robust; + return (ret); +} + +int +_pthread_mutexattr_setrobust(pthread_mutexattr_t *mattr, int robust) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else if (robust != PTHREAD_MUTEX_STALLED && + robust != PTHREAD_MUTEX_ROBUST) + ret = EINVAL; + else + (*mattr)->m_robust = robust; + return (ret); +} Index: lib/libthr/thread/thr_umtx.c =================================================================== --- lib/libthr/thread/thr_umtx.c (版本 213561) +++ lib/libthr/thread/thr_umtx.c (工作副本) @@ -121,7 +121,7 @@ { #ifndef __ia64__ /* XXX this logic has a race-condition on ia64. 
*/ - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED); return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0); } Index: lib/libthr/thread/thr_umtx.h =================================================================== --- lib/libthr/thread/thr_umtx.h (版本 213561) +++ lib/libthr/thread/thr_umtx.h (工作副本) @@ -32,7 +32,7 @@ #include #include -#define DEFAULT_UMUTEX {0,0,{0,0},{0,0,0,0}} +#define DEFAULT_UMUTEX {0,0,{0,0},0,{0,0,0}} #define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}} int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden; Index: lib/libthr/thread/thr_init.c =================================================================== --- lib/libthr/thread/thr_init.c (版本 213561) +++ lib/libthr/thread/thr_init.c (工作副本) @@ -89,13 +89,15 @@ struct pthread_mutex_attr _pthread_mutexattr_default = { .m_type = PTHREAD_MUTEX_DEFAULT, .m_protocol = PTHREAD_PRIO_NONE, - .m_ceiling = 0 + .m_ceiling = 0, + .m_robust = PTHREAD_MUTEX_STALLED }; struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = { .m_type = PTHREAD_MUTEX_ADAPTIVE_NP, .m_protocol = PTHREAD_PRIO_NONE, - .m_ceiling = 0 + .m_ceiling = 0, + .m_robust = PTHREAD_MUTEX_STALLED }; /* Default condition variable attributes: */ @@ -420,14 +422,24 @@ _thr_getscheduler(thread->tid, &thread->attr.sched_policy, &sched_param); thread->attr.prio = sched_param.sched_priority; - #ifdef _PTHREAD_FORCED_UNWIND thread->unwind_stackend = _usrstack; #endif - + _thr_init_robustlist(thread); /* Others cleared to zero by thr_alloc() */ } +void +_thr_init_robustlist(struct pthread *curthread) +{ + curthread->robust.list.next = &curthread->robust.list; + curthread->robust.list.prev = &curthread->robust.list; + curthread->robust.list_op_pending = NULL; + curthread->robust.offset = offsetof(struct pthread_mutex, m_roblink) - + offsetof(struct 
pthread_mutex, m_lock); + thr_set_robust_list(&curthread->robust, sizeof(curthread->robust)); +} + static void init_private(void) { Index: lib/libthr/thread/thr_create.c =================================================================== --- lib/libthr/thread/thr_create.c (版本 213561) +++ lib/libthr/thread/thr_create.c (工作副本) @@ -237,6 +237,8 @@ { sigset_t set; + _thr_init_robustlist(curthread); + if (curthread->attr.suspend == THR_CREATE_SUSPENDED) set = curthread->sigmask; Index: lib/libc/stdlib/malloc.c =================================================================== --- lib/libc/stdlib/malloc.c (版本 213561) +++ lib/libc/stdlib/malloc.c (工作副本) @@ -123,7 +123,7 @@ * defaults the A and J runtime options to off. These settings are appropriate * for production systems. */ -/* #define MALLOC_PRODUCTION */ +#define MALLOC_PRODUCTION #ifndef MALLOC_PRODUCTION /* Index: lib/libc/include/namespace.h =================================================================== --- lib/libc/include/namespace.h (版本 213561) +++ lib/libc/include/namespace.h (工作副本) @@ -143,6 +143,7 @@ #define pthread_kill _pthread_kill #define pthread_main_np _pthread_main_np #define pthread_multi_np _pthread_multi_np +#define pthread_mutex_consistent _pthread_mutex_consistent #define pthread_mutex_destroy _pthread_mutex_destroy #define pthread_mutex_getprioceiling _pthread_mutex_getprioceiling #define pthread_mutex_init _pthread_mutex_init @@ -154,6 +155,7 @@ #define pthread_mutex_unlock _pthread_mutex_unlock #define pthread_mutexattr_destroy _pthread_mutexattr_destroy #define pthread_mutexattr_getkind_np _pthread_mutexattr_getkind_np +#define pthread_mutexattr_getrobust _pthread_mutexattr_getrobust #define pthread_mutexattr_getprioceiling _pthread_mutexattr_getprioceiling #define pthread_mutexattr_getprotocol _pthread_mutexattr_getprotocol #define pthread_mutexattr_getpshared _pthread_mutexattr_getpshared @@ -163,6 +165,7 @@ #define pthread_mutexattr_setprioceiling _pthread_mutexattr_setprioceiling 
#define pthread_mutexattr_setprotocol _pthread_mutexattr_setprotocol #define pthread_mutexattr_setpshared _pthread_mutexattr_setpshared +#define pthread_mutexattr_setrobust _pthread_mutexattr_setrobust #define pthread_mutexattr_settype _pthread_mutexattr_settype #define pthread_once _pthread_once #define pthread_resume_all_np _pthread_resume_all_np Index: lib/libc/include/un-namespace.h =================================================================== --- lib/libc/include/un-namespace.h (版本 213561) +++ lib/libc/include/un-namespace.h (工作副本) @@ -124,6 +124,7 @@ #undef pthread_kill #undef pthread_main_np #undef pthread_multi_np +#undef pthread_mutex_consistent #undef pthread_mutex_destroy #undef pthread_mutex_getprioceiling #undef pthread_mutex_init @@ -138,12 +139,14 @@ #undef pthread_mutexattr_getprioceiling #undef pthread_mutexattr_getprotocol #undef pthread_mutexattr_getpshared +#undef pthread_mutexattr_getrobust #undef pthread_mutexattr_gettype #undef pthread_mutexattr_init #undef pthread_mutexattr_setkind_np #undef pthread_mutexattr_setprioceiling #undef pthread_mutexattr_setprotocol #undef pthread_mutexattr_setpshared +#undef pthread_mutexattr_setrobust #undef pthread_mutexattr_settype #undef pthread_once #undef pthread_resume_all_np Index: lib/libc/sys/Symbol.map =================================================================== --- lib/libc/sys/Symbol.map (版本 213561) +++ lib/libc/sys/Symbol.map (工作副本) @@ -295,11 +295,13 @@ syscall; thr_create; thr_exit; + thr_get_robust_list; thr_kill; thr_kill2; thr_new; thr_self; thr_set_name; + thr_set_robust_list; thr_suspend; thr_wake; ktimer_create; /* Do we want these to be public interfaces? */