--- src/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h	2012-04-24 12:35:40.000000000 +0000
+++ src/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h	2012-05-08 08:39:15.000000000 +0000
@@ -78,3 +78,8 @@
 #define	close	_close
 #define	pthread_mutex_lock	_pthread_mutex_lock
 #define	pthread_mutex_unlock	_pthread_mutex_unlock
+#define	pthread_atfork	_pthread_atfork
+#define	pthread_mutexattr_init	_pthread_mutexattr_init
+#define	pthread_mutexattr_settype	_pthread_mutexattr_settype
+#define	pthread_mutex_init	_pthread_mutex_init
+#define	pthread_mutexattr_destroy	_pthread_mutexattr_destroy
--- src/contrib/jemalloc/include/jemalloc/jemalloc_defs.h	2012-05-10 18:35:44.000000000 +0000
+++ src/contrib/jemalloc/include/jemalloc/jemalloc_defs.h	2012-05-25 06:47:49.000000000 +0000
@@ -100,7 +100,7 @@
  * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
  * to avoid recursive allocation during mutex initialization.
  */
-#define JEMALLOC_MUTEX_INIT_CB 1
+/* #undef JEMALLOC_MUTEX_INIT_CB */
 
 /* Defined if __attribute__((...)) syntax is supported. */
 #define JEMALLOC_HAVE_ATTR
--- src/contrib/jemalloc/src/base.c	2012-04-17 07:36:20.000000000 +0000
+++ src/contrib/jemalloc/src/base.c	2012-05-11 08:49:47.000000000 +0000
@@ -4,7 +4,7 @@
 /******************************************************************************/
 /* Data. */
 
-static malloc_mutex_t	base_mtx;
+static malloc_mutex_t	base_mtx = MALLOC_MUTEX_INITIALIZER;
 
 /*
  * Current pages that are being used for internal memory allocations.  These
--- src/contrib/jemalloc/src/chunk_dss.c	2012-04-22 09:38:34.000000000 +0000
+++ src/contrib/jemalloc/src/chunk_dss.c	2012-05-11 08:49:47.000000000 +0000
@@ -7,7 +7,7 @@
  * Protects sbrk() calls.  This avoids malloc races among threads, though it
  * does not protect against races with threads that call sbrk() directly.
  */
-static malloc_mutex_t	dss_mtx;
+static malloc_mutex_t	dss_mtx = MALLOC_MUTEX_INITIALIZER;
 
 /* Base address of the DSS. */
 static void	*dss_base;
--- src/contrib/jemalloc/src/huge.c	2012-05-10 18:35:44.000000000 +0000
+++ src/contrib/jemalloc/src/huge.c	2012-05-25 06:47:49.000000000 +0000
@@ -8,7 +8,7 @@
 uint64_t	huge_ndalloc;
 size_t		huge_allocated;
 
-malloc_mutex_t	huge_mtx;
+malloc_mutex_t	huge_mtx = MALLOC_MUTEX_INITIALIZER;
 
 /******************************************************************************/
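[Editor's note] The jemalloc hunks above all hinge on one thing: once pthread_mutex_t is a plain structure with a usable static initializer, jemalloc's internal locks can be initialized at compile time instead of through _pthread_mutex_init_calloc_cb() or fragile bootstrap-time code. A minimal sketch of the same idiom at the application level (names here are illustrative, not part of the patch):

#include <pthread.h>

/* Statically initialized: usable before any constructor or init hook runs. */
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

int
bump_counter(void)
{
	int v;

	pthread_mutex_lock(&counter_lock);	/* no pthread_mutex_init() needed */
	v = ++counter;
	pthread_mutex_unlock(&counter_lock);
	return (v);
}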
--- src/contrib/jemalloc/src/jemalloc.c	2012-05-12 05:37:00.000000000 +0000
+++ src/contrib/jemalloc/src/jemalloc.c	2012-05-25 06:47:49.000000000 +0000
@@ -35,7 +35,7 @@
 unsigned	ncpus;
 
-malloc_mutex_t	arenas_lock;
+malloc_mutex_t	arenas_lock = MALLOC_MUTEX_INITIALIZER;
 arena_t		**arenas;
 unsigned	narenas;
@@ -643,6 +643,7 @@
 	malloc_conf_init();
 
+#ifndef __FreeBSD__
 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
     && !defined(_WIN32))
 	/* Register fork handlers. */
@@ -653,6 +654,7 @@
 		abort();
 	}
 #endif
+#endif
 
 	if (opt_stats_print) {
 		/* Print statistics at exit. */
@@ -1616,13 +1618,13 @@
  * malloc during fork().
  */
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_prefork(void)
-#else
+//#if !defined(JEMALLOC_MUTEX_INIT_CB)
+//void
+//jemalloc_prefork(void)
+//#else
 JEMALLOC_EXPORT void
 _malloc_prefork(void)
-#endif
+//#endif
 {
 	unsigned i;
@@ -1643,13 +1645,13 @@
 	chunk_dss_prefork();
 }
 
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_postfork_parent(void)
-#else
+//#if !defined(JEMALLOC_MUTEX_INIT_CB)
+//void
+//jemalloc_postfork_parent(void)
+//#else
 JEMALLOC_EXPORT void
 _malloc_postfork(void)
-#endif
+//#endif
 {
 	unsigned i;
--- src/gnu/lib/libgcc/Makefile	2012-05-15 23:36:28.000000000 +0000
+++ src/gnu/lib/libgcc/Makefile	2012-05-25 06:47:49.000000000 +0000
@@ -3,7 +3,7 @@
 GCCDIR=	${.CURDIR}/../../../contrib/gcc
 GCCLIB=	${.CURDIR}/../../../contrib/gcclibs
 
-SHLIB_NAME=	libgcc_s.so.1
+SHLIB_NAME=	libgcc_s.so.2
 SHLIBDIR?=	/lib
 
 .include <bsd.own.mk>
--- src/gnu/lib/libgomp/Makefile	2011-02-02 03:37:18.000000000 +0000
+++ src/gnu/lib/libgomp/Makefile	2012-05-25 06:47:02.000000000 +0000
@@ -7,7 +7,7 @@
 .PATH: ${SRCDIR} ${SRCDIR}/config/posix
 
 LIB=	gomp
-SHLIB_MAJOR=	1
+SHLIB_MAJOR=	2
 
 SRCS=	alloc.c barrier.c critical.c env.c \
 	error.c iter.c loop.c ordered.c parallel.c sections.c \
@@ -27,18 +27,18 @@
     ${MACHINE_ARCH} == powerpc || \
     (${MACHINE_CPUARCH} == mips && ${MACHINE_ARCH:Mmips64*} == "")
 OMP_LOCK_ALIGN = 4
-OMP_LOCK_KIND= 4
-OMP_LOCK_SIZE= 4
+OMP_LOCK_KIND= 8
+OMP_LOCK_SIZE= 36
 OMP_NEST_LOCK_ALIGN= 4
 OMP_NEST_LOCK_KIND= 8
-OMP_NEST_LOCK_SIZE= 8
+OMP_NEST_LOCK_SIZE= 40
 .else
 OMP_LOCK_ALIGN = 8
 OMP_LOCK_KIND= 8
-OMP_LOCK_SIZE= 8
+OMP_LOCK_SIZE= 36
 OMP_NEST_LOCK_ALIGN= 8
 OMP_NEST_LOCK_KIND= 8
-OMP_NEST_LOCK_SIZE= 16
+OMP_NEST_LOCK_SIZE= 44
 .endif
 
 gstdint.h:
--- src/gnu/lib/libstdc++/Makefile	2012-03-31 14:36:32.000000000 +0000
+++ src/gnu/lib/libstdc++/Makefile	2012-04-06 09:56:43.000000000 +0000
@@ -11,7 +11,7 @@
 	${SRCDIR}/include ${SUPDIR} ${GCCDIR} ${GCCLIB}/libiberty
 
 LIB=	stdc++
-SHLIB_MAJOR=	6
+SHLIB_MAJOR=	7
 
 CFLAGS+=	-DIN_GLIBCPP_V3 -DHAVE_CONFIG_H
 .if ${MACHINE_CPUARCH} == "arm"
--- src/include/pthread.h	2011-01-06 03:36:06.000000000 +0000
+++ src/include/pthread.h	2012-04-26 07:47:24.000000000 +0000
@@ -92,15 +92,23 @@
 /*
  * Static once initialization values.
  */
-#define PTHREAD_ONCE_INIT	{ PTHREAD_NEEDS_INIT, NULL }
+#define PTHREAD_ONCE_INIT	{ PTHREAD_NEEDS_INIT }
+
+#define	_PTHREAD_MUTEX_MAGIC	0x3643
+#define	_PTHREAD_COND_MAGIC	0x4723
+#define	_PTHREAD_RWLOCK_MAGIC	0x5821
 
 /*
  * Static initialization values.
  */
-#define PTHREAD_MUTEX_INITIALIZER	NULL
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP	((pthread_mutex_t)1)
-#define PTHREAD_COND_INITIALIZER	NULL
-#define PTHREAD_RWLOCK_INITIALIZER	NULL
+#define	PTHREAD_MUTEX_INITIALIZER	\
+	{0, 0x0010, 0, 0, {0}, 0, 0, 0, _PTHREAD_MUTEX_MAGIC, 0}
+#define	PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP	\
+	{0, 0x0110, 0, 0, {0}, 0, 0, 0, _PTHREAD_MUTEX_MAGIC, 0}
+#define	PTHREAD_COND_INITIALIZER	\
+	{_PTHREAD_COND_MAGIC, 0, 0, 0, 0, CLOCK_REALTIME}
+#define	PTHREAD_RWLOCK_INITIALIZER	\
+	{_PTHREAD_RWLOCK_MAGIC, 0, 0, {NULL}, 0, 0, 0, 0}
 
 /*
  * Default attribute arguments (draft 4, deprecated).
  */
@@ -135,7 +143,10 @@
 #define PTHREAD_MUTEX_DEFAULT	PTHREAD_MUTEX_ERRORCHECK
 
-struct _pthread_cleanup_info {
+#define	PTHREAD_MUTEX_STALLED	0
+#define	PTHREAD_MUTEX_ROBUST	1
+
+struct pthread_cleanup_info {
 	__uintptr_t	pthread_cleanup_pad[8];
 };
@@ -169,7 +180,7 @@
 #define	pthread_cleanup_push(cleanup_routine, cleanup_arg)		\
 	{								\
-		struct _pthread_cleanup_info __cleanup_info__;		\
+		struct pthread_cleanup_info __cleanup_info__;		\
 		__pthread_cleanup_push_imp(cleanup_routine, cleanup_arg,\
 		    &__cleanup_info__);					\
 		{
@@ -205,13 +216,18 @@
 int		pthread_key_create(pthread_key_t *, void (*) (void *));
 int		pthread_key_delete(pthread_key_t);
-int		pthread_mutexattr_init(pthread_mutexattr_t *);
 int		pthread_mutexattr_destroy(pthread_mutexattr_t *);
+int		pthread_mutexattr_getrobust(const pthread_mutexattr_t *__restrict,
+		    int *__restrict);
 int		pthread_mutexattr_getpshared(const pthread_mutexattr_t *, int *);
 int		pthread_mutexattr_gettype(pthread_mutexattr_t *, int *);
+int		pthread_mutexattr_init(pthread_mutexattr_t *);
 int		pthread_mutexattr_settype(pthread_mutexattr_t *, int);
 int		pthread_mutexattr_setpshared(pthread_mutexattr_t *, int);
+int		pthread_mutexattr_setrobust(pthread_mutexattr_t *,
+		    int);
+int		pthread_mutex_consistent(pthread_mutex_t *);
 int		pthread_mutex_destroy(pthread_mutex_t *);
 int		pthread_mutex_init(pthread_mutex_t *,
 		    const pthread_mutexattr_t *);
@@ -290,7 +306,7 @@
 #endif
 
 void		__pthread_cleanup_push_imp(void (*)(void *), void *,
-		    struct _pthread_cleanup_info *);
+		    struct pthread_cleanup_info *);
 void		__pthread_cleanup_pop_imp(int);
 
 __END_DECLS
--- src/include/unistd.h	2012-04-29 11:37:08.000000000 +0000
+++ src/include/unistd.h	2012-05-07 08:30:54.000000000 +0000
@@ -111,7 +111,7 @@
 #define	_POSIX_THREAD_PRIO_INHERIT	200112L
 #define	_POSIX_THREAD_PRIO_PROTECT	200112L
 #define	_POSIX_THREAD_PRIORITY_SCHEDULING 200112L
-#define	_POSIX_THREAD_PROCESS_SHARED	-1
+#define	_POSIX_THREAD_PROCESS_SHARED	200112L
 #define	_POSIX_THREAD_SAFE_FUNCTIONS	-1
 #define	_POSIX_THREAD_SPORADIC_SERVER	-1
 #define	_POSIX_THREADS	200112L
--- src/lib/Makefile	2012-05-17 10:36:42.000000000 +0000
+++ src/lib/Makefile	2012-05-25 06:47:49.000000000 +0000
@@ -106,7 +106,6 @@
 	${_libsmutil} \
 	libstand \
 	libstdbuf \
-	libstdthreads \
 	${_libtelnet} \
 	${_libthr} \
 	libthread_db \
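[Editor's note] The pthread.h hunks add the POSIX robust-mutex interface (PTHREAD_MUTEX_STALLED/ROBUST, pthread_mutexattr_setrobust(), pthread_mutex_consistent()); the matching EOWNERDEAD/ENOTRECOVERABLE strings land in errlst.c further down. A hedged sketch of the intended usage pattern (not code from the patch):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

static void
init_robust_mutex(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
}

static int
lock_robust_mutex(void)
{
	int error;

	error = pthread_mutex_lock(&m);
	if (error == EOWNERDEAD) {
		/* Previous owner died; repair state, then mark consistent. */
		pthread_mutex_consistent(&m);
		error = 0;
	}
	return (error);	/* ENOTRECOVERABLE if no one called consistent() */
}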
--- src/lib/libc/gen/Symbol.map	2012-05-19 13:35:49.000000000 +0000
+++ src/lib/libc/gen/Symbol.map	2012-05-25 06:47:49.000000000 +0000
@@ -26,12 +26,6 @@
 	pthread_cancel;
 	pthread_cleanup_pop;
 	pthread_cleanup_push;
-	pthread_cond_broadcast;
-	pthread_cond_destroy;
-	pthread_cond_init;
-	pthread_cond_signal;
-	pthread_cond_timedwait;
-	pthread_cond_wait;
 	pthread_detach;
 	pthread_equal;
 	pthread_exit;
@@ -41,22 +35,10 @@
 	pthread_key_delete;
 	pthread_kill;
 	pthread_main_np;
-	pthread_mutex_destroy;
-	pthread_mutex_init;
-	pthread_mutex_lock;
-	pthread_mutex_trylock;
-	pthread_mutex_unlock;
 	pthread_mutexattr_destroy;
 	pthread_mutexattr_init;
 	pthread_mutexattr_settype;
 	pthread_once;
-	pthread_rwlock_destroy;
-	pthread_rwlock_init;
-	pthread_rwlock_rdlock;
-	pthread_rwlock_tryrdlock;
-	pthread_rwlock_trywrlock;
-	pthread_rwlock_unlock;
-	pthread_rwlock_wrlock;
 	pthread_self;
 	pthread_setcancelstate;
 	pthread_setcanceltype;
@@ -366,6 +348,24 @@
 	getutxid;
 	getutxline;
 	getutxuser;
+	pthread_cond_broadcast;
+	pthread_cond_destroy;
+	pthread_cond_init;
+	pthread_cond_signal;
+	pthread_cond_timedwait;
+	pthread_cond_wait;
+	pthread_mutex_destroy;
+	pthread_mutex_init;
+	pthread_mutex_lock;
+	pthread_mutex_trylock;
+	pthread_mutex_unlock;
+	pthread_rwlock_destroy;
+	pthread_rwlock_init;
+	pthread_rwlock_rdlock;
+	pthread_rwlock_tryrdlock;
+	pthread_rwlock_trywrlock;
+	pthread_rwlock_unlock;
+	pthread_rwlock_wrlock;
 	pututxline;
 	sem_close;
 	sem_destroy;
@@ -391,6 +391,7 @@
 FBSDprivate_1.0 {
 	/* needed by thread libraries */
 	__thr_jtable;
+	__thr_jtable10;
 
 	_pthread_atfork;
 	_pthread_attr_destroy;
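[Editor's note] Moving these symbols out of the FBSD_1.0 block while keeping compat shims is ELF symbol versioning: old binaries resolve pthread_mutex_lock@FBSD_1.0 to the pointer-ABI stubs, new ones get the struct-ABI default. A rough sketch of the mechanism using FreeBSD's __sym_compat()/__sym_default() macros from <sys/cdefs.h> (do_thing and the implementations are hypothetical; the versions must also appear in the version map passed to the linker):

#include <sys/cdefs.h>

int do_thing_old(void);
int do_thing_new(void);

int
do_thing_old(void)
{
	return (1);	/* legacy behaviour, kept for old binaries */
}

int
do_thing_new(void)
{
	return (2);	/* current behaviour */
}

__sym_compat(do_thing, do_thing_old, FBSD_1.0);	/* do_thing@FBSD_1.0 */
__sym_default(do_thing, do_thing_new, FBSD_1.2);	/* do_thing@@FBSD_1.2 */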
--- src/lib/libc/gen/_pthread_stubs.c	2011-11-26 17:35:37.000000000 +0000
+++ src/lib/libc/gen/_pthread_stubs.c	2012-02-15 05:06:16.000000000 +0000
@@ -61,6 +61,7 @@
 #define	PJT_DUAL_ENTRY(entry)	\
 	(pthread_func_t)entry, (pthread_func_t)entry
 
+/* Each symbol's highest version is in this table. */
 pthread_func_entry_t __thr_jtable[PJT_MAX] = {
 	{PJT_DUAL_ENTRY(stub_zero)},	/* PJT_ATFORK */
 	{PJT_DUAL_ENTRY(stub_zero)},	/* PJT_ATTR_DESTROY */
@@ -276,6 +277,94 @@
 STUB_FUNC1(_pthread_cancel_enter, PJT_CANCEL_ENTER, int, int)
 STUB_FUNC1(_pthread_cancel_leave, PJT_CANCEL_LEAVE, int, int)
 
+#define	FUNC10_TYPE(name)	__CONCAT(name, _func10_t)
+#define	PJT_ENTRY(entry)	(pthread_func_t)entry
+
+pthread_func_t __thr_jtable10[PJT10_MAX] = {
+	PJT_ENTRY(stub_zero),	/* PJT10_COND_BROADCAST */
+	PJT_ENTRY(stub_zero),	/* PJT10_COND_DESTROY */
+	PJT_ENTRY(stub_zero),	/* PJT10_COND_INIT */
+	PJT_ENTRY(stub_zero),	/* PJT10_COND_SIGNAL */
+	PJT_ENTRY(stub_zero),	/* PJT10_COND_TIMEDWAIT */
+	PJT_ENTRY(stub_zero),	/* PJT10_COND_WAIT */
+	PJT_ENTRY(stub_zero),	/* PJT10_MUTEX_DESTROY */
+	PJT_ENTRY(stub_zero),	/* PJT10_MUTEX_INIT */
+	PJT_ENTRY(stub_zero),	/* PJT10_MUTEX_LOCK */
+	PJT_ENTRY(stub_zero),	/* PJT10_MUTEX_TRYLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT10_MUTEX_UNLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT10_RWLOCK_DESTROY */
+	PJT_ENTRY(stub_zero),	/* PJT10_RWLOCK_INIT */
+	PJT_ENTRY(stub_zero),	/* PJT10_RWLOCK_RDLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT10_RWLOCK_TRYRDLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT10_RWLOCK_TRYWRLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT10_RWLOCK_UNLOCK */
+	PJT_ENTRY(stub_zero)	/* PJT10_RWLOCK_WRLOCK */
+};
+
+#define	SYM_FB10(sym)			__CONCAT(sym, _fb10)
+#define	SYM_COMPAT(sym, impl, ver)	__sym_compat(sym, impl, ver)
+#define	SYM_DEFAULT(sym, impl, ver)	__sym_default(sym, impl, ver)
+
+#define	FB10_COMPAT(func, sym)				\
+	WEAK_REF(func, SYM_FB10(sym));			\
+	SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0)
+
+#define	FUNC10_EXP(name)	__CONCAT(name, _1_0_exp)
+
+#define	STUB10_FUNC1(name, idx, ret, p0_type)			\
+	static ret FUNC10_EXP(name)(p0_type) __used;		\
+	FB10_COMPAT(FUNC10_EXP(name), name);			\
+	typedef ret (*FUNC10_TYPE(name))(p0_type);		\
+	static ret FUNC10_EXP(name)(p0_type p0)			\
+	{							\
+		FUNC10_TYPE(name) func;				\
+		func = (FUNC10_TYPE(name))__thr_jtable10[idx];	\
+		return (func(p0));				\
+	}
+
+#define	STUB10_FUNC2(name, idx, ret, p0_type, p1_type)		\
+	static ret FUNC10_EXP(name)(p0_type, p1_type) __used;	\
+	FB10_COMPAT(FUNC10_EXP(name), name);			\
+	typedef ret (*FUNC10_TYPE(name))(p0_type, p1_type);	\
+	static ret FUNC10_EXP(name)(p0_type p0, p1_type p1)	\
+	{							\
+		FUNC10_TYPE(name) func;				\
+		func = (FUNC10_TYPE(name))__thr_jtable10[idx];	\
+		return (func(p0, p1));				\
+	}
+
+#define	STUB10_FUNC3(name, idx, ret, p0_type, p1_type, p2_type)	\
+	static ret FUNC10_EXP(name)(p0_type, p1_type, p2_type) __used;	\
+	FB10_COMPAT(FUNC10_EXP(name), name);				\
+	typedef ret (*FUNC10_TYPE(name))(p0_type, p1_type, p2_type);	\
+	static ret FUNC10_EXP(name)(p0_type p0, p1_type p1, p2_type p2)	\
+	{								\
+		FUNC10_TYPE(name) func;					\
+		func = (FUNC10_TYPE(name))__thr_jtable10[idx];		\
+		return (func(p0, p1, p2));				\
+	}
+
+STUB10_FUNC1(pthread_cond_broadcast, PJT10_COND_BROADCAST, int, void *)
+STUB10_FUNC1(pthread_cond_destroy, PJT10_COND_DESTROY, int, void *)
+STUB10_FUNC2(pthread_cond_init, PJT10_COND_INIT, int, void *, void *)
+STUB10_FUNC1(pthread_cond_signal, PJT10_COND_SIGNAL, int, void *)
+STUB10_FUNC3(pthread_cond_timedwait, PJT10_COND_TIMEDWAIT, int, void *, void *, void *)
+STUB10_FUNC2(pthread_cond_wait, PJT10_COND_WAIT, int, void *, void *)
+
+STUB10_FUNC1(pthread_mutex_destroy, PJT10_MUTEX_DESTROY, int, void *)
+STUB10_FUNC2(pthread_mutex_init, PJT10_MUTEX_INIT, int, void *, void *)
+STUB10_FUNC1(pthread_mutex_lock, PJT10_MUTEX_LOCK, int, void *)
+STUB10_FUNC1(pthread_mutex_trylock, PJT10_MUTEX_TRYLOCK, int, void *)
+STUB10_FUNC1(pthread_mutex_unlock, PJT10_MUTEX_UNLOCK, int, void *)
+
+STUB10_FUNC1(pthread_rwlock_destroy, PJT10_RWLOCK_DESTROY, int, void *)
+STUB10_FUNC2(pthread_rwlock_init, PJT10_RWLOCK_INIT, int, void *, void *)
+STUB10_FUNC1(pthread_rwlock_rdlock, PJT10_RWLOCK_RDLOCK, int, void *)
+STUB10_FUNC1(pthread_rwlock_tryrdlock, PJT10_RWLOCK_TRYRDLOCK, int, void *)
+STUB10_FUNC1(pthread_rwlock_trywrlock, PJT10_RWLOCK_TRYWRLOCK, int, void *)
+STUB10_FUNC1(pthread_rwlock_unlock, PJT10_RWLOCK_UNLOCK, int, void *)
+STUB10_FUNC1(pthread_rwlock_wrlock, PJT10_RWLOCK_WRLOCK, int, void *)
+
 static int
 stub_zero(void)
 {
--- src/lib/libc/gen/closedir.c	2012-05-19 13:35:49.000000000 +0000
+++ src/lib/libc/gen/closedir.c	2012-05-25 06:47:49.000000000 +0000
@@ -54,15 +54,16 @@
 	int fd;
 
 	if (__isthreaded)
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 	fd = dirp->dd_fd;
 	dirp->dd_fd = -1;
 	dirp->dd_loc = 0;
 	free((void *)dirp->dd_buf);
 	_reclaim_telldir(dirp);
 	if (__isthreaded) {
-		_pthread_mutex_unlock(&dirp->dd_lock);
-		_pthread_mutex_destroy(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
+		_pthread_mutex_destroy(dirp->dd_lock);
+		free(dirp->dd_lock);
 	}
 	free((void *)dirp);
 	return(_close(fd));
--- src/lib/libc/gen/errlst.c	2011-04-06 21:35:52.000000000 +0000
+++ src/lib/libc/gen/errlst.c	2011-04-22 06:47:54.000000000 +0000
@@ -152,5 +152,7 @@
 	"Protocol error",			/* 92 - EPROTO */
 	"Capabilities insufficient",		/* 93 - ENOTCAPABLE */
 	"Not permitted in capability mode",	/* 94 - ECAPMODE */
+	"Previous owning thread terminated",	/* 95 - EOWNERDEAD */
+	"State is not recoverable"		/* 96 - ENOTRECOVERABLE */
 };
 const int sys_nerr = sizeof(sys_errlist) / sizeof(sys_errlist[0]);
--- src/lib/libc/gen/opendir.c	2012-05-19 13:35:49.000000000 +0000
+++ src/lib/libc/gen/opendir.c	2012-05-25 06:47:49.000000000 +0000
@@ -41,6 +41,7 @@
 #include <dirent.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <pthread.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
@@ -298,7 +299,8 @@
 	dirp->dd_loc = 0;
 	dirp->dd_fd = fd;
 	dirp->dd_flags = flags;
-	dirp->dd_lock = NULL;
+	dirp->dd_lock = malloc(sizeof(struct pthread_mutex));
+	_pthread_mutex_init(dirp->dd_lock, NULL);
 
 	/*
	 * Set up seek point for rewinddir.
	 */
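[Editor's note] With pthread_mutex_t now a structure, libc can no longer hide the DIR lock behind an opaque pointer field used as the lock itself, so dd_lock becomes a separately allocated mutex, created in opendir() and destroyed plus freed in closedir(). An illustrative sketch of that lifecycle (mydir is hypothetical; like the patch, it leaves malloc() failure handling minimal):

#include <pthread.h>
#include <stdlib.h>

struct mydir {
	int		 fd;
	pthread_mutex_t	*lock;	/* was: opaque pointer doubling as the lock */
};

static int
mydir_open(struct mydir *d, int fd)
{
	d->fd = fd;
	d->lock = malloc(sizeof(*d->lock));
	if (d->lock == NULL)
		return (-1);
	pthread_mutex_init(d->lock, NULL);
	return (0);
}

static void
mydir_close(struct mydir *d)
{
	pthread_mutex_destroy(d->lock);
	free(d->lock);
}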
--- src/lib/libc/gen/readdir.c	2012-05-19 13:35:49.000000000 +0000
+++ src/lib/libc/gen/readdir.c	2012-05-25 06:47:49.000000000 +0000
@@ -89,9 +89,9 @@
 	struct dirent *dp;
 
 	if (__isthreaded) {
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 		dp = _readdir_unlocked(dirp, 1);
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 	}
 	else
 		dp = _readdir_unlocked(dirp, 1);
@@ -110,10 +110,10 @@
 	saved_errno = errno;
 	errno = 0;
 	if (__isthreaded) {
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 		if ((dp = _readdir_unlocked(dirp, 1)) != NULL)
 			memcpy(entry, dp, _GENERIC_DIRSIZ(dp));
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 	}
 	else if ((dp = _readdir_unlocked(dirp, 1)) != NULL)
 		memcpy(entry, dp, _GENERIC_DIRSIZ(dp));
--- src/lib/libc/gen/seekdir.c	2012-05-19 13:35:49.000000000 +0000
+++ src/lib/libc/gen/seekdir.c	2012-05-25 06:47:49.000000000 +0000
@@ -53,8 +53,8 @@
 	long loc;
 {
 	if (__isthreaded)
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 	_seekdir(dirp, loc);
 	if (__isthreaded)
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 }
--- src/lib/libc/gen/telldir.c	2012-05-19 13:35:49.000000000 +0000
+++ src/lib/libc/gen/telldir.c	2012-05-25 06:47:49.000000000 +0000
@@ -65,13 +65,13 @@
 	if ((lp = (struct ddloc *)malloc(sizeof(struct ddloc))) == NULL)
 		return (-1);
 	if (__isthreaded)
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 	lp->loc_index = dirp->dd_td->td_loccnt++;
 	lp->loc_seek = dirp->dd_seek;
 	lp->loc_loc = dirp->dd_loc;
 	LIST_INSERT_HEAD(&dirp->dd_td->td_locq, lp, loc_lqe);
 	if (__isthreaded)
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 	return (lp->loc_index);
 }
--- src/lib/libc/include/libc_private.h	2012-04-24 18:35:45.000000000 +0000
+++ src/lib/libc/include/libc_private.h	2012-05-07 08:30:54.000000000 +0000
@@ -167,10 +167,33 @@
 	PJT_MAX
 } pjt_index_t;
 
+typedef enum {
+	PJT10_COND_BROADCAST,
+	PJT10_COND_DESTROY,
+	PJT10_COND_INIT,
+	PJT10_COND_SIGNAL,
+	PJT10_COND_TIMEDWAIT,
+	PJT10_COND_WAIT,
+	PJT10_MUTEX_DESTROY,
+	PJT10_MUTEX_INIT,
+	PJT10_MUTEX_LOCK,
+	PJT10_MUTEX_TRYLOCK,
+	PJT10_MUTEX_UNLOCK,
+	PJT10_RWLOCK_DESTROY,
+	PJT10_RWLOCK_INIT,
+	PJT10_RWLOCK_RDLOCK,
+	PJT10_RWLOCK_TRYRDLOCK,
+	PJT10_RWLOCK_TRYWRLOCK,
+	PJT10_RWLOCK_UNLOCK,
+	PJT10_RWLOCK_WRLOCK,
+	PJT10_MAX
+} pjt10_index_t;
+
 typedef int (*pthread_func_t)(void);
 typedef pthread_func_t pthread_func_entry_t[2];
 extern pthread_func_entry_t __thr_jtable[];
+extern pthread_func_t __thr_jtable10[];
 
 /*
  * yplib internal interfaces
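[Editor's note] libc's stubs and libthr's real implementations rendezvous through these tables: the FBSD_1.0 stubs above dispatch through __thr_jtable10[], and libthr overwrites the whole table at initialization (see the memcpy() in thr_init.c below). A stripped-down, self-contained sketch of the idiom, with hypothetical names:

#include <stdio.h>

typedef int (*func_t)(void);

static int stub_impl(void) { return (0); }	/* single-threaded fallback */
static int real_impl(void) { return (42); }	/* "threading library" version */

/* Dispatch table; starts out pointing at stubs. */
static func_t jtable[1] = { stub_impl };

static int
api_entry(void)
{
	return (jtable[0]());	/* callers always go through the table */
}

int
main(void)
{
	printf("%d\n", api_entry());	/* 0: stub */
	jtable[0] = real_impl;		/* what libthr does at init time */
	printf("%d\n", api_entry());	/* 42: real implementation */
	return (0);
}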
--- src/lib/libc/include/namespace.h	2011-02-07 21:36:09.000000000 +0000
+++ src/lib/libc/include/namespace.h	2011-02-10 06:21:55.000000000 +0000
@@ -144,6 +144,7 @@
 #define	pthread_kill			_pthread_kill
 #define	pthread_main_np			_pthread_main_np
 #define	pthread_multi_np		_pthread_multi_np
+#define	pthread_mutex_consistent	_pthread_mutex_consistent
 #define	pthread_mutex_destroy		_pthread_mutex_destroy
 #define	pthread_mutex_getprioceiling	_pthread_mutex_getprioceiling
 #define	pthread_mutex_init		_pthread_mutex_init
@@ -158,12 +159,14 @@
 #define	pthread_mutexattr_getprioceiling _pthread_mutexattr_getprioceiling
 #define	pthread_mutexattr_getprotocol	_pthread_mutexattr_getprotocol
 #define	pthread_mutexattr_getpshared	_pthread_mutexattr_getpshared
+#define	pthread_mutexattr_getrobust	_pthread_mutexattr_getrobust
 #define	pthread_mutexattr_gettype	_pthread_mutexattr_gettype
 #define	pthread_mutexattr_init		_pthread_mutexattr_init
 #define	pthread_mutexattr_setkind_np	_pthread_mutexattr_setkind_np
 #define	pthread_mutexattr_setprioceiling _pthread_mutexattr_setprioceiling
 #define	pthread_mutexattr_setprotocol	_pthread_mutexattr_setprotocol
 #define	pthread_mutexattr_setpshared	_pthread_mutexattr_setpshared
+#define	pthread_mutexattr_setrobust	_pthread_mutexattr_setrobust
 #define	pthread_mutexattr_settype	_pthread_mutexattr_settype
 #define	pthread_once			_pthread_once
 #define	pthread_resume_all_np		_pthread_resume_all_np
--- src/lib/libc/include/un-namespace.h	2011-02-07 21:36:09.000000000 +0000
+++ src/lib/libc/include/un-namespace.h	2011-02-10 06:21:55.000000000 +0000
@@ -125,6 +125,7 @@
 #undef		pthread_kill
 #undef		pthread_main_np
 #undef		pthread_multi_np
+#undef		pthread_mutex_consistent
 #undef		pthread_mutex_destroy
 #undef		pthread_mutex_getprioceiling
 #undef		pthread_mutex_init
@@ -139,12 +140,14 @@
 #undef		pthread_mutexattr_getprioceiling
 #undef		pthread_mutexattr_getprotocol
 #undef		pthread_mutexattr_getpshared
+#undef		pthread_mutexattr_getrobust
 #undef		pthread_mutexattr_gettype
 #undef		pthread_mutexattr_init
 #undef		pthread_mutexattr_setkind_np
 #undef		pthread_mutexattr_setprioceiling
 #undef		pthread_mutexattr_setprotocol
 #undef		pthread_mutexattr_setpshared
+#undef		pthread_mutexattr_setrobust
 #undef		pthread_mutexattr_settype
 #undef		pthread_once
 #undef		pthread_resume_all_np
--- src/lib/libc/stdio/_flock_stub.c	2008-04-17 22:41:57.000000000 +0000
+++ src/lib/libc/stdio/_flock_stub.c	2010-11-23 01:58:38.000000000 +0000
@@ -67,7 +67,7 @@
 		 * Make sure this mutex is treated as a private
 		 * internal mutex:
 		 */
-		_pthread_mutex_lock(&fp->_fl_mutex);
+		_pthread_mutex_lock(fp->_fl_mutex);
 		fp->_fl_owner = curthread;
 		fp->_fl_count = 1;
 	}
@@ -94,7 +94,7 @@
 	 * Make sure this mutex is treated as a private
 	 * internal mutex:
 	 */
-	else if (_pthread_mutex_trylock(&fp->_fl_mutex) == 0) {
+	else if (_pthread_mutex_trylock(fp->_fl_mutex) == 0) {
 		fp->_fl_owner = curthread;
 		fp->_fl_count = 1;
 	}
@@ -130,7 +130,7 @@
 			 */
 			fp->_fl_count = 0;
 			fp->_fl_owner = NULL;
-			_pthread_mutex_unlock(&fp->_fl_mutex);
+			_pthread_mutex_unlock(fp->_fl_mutex);
 		}
 	}
 }
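[Editor's note] FILE's _fl_mutex turns into a pointer for the same reason as DIR's dd_lock above; findfp.c (next) gives the three standard streams statically initialized locks and lazily allocates one for every other FILE. From the application side the stdio locking interface is unchanged; for reference, the standard idiom it backs:

#include <stdio.h>

/* flockfile()/funlockfile() keep multi-call sequences atomic per FILE. */
static void
print_record(FILE *fp, const char *key, int value)
{
	flockfile(fp);
	fputs(key, fp);
	fprintf(fp, " = %d\n", value);
	funlockfile(fp);
}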
--- src/lib/libc/stdio/findfp.c	2012-04-24 18:35:45.000000000 +0000
+++ src/lib/libc/stdio/findfp.c	2012-05-07 08:30:54.000000000 +0000
@@ -36,6 +36,7 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD: src/lib/libc/stdio/findfp.c,v 1.36 2012/04/24 17:51:36 kib Exp $");
 
+#include "namespace.h"
 #include <sys/param.h>
 #include <machine/atomic.h>
 #include <stdio.h>
@@ -48,12 +49,14 @@
 #include "libc_private.h"
 #include "local.h"
 #include "glue.h"
+#include "un-namespace.h"
 
 int	__sdidinit;
 
 #define	NDYNAMIC 10		/* add ten more whenever necessary */
 
-#define	std(flags, file) { \
+
+#define	std(flags, file, lock) { \
 	._flags = (flags), \
 	._file = (file), \
 	._cookie = __sF + (file), \
@@ -61,16 +64,21 @@
 	._read = __sread, \
 	._seek = __sseek, \
 	._write = __swrite, \
-	._fl_mutex = PTHREAD_MUTEX_INITIALIZER, \
+	._fl_mutex = &lock, \
 }
 	/* the usual - (stdin + stdout + stderr) */
 static FILE usual[FOPEN_MAX - 3];
 static struct glue uglue = { NULL, FOPEN_MAX - 3, usual };
+static pthread_mutex_t sfLOCK[3] = {
+	PTHREAD_MUTEX_INITIALIZER,
+	PTHREAD_MUTEX_INITIALIZER,
+	PTHREAD_MUTEX_INITIALIZER
+};
 
 static FILE __sF[3] = {
-	std(__SRD, STDIN_FILENO),
-	std(__SWR, STDOUT_FILENO),
-	std(__SWR|__SNBF, STDERR_FILENO)
+	std(__SRD, STDIN_FILENO, sfLOCK[0]),
+	std(__SWR, STDOUT_FILENO, sfLOCK[1]),
+	std(__SWR|__SNBF, STDERR_FILENO, sfLOCK[2])
 };
 
 FILE *__stdinp = &__sF[0];
@@ -95,7 +103,7 @@
 	int n;
 {
 	struct glue *g;
-	static FILE empty = { ._fl_mutex = PTHREAD_MUTEX_INITIALIZER };
+	static FILE empty = { ._fl_mutex = NULL };
 	FILE *p;
 
 	g = (struct glue *)malloc(sizeof(*g) + ALIGNBYTES + n * sizeof(FILE));
@@ -153,7 +161,10 @@
 	fp->_ub._size = 0;
 	fp->_lb._base = NULL;	/* no line buffer */
 	fp->_lb._size = 0;
-/*	fp->_fl_mutex = NULL; */ /* once set always set (reused) */
+	if (fp->_fl_mutex == NULL) {	/* once set always set (reused) */
+		fp->_fl_mutex = malloc(sizeof(struct pthread_mutex));
+		_pthread_mutex_init(fp->_fl_mutex, NULL);
+	}
 	fp->_orientation = 0;
 	memset(&fp->_mbstate, 0, sizeof(mbstate_t));
 	return (fp);
--- src/lib/libc/stdio/local.h	2012-04-29 16:36:37.000000000 +0000
+++ src/lib/libc/stdio/local.h	2012-05-07 08:30:54.000000000 +0000
@@ -127,7 +127,7 @@
  */
 #define	FAKE_FILE {				\
 	._file = -1,				\
-	._fl_mutex = PTHREAD_MUTEX_INITIALIZER,	\
+	._fl_mutex = NULL,			\
 }
 
 /*
--- src/lib/libthr/Makefile	2010-10-08 02:35:36.000000000 +0000
+++ src/lib/libthr/Makefile	2012-05-02 02:45:37.000000000 +0000
@@ -40,7 +40,7 @@
 # enable extra internal consistancy checks
 CFLAGS+=-D_PTHREADS_INVARIANTS
-#CFLAGS+=-g
+#DEBUG_FLAGS=-g
 
 PRECIOUSLIB=
--- src/lib/libthr/arch/amd64/include/pthread_md.h	2011-12-15 20:35:31.000000000 +0000
+++ src/lib/libthr/arch/amd64/include/pthread_md.h	2012-05-04 11:39:23.000000000 +0000
@@ -99,5 +99,7 @@
 }
 
 #define	HAS__UMTX_OP_ERR	1
+#define	JMPBUF_STACKPTR(buf)	((buf)[0]._jb[2])
+#define	UCONTEXT_STACKPTR(ctx)	((ctx)->uc_mcontext.mc_rsp)
 
 #endif
--- src/lib/libthr/arch/i386/include/pthread_md.h	2011-12-15 20:35:31.000000000 +0000
+++ src/lib/libthr/arch/i386/include/pthread_md.h	2012-05-04 11:39:23.000000000 +0000
@@ -104,5 +104,7 @@
 }
 
 #define	HAS__UMTX_OP_ERR	1
+#define	JMPBUF_STACKPTR(buf)	((buf)[0]._jb[2])
+#define	UCONTEXT_STACKPTR(ctx)	((ctx)->uc_mcontext.mc_esp)
 
 #endif
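[Editor's note] JMPBUF_STACKPTR()/UCONTEXT_STACKPTR() let libthr read the saved stack pointer out of a jmp_buf or ucontext_t; together with the longjmp/siglongjmp interposers added to pthread.map next and the curthread->sig_stackptr checks in thr_fork.c, they detect when a jump or fork escapes a signal-handler frame. A hedged, FreeBSD/amd64-specific illustration of the comparison they enable (jump_leaves_handler and sig_sp are hypothetical; the field offset is exactly what the macro hides):

#include <setjmp.h>
#include <stdint.h>

/*
 * Illustration only: decide whether a longjmp() target lies above the
 * frame a signal handler is running on, i.e. whether the jump would
 * leave the handler.  sig_sp would be recorded on signal entry.
 */
static int
jump_leaves_handler(jmp_buf env, uintptr_t sig_sp)
{
	uintptr_t target_sp = (uintptr_t)env[0]._jb[2];	/* amd64 layout */

	return (target_sp > sig_sp);	/* stack grows down on x86 */
}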
--- src/lib/libthr/pthread.map	2011-02-07 21:36:09.000000000 +0000
+++ src/lib/libthr/pthread.map	2012-05-14 05:48:42.000000000 +0000
@@ -16,6 +16,7 @@
 	fcntl;
 	fork;
 	fsync;
+	longjmp;
 	msync;
 	nanosleep;
 	open;
@@ -23,13 +24,9 @@
 	poll;
 	pselect;
 	pthread_atfork;
-	pthread_barrier_destroy;
-	pthread_barrier_init;
-	pthread_barrier_wait;
 	pthread_barrierattr_destroy;
 	pthread_barrierattr_getpshared;
 	pthread_barrierattr_init;
-	pthread_barrierattr_setpshared;
 	pthread_attr_destroy;
 	pthread_attr_get_np;
 	pthread_attr_getdetachstate;
@@ -55,18 +52,11 @@
 	pthread_cancel;
 	pthread_cleanup_pop;
 	pthread_cleanup_push;
-	pthread_cond_broadcast;
-	pthread_cond_destroy;
-	pthread_cond_init;
-	pthread_cond_signal;
-	pthread_cond_timedwait;
-	pthread_cond_wait;
 	pthread_condattr_destroy;
 	pthread_condattr_getclock;
 	pthread_condattr_getpshared;
 	pthread_condattr_init;
 	pthread_condattr_setclock;
-	pthread_condattr_setpshared;
 	pthread_create;
 	pthread_detach;
 	pthread_equal;
@@ -81,14 +71,6 @@
 	pthread_kill;
 	pthread_main_np;
 	pthread_multi_np;
-	pthread_mutex_destroy;
-	pthread_mutex_getprioceiling;
-	pthread_mutex_init;
-	pthread_mutex_lock;
-	pthread_mutex_setprioceiling;
-	pthread_mutex_timedlock;
-	pthread_mutex_trylock;
-	pthread_mutex_unlock;
 	pthread_mutexattr_destroy;
 	pthread_mutexattr_getkind_np;
 	pthread_mutexattr_getprioceiling;
@@ -99,24 +81,13 @@
 	pthread_mutexattr_setkind_np;
 	pthread_mutexattr_setprioceiling;
 	pthread_mutexattr_setprotocol;
-	pthread_mutexattr_setpshared;
 	pthread_mutexattr_settype;
 	pthread_once;
 	pthread_resume_all_np;
 	pthread_resume_np;
-	pthread_rwlock_destroy;
-	pthread_rwlock_init;
-	pthread_rwlock_rdlock;
-	pthread_rwlock_timedrdlock;
-	pthread_rwlock_timedwrlock;
-	pthread_rwlock_tryrdlock;
-	pthread_rwlock_trywrlock;
-	pthread_rwlock_unlock;
-	pthread_rwlock_wrlock;
 	pthread_rwlockattr_destroy;
 	pthread_rwlockattr_getpshared;
 	pthread_rwlockattr_init;
-	pthread_rwlockattr_setpshared;
 	pthread_set_name_np;
 	pthread_self;
 	pthread_setcancelstate;
@@ -127,11 +98,6 @@
 	pthread_setspecific;
 	pthread_sigmask;
 	pthread_single_np;
-	pthread_spin_destroy;
-	pthread_spin_init;
-	pthread_spin_lock;
-	pthread_spin_trylock;
-	pthread_spin_unlock;
 	pthread_suspend_all_np;
 	pthread_suspend_np;
 	pthread_switch_add_np;
@@ -148,6 +114,7 @@
 	sendmsg;
 	sendto;
 	sigaction;
+	siglongjmp;
 	sigprocmask;
 	sigsuspend;
 	sigwait;
@@ -213,6 +180,8 @@
 	__write;
 	__writev;
 	_fork;
+	_longjmp;
+	_siglongjmp;
 	_pthread_atfork;
 	_pthread_barrier_destroy;
 	_pthread_barrier_init;
@@ -279,6 +248,7 @@
 	_pthread_kill;
 	_pthread_main_np;
 	_pthread_multi_np;
+	_pthread_mutex_consistent;
 	_pthread_mutex_destroy;
 	_pthread_mutex_getprioceiling;
 	_pthread_mutex_getspinloops_np;
@@ -298,12 +268,14 @@
 	_pthread_mutexattr_getprioceiling;
 	_pthread_mutexattr_getprotocol;
 	_pthread_mutexattr_getpshared;
+	_pthread_mutexattr_getrobust;
 	_pthread_mutexattr_gettype;
 	_pthread_mutexattr_init;
 	_pthread_mutexattr_setkind_np;
 	_pthread_mutexattr_setprioceiling;
 	_pthread_mutexattr_setprotocol;
 	_pthread_mutexattr_setpshared;
+	_pthread_mutexattr_setrobust;
 	_pthread_mutexattr_settype;
 	_pthread_once;
 	_pthread_resume_all_np;
@@ -395,16 +367,54 @@
 	pthread_getaffinity_np;
 	pthread_getcpuclockid;
 	pthread_setaffinity_np;
+};
+
+FBSD_1.2 {
+	openat;
+	pthread_barrier_destroy;
+	pthread_barrier_init;
+	pthread_barrier_wait;
+	pthread_barrierattr_setpshared;
+	pthread_cond_broadcast;
+	pthread_cond_destroy;
+	pthread_cond_init;
+	pthread_cond_signal;
+	pthread_cond_timedwait;
+	pthread_cond_wait;
+	pthread_condattr_setpshared;
+	pthread_getthreadid_np;
+	pthread_mutex_destroy;
+	pthread_mutexattr_getrobust;
+	pthread_mutexattr_setpshared;
+	pthread_mutexattr_setrobust;
+	pthread_mutex_consistent;
+	pthread_mutex_getprioceiling;
 	pthread_mutex_getspinloops_np;
 	pthread_mutex_getyieldloops_np;
+	pthread_mutex_init;
 	pthread_mutex_isowned_np;
+	pthread_mutex_lock;
+	pthread_mutex_setprioceiling;
 	pthread_mutex_setspinloops_np;
 	pthread_mutex_setyieldloops_np;
-};
-
-FBSD_1.2 {
-	openat;
-	pthread_getthreadid_np;
+	pthread_mutex_timedlock;
+	pthread_mutex_trylock;
+	pthread_mutex_unlock;
+	pthread_spin_destroy;
+	pthread_spin_init;
+	pthread_spin_lock;
+	pthread_spin_trylock;
+	pthread_spin_unlock;
+	pthread_rwlock_destroy;
+	pthread_rwlock_init;
+	pthread_rwlock_rdlock;
+	pthread_rwlock_timedrdlock;
+	pthread_rwlock_timedwrlock;
+	pthread_rwlock_tryrdlock;
+	pthread_rwlock_trywrlock;
+	pthread_rwlock_unlock;
+	pthread_rwlock_wrlock;
+	pthread_rwlockattr_setpshared;
 	setcontext;
 	swapcontext;
 };
--- src/lib/libthr/thread/Makefile.inc	2011-02-07 21:36:09.000000000 +0000
+++ src/lib/libthr/thread/Makefile.inc	2012-05-02 02:09:13.000000000 +0000
@@ -30,6 +30,7 @@
 	thr_kern.c \
 	thr_kill.c \
 	thr_main_np.c \
+	thr_malloc.c \
 	thr_multi_np.c \
 	thr_mutex.c \
 	thr_mutexattr.c \
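[Editor's note] The new thr_malloc.c (and the lmalloc()/lfree() calls that replace malloc()/free() throughout libthr below) gives the thread library its own small allocator, serialized by the _lmalloc_lock added in thr_init.c, so allocating a pthread object can never recurse into a user-interposed or not-yet-initialized malloc. The patch's thr_malloc.c is not shown here; a minimal sketch of the idea, with hypothetical names (a real implementation would also track and reuse freed chunks):

#include <sys/mman.h>
#include <stddef.h>

/*
 * Hypothetical lmalloc(): carve allocations out of private anonymous
 * pages instead of calling the application's malloc(), so it is safe
 * to use from inside the threading library itself.
 */
void *
lmalloc_sketch(size_t len)
{
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	return (p == MAP_FAILED ? NULL : p);
}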
--- src/lib/libthr/thread/thr_attr.c	2010-11-02 02:35:33.000000000 +0000
+++ src/lib/libthr/thread/thr_attr.c	2012-05-02 02:09:13.000000000 +0000
@@ -119,9 +119,9 @@
 		ret = EINVAL;
 	else {
 		if ((*attr)->cpuset != NULL)
-			free((*attr)->cpuset);
+			lfree((*attr)->cpuset);
 		/* Free the memory allocated to the attribute object: */
-		free(*attr);
+		lfree(*attr);
 
 		/*
 		 * Leave the attribute pointer NULL now that the memory
@@ -343,7 +343,7 @@
 	_thr_check_init();
 
 	/* Allocate memory for the attribute object: */
-	if ((pattr = (pthread_attr_t) malloc(sizeof(struct pthread_attr))) == NULL)
+	if ((pattr = (pthread_attr_t) lmalloc(sizeof(struct pthread_attr))) == NULL)
 		/* Insufficient memory: */
 		ret = ENOMEM;
 	else {
@@ -450,8 +450,8 @@
 	policy = (*attr)->sched_policy;
 
 	if (policy == SCHED_FIFO || policy == SCHED_RR) {
-		if (param->sched_priority < _thr_priorities[policy-1].pri_min ||
-		    param->sched_priority > _thr_priorities[policy-1].pri_max)
+		if (param->sched_priority < 0 ||
+		    param->sched_priority > THR_MAX_RR_PRIORITY)
 			return (ENOTSUP);
 	} else {
 		/*
@@ -479,7 +479,7 @@
 		ret = ENOTSUP;
 	} else {
 		(*attr)->sched_policy = policy;
-		(*attr)->prio = _thr_priorities[policy-1].pri_default;
+		(*attr)->prio = 0;
 	}
 	return(ret);
 }
@@ -592,7 +592,7 @@
 	else {
 		if (cpusetsize == 0 || cpusetp == NULL) {
 			if (attr->cpuset != NULL) {
-				free(attr->cpuset);
+				lfree(attr->cpuset);
 				attr->cpuset = NULL;
 				attr->cpusetsize = 0;
 			}
--- src/lib/libthr/thread/thr_barrier.c	2012-03-16 05:36:53.000000000 +0000
+++ src/lib/libthr/thread/thr_barrier.c	2012-05-02 02:09:13.000000000 +0000
@@ -29,6 +29,7 @@
 #include "namespace.h"
 #include <errno.h>
 #include <stdlib.h>
+#include <string.h>
 #include <pthread.h>
 #include "un-namespace.h"
 
@@ -38,101 +39,112 @@
 __weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
 __weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
 
+typedef struct pthread_barrier *pthread_barrier_old_t;
+int _pthread_barrier_destroy_1_0(pthread_barrier_old_t *);
+int _pthread_barrier_wait_1_0(pthread_barrier_old_t *);
+int _pthread_barrier_init_1_0(pthread_barrier_old_t *,
+	const pthread_barrierattr_t *, unsigned);
+
 int
-_pthread_barrier_destroy(pthread_barrier_t *barrier)
+_pthread_barrier_destroy(pthread_barrier_t *barp)
 {
-	pthread_barrier_t	bar;
-	struct pthread		*curthread;
+	(void)_pthread_cond_destroy(&barp->__cond);
+	(void)_pthread_mutex_destroy(&barp->__lock);
+	memset(barp, 0, sizeof(*barp));
+	return (0);
+}
 
-	if (barrier == NULL || *barrier == NULL)
+int
+_pthread_barrier_init(pthread_barrier_t *barp,
+	const pthread_barrierattr_t *attr, unsigned count)
+{
+	if (count == 0)
 		return (EINVAL);
 
-	curthread = _get_curthread();
-	bar = *barrier;
-	THR_UMUTEX_LOCK(curthread, &bar->b_lock);
-	if (bar->b_destroying) {
-		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-		return (EBUSY);
+	_pthread_mutex_init(&barp->__lock, NULL);
+	_pthread_cond_init(&barp->__cond, NULL);
+	if (attr != NULL && *attr != NULL) {
+		if ((*attr)->pshared == PTHREAD_PROCESS_SHARED) {
+			barp->__lock.__flags |= USYNC_PROCESS_SHARED;
+			barp->__cond.__flags |= USYNC_PROCESS_SHARED;
+		} else if ((*attr)->pshared != PTHREAD_PROCESS_PRIVATE) {
+			return (EINVAL);
+		}
 	}
-	bar->b_destroying = 1;
-	do {
-		if (bar->b_waiters > 0) {
-			bar->b_destroying = 0;
-			THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-			return (EBUSY);
-		}
-		if (bar->b_refcount != 0) {
-			_thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
-			THR_UMUTEX_LOCK(curthread, &bar->b_lock);
-		} else
-			break;
-	} while (1);
-	bar->b_destroying = 0;
-	THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-
-	*barrier = NULL;
-	free(bar);
+	barp->__count = count;
+	barp->__enter = count;
+	barp->__leave = 0;
 	return (0);
 }
 
 int
-_pthread_barrier_init(pthread_barrier_t *barrier,
-	const pthread_barrierattr_t *attr, unsigned count)
+_pthread_barrier_wait(pthread_barrier_t *barp)
 {
-	pthread_barrier_t	bar;
+	int error;
+
+	_pthread_mutex_lock(&barp->__lock);
+	while (barp->__enter == 0)
+		_pthread_cond_wait(&barp->__cond, &barp->__lock);
+	barp->__enter--;
+	if (barp->__enter == 0) {
+		barp->__leave = barp->__count;
+		_pthread_cond_broadcast(&barp->__cond);
+	}
+	while (barp->__leave == 0)
+		_pthread_cond_wait(&barp->__cond, &barp->__lock);
+	barp->__leave--;
+	if (barp->__leave == 0) {
+		barp->__enter = barp->__count;
+		_pthread_cond_broadcast(&barp->__cond);
+		error = PTHREAD_BARRIER_SERIAL_THREAD;
+	} else {
+		error = 0;
+	}
+	_pthread_mutex_unlock(&barp->__lock);
+	return (error);
+}
 
-	(void)attr;
+int
+_pthread_barrier_destroy_1_0(pthread_barrier_old_t *barpp)
+{
+	struct pthread_barrier *barp;
 
-	if (barrier == NULL || count <= 0)
+	if ((barp = *barpp) == NULL)
 		return (EINVAL);
+	_pthread_barrier_destroy(barp);
+	lfree(barp);
+	return (0);
+}
+
+int
+_pthread_barrier_init_1_0(pthread_barrier_old_t *barpp,
+	const pthread_barrierattr_t *attr, unsigned count)
+{
+	struct pthread_barrier *barp;
+	int error;
 
-	bar = malloc(sizeof(struct pthread_barrier));
-	if (bar == NULL)
+	barp = lmalloc(sizeof(struct pthread_barrier));
+	if (barp == NULL)
 		return (ENOMEM);
-
-	_thr_umutex_init(&bar->b_lock);
-	_thr_ucond_init(&bar->b_cv);
-	bar->b_cycle	= 0;
-	bar->b_waiters	= 0;
-	bar->b_count	= count;
-	bar->b_refcount	= 0;
-	*barrier	= bar;
-
+	error = _pthread_barrier_init(barp, attr, count);
+	if (error) {
+		lfree(barp);
+		return (error);
+	}
+	*barpp = barp;
 	return (0);
 }
 
 int
-_pthread_barrier_wait(pthread_barrier_t *barrier)
+_pthread_barrier_wait_1_0(pthread_barrier_old_t *barpp)
 {
-	struct pthread *curthread = _get_curthread();
-	pthread_barrier_t bar;
-	int64_t cycle;
-	int ret;
+	struct pthread_barrier *barp;
 
-	if (barrier == NULL || *barrier == NULL)
+	if ((barp = *barpp) == NULL)
 		return (EINVAL);
+	return _pthread_barrier_wait(barp);
+}
 
-	bar = *barrier;
-	THR_UMUTEX_LOCK(curthread, &bar->b_lock);
-	if (++bar->b_waiters == bar->b_count) {
-		/* Current thread is lastest thread */
-		bar->b_waiters = 0;
-		bar->b_cycle++;
-		_thr_ucond_broadcast(&bar->b_cv);
-		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-		ret = PTHREAD_BARRIER_SERIAL_THREAD;
-	} else {
-		cycle = bar->b_cycle;
-		bar->b_refcount++;
-		do {
-			_thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
-			THR_UMUTEX_LOCK(curthread, &bar->b_lock);
-			/* test cycle to avoid bogus wakeup */
-		} while (cycle == bar->b_cycle);
-		if (--bar->b_refcount == 0 && bar->b_destroying)
-			_thr_ucond_broadcast(&bar->b_cv);
-		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-		ret = 0;
-	}
-	return (ret);
-}
+FB10_COMPAT(_pthread_barrier_destroy_1_0, pthread_barrier_destroy);
+FB10_COMPAT(_pthread_barrier_init_1_0, pthread_barrier_init);
+FB10_COMPAT(_pthread_barrier_wait_1_0, pthread_barrier_wait);
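[Editor's note] The rewritten barrier is a classic two-phase (enter/leave counters) design over a mutex and condvar: the leave phase keeps a fast thread from re-entering the barrier and corrupting the count before slow threads have left, which is what the old b_cycle/b_refcount bookkeeping guarded against. Standard usage, for reference:

#include <pthread.h>
#include <stdio.h>

#define	NTHREADS	4

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
	/* ... per-thread setup ... */
	if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
		printf("last thread through the barrier\n");
	/* all NTHREADS threads proceed together from here */
	return (NULL);
}

int
main(void)
{
	pthread_t td[NTHREADS];
	int i;

	pthread_barrier_init(&barrier, NULL, NTHREADS);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&td[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(td[i], NULL);
	pthread_barrier_destroy(&barrier);
	return (0);
}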
--- src/lib/libthr/thread/thr_barrierattr.c	2006-04-04 03:38:46.000000000 +0000
+++ src/lib/libthr/thread/thr_barrierattr.c	2012-05-02 02:09:13.000000000 +0000
@@ -43,6 +43,10 @@
 __weak_reference(_pthread_barrierattr_getpshared,
 	pthread_barrierattr_getpshared);
 
+int _pthread_barrierattr_setpshared_1_0(pthread_barrierattr_t *, int);
+
+FB10_COMPAT(_pthread_barrierattr_setpshared_1_0, pthread_barrierattr_setpshared);
+
 int
 _pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
 {
@@ -50,7 +54,7 @@
 	if (attr == NULL || *attr == NULL)
 		return (EINVAL);
 
-	free(*attr);
+	lfree(*attr);
 	return (0);
 }
 
@@ -73,7 +77,7 @@
 	if (attr == NULL)
 		return (EINVAL);
 
-	if ((*attr = malloc(sizeof(struct pthread_barrierattr))) == NULL)
+	if ((*attr = lmalloc(sizeof(struct pthread_barrierattr))) == NULL)
 		return (ENOMEM);
 
 	(*attr)->pshared = PTHREAD_PROCESS_PRIVATE;
@@ -88,6 +92,22 @@
 		return (EINVAL);
 
 	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return (EINVAL);
+
+	(*attr)->pshared = pshared;
+	return (0);
+}
+
+int
+_pthread_barrierattr_setpshared_1_0(pthread_barrierattr_t *attr, int pshared)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
+	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
 	if (pshared != PTHREAD_PROCESS_PRIVATE)
 		return (EINVAL);
--- src/lib/libthr/thread/thr_clean.c	2010-09-25 10:35:36.000000000 +0000
+++ src/lib/libthr/thread/thr_clean.c	2012-05-02 02:09:13.000000000 +0000
@@ -47,7 +47,7 @@
 
 void
 __pthread_cleanup_push_imp(void (*routine)(void *), void *arg,
-	struct _pthread_cleanup_info *info)
+	struct pthread_cleanup_info *info)
 {
 	struct pthread	*curthread = _get_curthread();
 	struct pthread_cleanup *newbuf;
@@ -71,7 +71,7 @@
 		if (execute)
 			old->routine(old->routine_arg);
 		if (old->onheap)
-			free(old);
+			lfree(old);
 	}
 }
 
@@ -84,7 +84,7 @@
 	curthread->unwind_disabled = 1;
 #endif
 	if ((newbuf = (struct pthread_cleanup *)
-	    malloc(sizeof(struct _pthread_cleanup_info))) != NULL) {
+	    lmalloc(sizeof(struct pthread_cleanup_info))) != NULL) {
 		newbuf->routine = routine;
 		newbuf->routine_arg = arg;
 		newbuf->onheap = 1;
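[Editor's note] pthread_cleanup_push()/pop() expand to a block that registers the pthread_cleanup_info record renamed above; handlers run on cancellation, pthread_exit(), or an explicit pop(1). Standard usage, for reference:

#include <pthread.h>

static void
unlock_on_cancel(void *arg)
{
	pthread_mutex_unlock(arg);
}

static void
cancellable_work(pthread_mutex_t *mp)
{
	pthread_mutex_lock(mp);
	pthread_cleanup_push(unlock_on_cancel, mp);
	/* ... code that may hit a cancellation point ... */
	pthread_cleanup_pop(1);		/* run the handler: unlocks mp */
}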
--- src/lib/libthr/thread/thr_cond.c	2011-01-04 06:36:35.000000000 +0000
+++ src/lib/libthr/thread/thr_cond.c	2012-06-07 07:23:03.000000000 +0000
@@ -45,8 +45,8 @@
 static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
 static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
 	const struct timespec *abstime, int cancel);
-static int cond_signal_common(pthread_cond_t *cond);
-static int cond_broadcast_common(pthread_cond_t *cond);
+static int cond_signal_common(struct pthread_cond *cond);
+static int cond_broadcast_common(struct pthread_cond *cond);
 
 /*
  * Double underscore versions are cancellation points.  Single underscore
@@ -64,89 +64,42 @@
 #define	CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
 
 static int
-cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+cond_init(struct pthread_cond *cvp, const pthread_condattr_t *cond_attr)
 {
-	struct pthread_cond	*cvp;
-	int	error = 0;
 
-	if ((cvp = (pthread_cond_t)
-	    calloc(1, sizeof(struct pthread_cond))) == NULL) {
-		error = ENOMEM;
+	/*
+	 * Initialise the condition variable structure:
+	 */
+	memset(cvp, 0, sizeof(*cvp));
+	cvp->__magic = _PTHREAD_COND_MAGIC;
+	if (cond_attr == NULL || *cond_attr == NULL) {
+		cvp->__clock_id = CLOCK_REALTIME;
 	} else {
-		/*
-		 * Initialise the condition variable structure:
-		 */
-		if (cond_attr == NULL || *cond_attr == NULL) {
-			cvp->__clock_id = CLOCK_REALTIME;
-		} else {
-			if ((*cond_attr)->c_pshared)
-				cvp->__flags |= USYNC_PROCESS_SHARED;
-			cvp->__clock_id = (*cond_attr)->c_clockid;
-		}
-		*cond = cvp;
+		if ((*cond_attr)->c_pshared)
+			cvp->__flags |= USYNC_PROCESS_SHARED;
+		cvp->__clock_id = (*cond_attr)->c_clockid;
 	}
-	return (error);
+	return (0);
 }
 
-static int
-init_static(struct pthread *thread, pthread_cond_t *cond)
+int
+_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
 {
-	int	ret;
 
-	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
-
-	if (*cond == NULL)
-		ret = cond_init(cond, NULL);
-	else
-		ret = 0;
-
-	THR_LOCK_RELEASE(thread, &_cond_static_lock);
-
-	return (ret);
+	return (cond_init(cond, cond_attr));
 }
 
-#define CHECK_AND_INIT_COND							\
-	if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {		\
-		if (cvp == THR_COND_INITIALIZER) {				\
-			int ret;						\
-			ret = init_static(_get_curthread(), cond);		\
-			if (ret)						\
-				return (ret);					\
-		} else if (cvp == THR_COND_DESTROYED) {				\
-			return (EINVAL);					\
-		}								\
-		cvp = *cond;							\
-	}
-
-int
-_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+static int
+cond_destroy_common(pthread_cond_t *cvp)
 {
-
-	*cond = NULL;
-	return (cond_init(cond, cond_attr));
+	cvp->__magic = 0;
+	return (0);
 }
 
 int
-_pthread_cond_destroy(pthread_cond_t *cond)
+_pthread_cond_destroy(pthread_cond_t *cvp)
 {
-	struct pthread_cond	*cvp;
-	int			error = 0;
-
-	if ((cvp = *cond) == THR_COND_INITIALIZER)
-		error = 0;
-	else if (cvp == THR_COND_DESTROYED)
-		error = EINVAL;
-	else {
-		cvp = *cond;
-		*cond = THR_COND_DESTROYED;
-
-		/*
-		 * Free the memory allocated for the condition
-		 * variable structure:
-		 */
-		free(cvp);
-	}
-	return (error);
+	return cond_destroy_common(cvp);
 }
 
 /*
@@ -162,44 +115,45 @@
 	const struct timespec *abstime, int cancel)
 {
 	struct pthread	*curthread = _get_curthread();
-	int	recurse;
-	int	error, error2 = 0;
+	int	recurse = 0;
+	int	ceiling;
+	int	error, error2 = 0;
 
-	error = _mutex_cv_detach(mp, &recurse);
-	if (error != 0)
-		return (error);
+	recurse = mp->__recurse;
+	mp->__recurse = 0;
+	mp->__ownertd.__ithread = 0;
+	mp->__ownerpid = 0;
 
 	if (cancel) {
 		_thr_cancel_enter2(curthread, 0);
-		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
-			(struct umutex *)&mp->m_lock, abstime,
+		error = _thr_ucond_wait(
+			(struct ucond *)&cvp->__has_kern_waiters,
+			(struct umutex *)&mp->__lockword, abstime,
 			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
 		_thr_cancel_leave(curthread, 0);
 	} else {
-		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
-			(struct umutex *)&mp->m_lock, abstime,
+		error = _thr_ucond_wait(
			(struct ucond *)&cvp->__has_kern_waiters,
+			(struct umutex *)&mp->__lockword, abstime,
 			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
 	}
 
-	/*
-	 * Note that PP mutex and ROBUST mutex may return
-	 * interesting error codes.
-	 */
-	if (error == 0) {
-		error2 = _mutex_cv_lock(mp, recurse);
-	} else if (error == EINTR || error == ETIMEDOUT) {
-		error2 = _mutex_cv_lock(mp, recurse);
-		if (error2 == 0 && cancel)
-			_thr_testcancel(curthread);
+	if ((mp->__flags & UMUTEX_PRIO_PROTECT2) != 0) {
+		_dequeue_pp_mutex(curthread, mp);
+		ceiling = _highest_pp_ceiling(curthread);
+		_thr_set_ceiling(curthread, ceiling);
+	}
+
+	error2 = _mutex_cv_lock(mp, recurse);
+	if (error2 != 0)
+		return (error2);
+	if (error == EINTR || error == ETIMEDOUT) {
 		if (error == EINTR)
 			error = 0;
-	} else {
-		/* We know that it didn't unlock the mutex. */
-		error2 = _mutex_cv_attach(mp, recurse);
-		if (error2 == 0 && cancel)
+		if (cancel)
 			_thr_testcancel(curthread);
 	}
-	return (error2 != 0 ? error2 : error);
+	return (error);
 }
 
 /*
@@ -208,7 +162,6 @@
  * is saved in curthread's defer_waiters[] buffer, but won't be
  * woken up until mutex is unlocked.
  */
-
 static int
 cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
 	const struct timespec *abstime, int cancel)
@@ -216,28 +169,32 @@
 	struct pthread	*curthread = _get_curthread();
 	struct sleepqueue *sq;
 	int	recurse;
-	int	error;
+	int	defered = 0;
+	int	error, error2 = 0;
 
 	if (curthread->wchan != NULL)
 		PANIC("thread was already on queue.");
 
-	if (cancel)
-		_thr_testcancel(curthread);
-
 	_sleepq_lock(cvp);
 	/*
	 * set __has_user_waiters before unlocking mutex, this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
 	cvp->__has_user_waiters = 1;
-	curthread->will_sleep = 1;
-	(void)_mutex_cv_unlock(mp, &recurse);
+	_mutex_cv_unlock(mp, &recurse, &defered);
 	curthread->mutex_obj = mp;
 	_sleepq_add(cvp, curthread);
 	for(;;) {
 		_thr_clear_wake(curthread);
 		_sleepq_unlock(cvp);
+		if (defered) {
+			if ((mp->__lockword & UMUTEX_CONTESTED) == 0)
+				__thr_umutex_wake(UMTX(mp), mp->__flags);
+		}
+
+		_thr_flush_defer(curthread);
+
 		if (cancel) {
 			_thr_cancel_enter2(curthread, 0);
 			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
@@ -256,7 +213,9 @@
 			_sleepq_remove(sq, curthread);
 			_sleepq_unlock(cvp);
 			curthread->mutex_obj = NULL;
-			_mutex_cv_lock(mp, recurse);
+			error2 = _mutex_cv_lock(mp, recurse);
+			if (error2 != 0)
+				return (error2);
 			if (!THR_IN_CRITICAL(curthread))
 				_pthread_exit(PTHREAD_CANCELED);
 			else /* this should not happen */
@@ -270,97 +229,19 @@
 	}
 	_sleepq_unlock(cvp);
 	curthread->mutex_obj = NULL;
-	_mutex_cv_lock(mp, recurse);
-	return (error);
+	error2 = _mutex_cv_lock(mp, recurse);
+	return (error2 ?
 error2 : error);
 }
 
 static int
-cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
-	const struct timespec *abstime, int cancel)
+cond_signal_user(struct pthread_cond *cvp)
 {
-	struct pthread	*curthread = _get_curthread();
-	struct pthread_cond *cvp;
-	struct pthread_mutex *mp;
-	int	error;
-
-	CHECK_AND_INIT_COND
-
-	mp = *mutex;
-
-	if ((error = _mutex_owned(curthread, mp)) != 0)
-		return (error);
-
-	if (curthread->attr.sched_policy != SCHED_OTHER ||
-	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
-		USYNC_PROCESS_SHARED)) != 0 ||
-	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
-		return cond_wait_kernel(cvp, mp, abstime, cancel);
-	else
-		return cond_wait_user(cvp, mp, abstime, cancel);
-}
-
-int
-_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
-{
-
-	return (cond_wait_common(cond, mutex, NULL, 0));
-}
-
-int
-__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
-{
-
-	return (cond_wait_common(cond, mutex, NULL, 1));
-}
-
-int
-_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
-	const struct timespec * abstime)
-{
-
-	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
-	    abstime->tv_nsec >= 1000000000)
-		return (EINVAL);
-
-	return (cond_wait_common(cond, mutex, abstime, 0));
-}
-
-int
-__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
-	const struct timespec *abstime)
-{
-
-	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
-	    abstime->tv_nsec >= 1000000000)
-		return (EINVAL);
-
-	return (cond_wait_common(cond, mutex, abstime, 1));
-}
-
-static int
-cond_signal_common(pthread_cond_t *cond)
-{
-	struct pthread	*curthread = _get_curthread();
+	struct pthread	*curthread;
 	struct pthread	*td;
-	struct pthread_cond *cvp;
 	struct pthread_mutex *mp;
 	struct sleepqueue *sq;
 	int	*waddr;
-	int	pshared;
 
-	/*
-	 * If the condition variable is statically initialized, perform dynamic
-	 * initialization.
-	 */
-	CHECK_AND_INIT_COND
-
-	pshared = CV_PSHARED(cvp);
-
-	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);
-
-	if (pshared || cvp->__has_user_waiters == 0)
-		return (0);
-
+	curthread = _get_curthread();
 	waddr = NULL;
 	_sleepq_lock(cvp);
@@ -373,15 +254,10 @@
 	td = _sleepq_first(sq);
 	mp = td->mutex_obj;
 	cvp->__has_user_waiters = _sleepq_remove(sq, td);
-	if (mp->m_owner == curthread) {
-		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
-			_thr_wake_all(curthread->defer_waiters,
-				curthread->nwaiter_defer);
-			curthread->nwaiter_defer = 0;
-		}
-		curthread->defer_waiters[curthread->nwaiter_defer++] =
-			&td->wake_addr->value;
-		mp->m_flags |= PMUTEX_FLAG_DEFERED;
+	if (mp->__ownertd.__pthread == curthread) {
+		if (mp->__userf == 0)
+			mp->__userf = 1;
+		_thr_save_waiter(curthread, &td->wake_addr->value);
 	} else {
 		waddr = &td->wake_addr->value;
 	}
@@ -405,15 +281,10 @@
 	struct pthread *curthread = ba->curthread;
 
 	mp = td->mutex_obj;
-	if (mp->m_owner == curthread) {
-		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
-			_thr_wake_all(curthread->defer_waiters,
-				curthread->nwaiter_defer);
-			curthread->nwaiter_defer = 0;
-		}
-		curthread->defer_waiters[curthread->nwaiter_defer++] =
-			&td->wake_addr->value;
-		mp->m_flags |= PMUTEX_FLAG_DEFERED;
+	if (mp->__ownertd.__pthread == curthread) {
+		if (mp->__userf == 0)
+			mp->__userf = 1;
+		_thr_save_waiter(curthread, &td->wake_addr->value);
 	} else {
 		if (ba->count >= MAX_DEFER_WAITERS) {
 			_thr_wake_all(ba->waddrs, ba->count);
@@ -424,26 +295,11 @@
 }
 
 static int
-cond_broadcast_common(pthread_cond_t *cond)
+cond_broadcast_user(struct pthread_cond *cvp)
 {
-	int    pshared;
-	struct pthread_cond *cvp;
 	struct sleepqueue *sq;
 	struct broadcast_arg ba;
 
-	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
-	 */
-	CHECK_AND_INIT_COND
-
-	pshared = CV_PSHARED(cvp);
-
-	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);
-
-	if (pshared || cvp->__has_user_waiters == 0)
-		return (0);
-
 	ba.curthread = _get_curthread();
 	ba.count = 0;
@@ -461,16 +317,244 @@
 	return (0);
 }
 
+static int
+cond_wait_common(struct pthread_cond *cvp, struct pthread_mutex *mp,
+	const struct timespec *abstime, int cancel)
+{
+	if (!_mutex_owned(mp))
+		return (EPERM);
+
+	if (((cvp->__flags & USYNC_PROCESS_SHARED) ^
+	    (mp->__flags & USYNC_PROCESS_SHARED)) != 0)
+		return (EINVAL);
+	if ((mp->__flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
+	    USYNC_PROCESS_SHARED)) != 0 ||
+	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
+		return cond_wait_kernel(cvp, mp, abstime, cancel);
+	else
+		return cond_wait_user(cvp, mp, abstime, cancel);
+}
+
+int
+_pthread_cond_wait(pthread_cond_t *cvp, pthread_mutex_t *mp)
+{
+
+	return (cond_wait_common(cvp, mp, NULL, 0));
+}
+
+int
+__pthread_cond_wait(pthread_cond_t *cvp, pthread_mutex_t *mp)
+{
+
+	return (cond_wait_common(cvp, mp, NULL, 1));
+}
+
+int
+_pthread_cond_timedwait(pthread_cond_t *cvp, pthread_mutex_t *mp,
+	const struct timespec * abstime)
+{
+
+	if (abstime == NULL || !_validate_timespec(abstime))
+		return (EINVAL);
+
+	return (cond_wait_common(cvp, mp, abstime, 0));
+}
+
+int
+__pthread_cond_timedwait(pthread_cond_t *cvp, pthread_mutex_t *mp,
+	const struct timespec * abstime)
+{
+
+	if (abstime == NULL || !_validate_timespec(abstime))
+		return (EINVAL);
+
+	return (cond_wait_common(cvp, mp, abstime, 1));
+}
+
+static int
+cond_signal_common(struct pthread_cond *cvp)
+{
+
+	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);
+
+	if (CV_PSHARED(cvp) || cvp->__has_user_waiters == 0)
+		return (0);
+
+	return (cond_signal_user(cvp));
+}
+
+static int
+cond_broadcast_common(struct pthread_cond *cvp)
+{
+
+	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);
+
+	if (CV_PSHARED(cvp) || cvp->__has_user_waiters == 0)
+		return (0);
+
+	return (cond_broadcast_user(cvp));
+}
+
+int
+_pthread_cond_signal(pthread_cond_t *cvp)
+{
+	return (cond_signal_common(cvp));
+}
+
+int
+_pthread_cond_broadcast(pthread_cond_t *cvp)
+{
+	return (cond_broadcast_common(cvp));
+}
+
+static int
+cond_init_old(pthread_cond_old_t *cond, const pthread_condattr_t *cond_attr)
+{
+	struct pthread_cond *cvp = NULL;
+	int error;
+
+	if ((cvp = (struct pthread_cond *)
+	    lmalloc(sizeof(struct pthread_cond))) == NULL) {
+		error = ENOMEM;
+	} else {
+		error = cond_init(cvp, cond_attr);
+		if (error != 0)
+			lfree(cvp);
+		else
+			*cond = cvp;
+	}
+	return (error);
+}
+
+static int
+init_static(struct pthread *thread, pthread_cond_old_t *cond)
+{
+	int error;
+
+	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
+
+	if (*cond == NULL)
+		error = cond_init_old(cond, NULL);
+	else
+		error = 0;
+
+	THR_LOCK_RELEASE(thread, &_cond_static_lock);
+
+	return (error);
+}
+
+int
+_pthread_cond_init_1_0(pthread_cond_old_t *cond,
+	const pthread_condattr_t *cond_attr)
+{
+
+	*cond = NULL;
+	return (cond_init_old(cond, cond_attr));
+}
+
+int
+_pthread_cond_destroy_1_0(pthread_cond_old_t *cond)
+{
+	struct pthread_cond *cvp;
+	int error = 0;
+
+	if ((cvp = *cond) == THR_COND_INITIALIZER)
+		error = 0;
+	else if (cvp == THR_COND_DESTROYED)
+		error = EINVAL;
+	else {
+		cvp = *cond;
+		error = cond_destroy_common(cvp);
+		if (error != 0)
+			return (error);
+		*cond = THR_COND_DESTROYED;
+		lfree(cvp);
+	}
+	return (error);
+}
+
+static inline int
+check_and_init_cond(pthread_cond_old_t *cond)
+{
+	struct pthread_cond *cvp;
+	int error;
+
+	if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {
+		if (cvp == THR_COND_INITIALIZER) {
+			error = init_static(_get_curthread(), cond);
+			if (error != 0)
+				return (error);
+		} else if (cvp == THR_COND_DESTROYED) {
+			return (EINVAL);
+		}
+		cvp = *cond;
+	}
+	return (0);
+}
+
 int
-_pthread_cond_signal(pthread_cond_t * cond)
+_pthread_cond_signal_1_0(pthread_cond_old_t *cond)
+{
+	int error;
+
+	error = check_and_init_cond(cond);
+	if (error != 0)
+		return (error);
+	return (cond_signal_common(*cond));
+}
+
+int
+_pthread_cond_broadcast_1_0(pthread_cond_old_t *cond)
+{
+	int error;
+
+	error = check_and_init_cond(cond);
+	if (error != 0)
+		return (error);
+	return (cond_broadcast_common(*cond));
+}
+
+int
+_pthread_cond_wait_1_0(pthread_cond_old_t *cond, pthread_mutex_old_t *mutex)
 {
+	struct pthread_mutex *mp;
+	int error;
+
+	error = check_and_init_cond(cond);
+	if (error != 0)
+		return (error);
 
-	return (cond_signal_common(cond));
+	mp = *mutex;
+	if (mp < THR_MUTEX_DESTROYED)
+		return (EPERM);
+	if (mp == THR_MUTEX_DESTROYED)
+		return (EINVAL);
+	return (cond_wait_common(*cond, *mutex, NULL, 1));
 }
 
 int
-_pthread_cond_broadcast(pthread_cond_t * cond)
+_pthread_cond_timedwait_1_0(pthread_cond_old_t *cond,
+	pthread_mutex_old_t *mutex,
+	const struct timespec * abstime)
 {
+	struct pthread_mutex *mp;
+	int error;
 
-	return (cond_broadcast_common(cond));
+	error = check_and_init_cond(cond);
+	if (error != 0)
+		return (error);
+
+	mp = *mutex;
+	if (mp < THR_MUTEX_DESTROYED)
+		return (EPERM);
+	if (mp == THR_MUTEX_DESTROYED)
+		return (EINVAL);
+	return (cond_wait_common(*cond, *mutex, abstime, 1));
 }
+
+FB10_COMPAT(_pthread_cond_destroy_1_0, pthread_cond_destroy);
+FB10_COMPAT(_pthread_cond_init_1_0, pthread_cond_init);
+FB10_COMPAT(_pthread_cond_wait_1_0, pthread_cond_wait);
+FB10_COMPAT(_pthread_cond_timedwait_1_0, pthread_cond_timedwait);
+FB10_COMPAT(_pthread_cond_signal_1_0, pthread_cond_signal);
+FB10_COMPAT(_pthread_cond_broadcast_1_0, pthread_cond_broadcast);
--- src/lib/libthr/thread/thr_condattr.c	2010-05-24 14:43:34.000000000 +0000
+++ src/lib/libthr/thread/thr_condattr.c	2012-05-02 02:09:13.000000000 +0000
@@ -45,6 +45,9 @@
 __weak_reference(_pthread_condattr_getpshared, pthread_condattr_getpshared);
 __weak_reference(_pthread_condattr_setpshared, pthread_condattr_setpshared);
 
+int _pthread_condattr_setpshared_1_0(pthread_condattr_t *, int);
+FB10_COMPAT(_pthread_condattr_setpshared_1_0, pthread_condattr_setpshared);
+
 int
 _pthread_condattr_init(pthread_condattr_t *attr)
 {
@@ -52,7 +55,7 @@
 	int ret;
 
 	if ((pattr = (pthread_condattr_t)
-	    malloc(sizeof(struct pthread_cond_attr))) == NULL) {
+	    lmalloc(sizeof(struct pthread_cond_attr))) == NULL) {
 		ret = ENOMEM;
 	} else {
 		memcpy(pattr, &_pthread_condattr_default,
@@ -71,7 +74,7 @@
 	if (attr == NULL || *attr == NULL) {
 		ret = EINVAL;
 	} else {
-		free(*attr);
+		lfree(*attr);
 		*attr = NULL;
 		ret = 0;
 	}
@@ -108,7 +111,7 @@
 	if (attr == NULL || *attr == NULL)
 		return (EINVAL);
 
-	*pshared = PTHREAD_PROCESS_PRIVATE;
+	*pshared = (*attr)->c_pshared;
 	return (0);
 }
 
@@ -118,7 +121,23 @@
 	if (attr == NULL || *attr == NULL)
 		return (EINVAL);
 
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return (EINVAL);
+
+	(*attr)->c_pshared = pshared;
+	return (0);
+}
+
+int
+_pthread_condattr_setpshared_1_0(pthread_condattr_t *attr, int pshared)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
 	if (pshared != PTHREAD_PROCESS_PRIVATE)
 		return (EINVAL);
+
+	(*attr)->c_pshared = pshared;
 	return (0);
 }
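[Editor's note] With _POSIX_THREAD_PROCESS_SHARED now advertised in unistd.h and the pshared attributes actually accepted, a condvar/mutex pair placed in memory shared between processes (e.g. via mmap(MAP_SHARED) or shm_open()) can synchronize them. A hedged usage sketch (shared_region is hypothetical; error handling elided):

#include <pthread.h>

struct shared_region {
	pthread_mutex_t	lock;
	pthread_cond_t	ready;
	int		state;
};

/* Called once, in the process that creates the shared mapping. */
static void
shared_region_init(struct shared_region *r)
{
	pthread_mutexattr_t ma;
	pthread_condattr_t ca;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&r->lock, &ma);
	pthread_mutexattr_destroy(&ma);

	pthread_condattr_init(&ca);
	pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
	pthread_cond_init(&r->ready, &ca);
	pthread_condattr_destroy(&ca);
	r->state = 0;
}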
--- src/lib/libthr/thread/thr_create.c	2011-01-09 13:35:43.000000000 +0000
+++ src/lib/libthr/thread/thr_create.c	2012-05-02 02:09:13.000000000 +0000
@@ -113,9 +113,7 @@
 	new_thread->arg = arg;
 	new_thread->cancel_enable = 1;
 	new_thread->cancel_async = 0;
-	/* Initialize the mutex queue: */
-	TAILQ_INIT(&new_thread->mutexq);
-	TAILQ_INIT(&new_thread->pp_mutexq);
+	new_thread->inherited_prio = -1;
 
 	/* Initialise hooks in the thread structure: */
 	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
--- src/lib/libthr/thread/thr_exit.c	2010-09-30 13:35:27.000000000 +0000
+++ src/lib/libthr/thread/thr_exit.c	2012-05-02 02:09:13.000000000 +0000
@@ -279,6 +279,7 @@
 
 	/* Tell malloc that the thread is exiting. */
 	_malloc_thread_cleanup();
+	_mutex_thread_exit(curthread);
 
 	THR_LOCK(curthread);
 	curthread->state = PS_DEAD;
--- src/lib/libthr/thread/thr_fork.c	2010-09-24 08:59:04.000000000 +0000
+++ src/lib/libthr/thread/thr_fork.c	2012-05-04 09:55:50.000000000 +0000
@@ -82,7 +82,7 @@
 
 	_thr_check_init();
 
-	if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
+	if ((af = lmalloc(sizeof(struct pthread_atfork))) == NULL)
 		return (ENOMEM);
 
 	curthread = _get_curthread();
@@ -121,7 +121,7 @@
 	THR_CRITICAL_LEAVE(curthread);
 	while ((af = TAILQ_FIRST(&temp_list)) != NULL) {
 		TAILQ_REMOVE(&temp_list, af, qe);
-		free(af);
+		lfree(af);
 	}
 	_thr_tsd_unload(phdr_info);
 	_thr_sigact_unload(phdr_info);
@@ -145,14 +145,22 @@
 		return (__sys_fork());
 
 	curthread = _get_curthread();
+
 	cancelsave = curthread->no_cancel;
 	curthread->no_cancel = 1;
-	_thr_rwl_rdlock(&_thr_atfork_lock);
+
+	/*
+	 * fork() in a signal handler only does very limited work;
+	 * the pthread_atfork hooks will not be executed.
+	 */
+	if (curthread->sig_stackptr == 0) {
+		_thr_rwl_rdlock(&_thr_atfork_lock);
 
-	/* Run down atfork prepare handlers. */
-	TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
-		if (af->prepare != NULL)
-			af->prepare();
+		/* Run down atfork prepare handlers. */
+		TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
+			if (af->prepare != NULL)
+				af->prepare();
+		}
 	}
 
 	/*
@@ -168,7 +176,8 @@
 	 */
 	if (_thr_isthreaded() != 0) {
 		was_threaded = 1;
-		_malloc_prefork();
+		if (curthread->sig_stackptr == 0)
+			_malloc_prefork();
 		_rtld_atfork_pre(rtld_locks);
 	} else {
 		was_threaded = 0;
@@ -192,7 +201,7 @@
 		/* clear other threads locked us. */
 		_thr_umutex_init(&curthread->lock);
-		_mutex_fork(curthread);
+		_mutex_fork_child(curthread);
 
 		_thr_signal_postfork_child();
@@ -210,20 +219,24 @@
 		_thr_rwl_rdlock(&_thr_atfork_lock);
 
 		if (was_threaded) {
-			__isthreaded = 1;
-			_malloc_postfork();
-			__isthreaded = 0;
+			if (curthread->sig_stackptr == 0) {
+				__isthreaded = 1;
+				_malloc_postfork();
+				__isthreaded = 0;
+			}
 		}
 
 		/* Ready to continue, unblock signals. */
 		_thr_signal_unblock(curthread);
 
-		/* Run down atfork child handlers. */
-		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
-			if (af->child != NULL)
-				af->child();
+		if (curthread->sig_stackptr == 0) {
+			/* Run down atfork child handlers. */
+			TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
+				if (af->child != NULL)
+					af->child();
+			}
+			_thr_rwlock_unlock(&_thr_atfork_lock);
 		}
-		_thr_rwlock_unlock(&_thr_atfork_lock);
 		curthread->no_cancel = cancelsave;
 	} else {
 		/* Parent process */
@@ -233,19 +246,21 @@
 
 		if (was_threaded) {
 			_rtld_atfork_post(rtld_locks);
-			_malloc_postfork();
+			if (curthread->sig_stackptr == 0)
+				_malloc_postfork();
 		}
 
 		/* Ready to continue, unblock signals. */
 		_thr_signal_unblock(curthread);
 
*/ - TAILQ_FOREACH(af, &_thr_atfork_list, qe) { - if (af->parent != NULL) - af->parent(); + if (curthread->sig_stackptr == 0) { + /* Run down atfork parent handlers. */ + TAILQ_FOREACH(af, &_thr_atfork_list, qe) { + if (af->parent != NULL) + af->parent(); + } + _thr_rwlock_unlock(&_thr_atfork_lock); } - - _thr_rwlock_unlock(&_thr_atfork_lock); curthread->no_cancel = cancelsave; /* test async cancel */ if (curthread->cancel_async) --- src/lib/libthr/thread/thr_init.c 2012-05-03 09:35:39.000000000 0000 +++ src/lib/libthr/thread/thr_init.c 2012-05-07 08:30:54.000000000 0000 @@ -67,12 +67,6 @@ atfork_head _thr_atfork_list = TAILQ_HEAD_INITIALIZER(_thr_atfork_list); struct urwlock _thr_atfork_lock = DEFAULT_URWLOCK; -struct pthread_prio _thr_priorities[3] = { - {RTP_PRIO_MIN, RTP_PRIO_MAX, 0}, /* FIFO */ - {0, 0, 63}, /* OTHER */ - {RTP_PRIO_MIN, RTP_PRIO_MAX, 0} /* RR */ -}; - struct pthread_attr _pthread_attr_default = { .sched_policy = SCHED_OTHER, .sched_inherit = PTHREAD_INHERIT_SCHED, @@ -89,13 +83,16 @@ struct pthread_mutex_attr _pthread_mutexattr_default = { .m_type = PTHREAD_MUTEX_DEFAULT, .m_protocol = PTHREAD_PRIO_NONE, - .m_ceiling = 0 + .m_ceiling = 0, + .m_pshared = 0, + .m_robust = PTHREAD_MUTEX_STALLED }; struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = { .m_type = PTHREAD_MUTEX_ADAPTIVE_NP, .m_protocol = PTHREAD_PRIO_NONE, - .m_ceiling = 0 + .m_ceiling = 0, + .m_pshared = 0 }; /* Default condition variable attributes: */ @@ -120,6 +117,7 @@ struct umutex _keytable_lock = DEFAULT_UMUTEX; struct urwlock _thr_list_lock = DEFAULT_URWLOCK; struct umutex _thr_event_lock = DEFAULT_UMUTEX; +struct umutex _lmalloc_lock = DEFAULT_UMUTEX; int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); @@ -235,10 +233,8 @@ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */ - {(pthread_func_t)__pthread_mutex_lock, - (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */ - {(pthread_func_t)__pthread_mutex_trylock, - (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */ + {DUAL_ENTRY(_pthread_mutex_lock)}, /* PJT_MUTEX_LOCK */ + {DUAL_ENTRY(_pthread_mutex_trylock)}, /* PJT_MUTEX_TRYLOCK */ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */ @@ -257,7 +253,30 @@ {DUAL_ENTRY(__pthread_cleanup_pop_imp)},/* PJT_CLEANUP_POP_IMP */ {DUAL_ENTRY(__pthread_cleanup_push_imp)},/* PJT_CLEANUP_PUSH_IMP */ {DUAL_ENTRY(_pthread_cancel_enter)}, /* PJT_CANCEL_ENTER */ - {DUAL_ENTRY(_pthread_cancel_leave)} /* PJT_CANCEL_LEAVE */ + {DUAL_ENTRY(_pthread_cancel_leave)} /* PJT_CANCEL_LEAVE */ +}; + +#define SINGLE_ENTRY(entry) (pthread_func_t)entry + +pthread_func_t jmp_table10[PJT10_MAX] = { + SINGLE_ENTRY(_pthread_cond_broadcast_1_0), /* PJT10_COND_BROADCAST */ + SINGLE_ENTRY(_pthread_cond_destroy_1_0), /* PJT10_COND_DESTROY */ + SINGLE_ENTRY(_pthread_cond_init_1_0), /* PJT10_COND_INIT */ + SINGLE_ENTRY(_pthread_cond_signal_1_0), /* PJT10_COND_SIGNAL */ + SINGLE_ENTRY(_pthread_cond_timedwait_1_0), /* PJT10_COND_TIMEDWAIT */ + SINGLE_ENTRY(_pthread_cond_wait_1_0), /* PJT10_COND_WAIT */ + SINGLE_ENTRY(_pthread_mutex_destroy_1_0), /* PJT10_MUTEX_DESTROY */ + SINGLE_ENTRY(_pthread_mutex_init_1_0), /* PJT10_MUTEX_INIT */ + SINGLE_ENTRY(_pthread_mutex_lock_1_0), /* PJT10_MUTEX_LOCK */ + 
SINGLE_ENTRY(_pthread_mutex_trylock_1_0), /* PJT10_MUTEX_TRYLOCK */ + SINGLE_ENTRY(_pthread_mutex_unlock_1_0), /* PJT10_MUTEX_UNLOCK */ + SINGLE_ENTRY(_pthread_rwlock_destroy_1_0), /* PJT10_RWLOCK_DESTROY */ + SINGLE_ENTRY(_pthread_rwlock_init_1_0), /* PJT10_RWLOCK_INIT */ + SINGLE_ENTRY(_pthread_rwlock_rdlock_1_0), /* PJT10_RWLOCK_RDLOCK */ + SINGLE_ENTRY(_pthread_rwlock_tryrdlock_1_0),/* PJT10_RWLOCK_TRYRDLOCK */ + SINGLE_ENTRY(_pthread_rwlock_trywrlock_1_0),/* PJT10_RWLOCK_TRYWRLOCK */ + SINGLE_ENTRY(_pthread_rwlock_unlock_1_0), /* PJT10_RWLOCK_UNLOCK */ + SINGLE_ENTRY(_pthread_rwlock_wrlock_1_0) /* PJT10_RWLOCK_WRLOCK */ }; static int init_once = 0; @@ -312,6 +331,10 @@ PANIC("Thread jump table not properly initialized"); memcpy(__thr_jtable, jmp_table, sizeof(jmp_table)); + if (sizeof(jmp_table10) != (sizeof(pthread_func_t) * PJT10_MAX)) + PANIC("Thread jump table not properly initialized"); + memcpy(__thr_jtable10, jmp_table10, sizeof(jmp_table10)); + /* * Check for the special case of this process running as * or in place of init as pid = 1: @@ -411,10 +434,6 @@ thread->cancel_enable = 1; thread->cancel_async = 0; - /* Initialize the mutex queue: */ - TAILQ_INIT(&thread->mutexq); - TAILQ_INIT(&thread->pp_mutexq); - thread->state = PS_RUNNING; _thr_getscheduler(thread->tid, &thread->attr.sched_policy, --- src/lib/libthr/thread/thr_join.c 2010-09-19 09:35:50.000000000 0000 +++ src/lib/libthr/thread/thr_join.c 2012-05-04 08:54:36.000000000 0000 @@ -61,8 +61,7 @@ _pthread_timedjoin_np(pthread_t pthread, void **thread_return, const struct timespec *abstime) { - if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000) + if (abstime == NULL || !_validate_timespec(abstime)) return (EINVAL); return (join_common(pthread, thread_return, abstime)); --- src/lib/libthr/thread/thr_kern.c 2010-12-22 05:35:46.000000000 0000 +++ src/lib/libthr/thread/thr_kern.c 2012-05-16 06:19:29.000000000 0000 @@ -136,7 +136,68 @@ return (rtprio_thread(RTP_SET, lwpid, &rtp)); } +int +_thr_enter_ceiling(struct pthread *curthread, int ceiling) +{ + struct sched_param param; + int err; + + THR_LOCK(curthread); + if (ceiling == curthread->inherited_prio) { + THR_UNLOCK(curthread); + return (0); + } + if (ceiling < curthread->inherited_prio) { + THR_UNLOCK(curthread); + return (EPERM); + } + if (curthread->attr.sched_policy != SCHED_OTHER && + ceiling < curthread->attr.prio) { + THR_UNLOCK(curthread); + return (EPERM); + } + param.sched_priority = ceiling; + if (curthread->attr.sched_policy == SCHED_OTHER) + err = _thr_setscheduler(TID(curthread), SCHED_RR, &param); + else + err = _thr_setscheduler(TID(curthread), + curthread->attr.sched_policy, &param); + if (err == 0) + curthread->inherited_prio = ceiling; + else + err = errno; + THR_UNLOCK(curthread); + return (err); +} + void +_thr_set_ceiling(struct pthread *curthread, int ceiling) +{ + struct sched_param param; + + THR_LOCK(curthread); + if (curthread->inherited_prio == ceiling) { + THR_UNLOCK(curthread); + return; + } + if (ceiling < -1 || ceiling > THR_MAX_RR_PRIORITY) { + THR_UNLOCK(curthread); + return; + } + curthread->inherited_prio = ceiling; + if (curthread->attr.sched_policy == SCHED_OTHER) { + if (ceiling == -1) { + param.sched_priority = 0; + _thr_setscheduler(TID(curthread), SCHED_OTHER, &param); + } + } else if (curthread->inherited_prio < curthread->attr.prio) { + param.sched_priority = curthread->attr.prio; + _thr_setscheduler(TID(curthread), curthread->attr.sched_policy, &param); + } + THR_UNLOCK(curthread); +} + 
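
The two routines just added are the userland half of POSIX priority-ceiling (PTHREAD_PRIO_PROTECT) locking: _thr_enter_ceiling() boosts the calling thread to a mutex's ceiling before the lock is acquired, refusing with EPERM when the ceiling lies below the thread's current priority, while _thr_set_ceiling() drops the thread back once the highest-ceiling mutex it holds is released. For orientation, the sketch below shows the portable API that ends up exercising these helpers. It is illustrative only; make_pp_mutex() is a name local to this example, and it assumes the requested ceiling is valid for the process's scheduling limits.

#include <pthread.h>

/*
 * Minimal sketch: create a priority-ceiling mutex.  While a thread holds
 * such a mutex, the library raises the thread to `ceiling' (here, via
 * _thr_enter_ceiling() above) and restores the previous priority on the
 * final unlock (via _thr_set_ceiling()).
 */
static int
make_pp_mutex(pthread_mutex_t *m, int ceiling)
{
	pthread_mutexattr_t attr;
	int error;

	if ((error = pthread_mutexattr_init(&attr)) != 0)
		return (error);
	error = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	if (error == 0)
		error = pthread_mutexattr_setprioceiling(&attr, ceiling);
	if (error == 0)
		error = pthread_mutex_init(m, &attr);
	(void)pthread_mutexattr_destroy(&attr);
	return (error);
}

Locking a mutex created this way takes the MT_PP branch of mutex_lock_common() in the thr_mutex.c changes further below, which pairs _thr_enter_ceiling() with enqueue_pp_mutex() on success.
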
+void _thr_wake_addr_init(void) { _thr_umutex_init(&addr_lock); @@ -198,14 +259,6 @@ _thr_sleep(struct pthread *curthread, int clockid, const struct timespec *abstime) { - - curthread->will_sleep = 0; - if (curthread->nwaiter_defer > 0) { - _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); - curthread->nwaiter_defer = 0; - } - if (curthread->wake_addr->value != 0) return (0); --- src/lib/libthr/thread/thr_list.c 2012-02-11 04:36:42.000000000 0000 +++ src/lib/libthr/thread/thr_list.c 2012-05-02 02:09:13.000000000 0000 @@ -149,7 +149,7 @@ if (total_threads > MAX_THREADS) return (NULL); atomic_fetchadd_int(&total_threads, 1); - thread = calloc(1, sizeof(struct pthread)); + thread = lcalloc(1, sizeof(struct pthread)); if (thread == NULL) { atomic_fetchadd_int(&total_threads, -1); return (NULL); @@ -221,7 +221,7 @@ _sleepq_free(thread->sleepqueue); if (thread->wake_addr != NULL) _thr_release_wake_addr(thread->wake_addr); - free(thread); + lfree(thread); } /* --- src/lib/libthr/thread/thr_mutex.c 2012-05-27 01:35:52.000000000 0000 +++ src/lib/libthr/thread/thr_mutex.c 2012-06-07 07:22:34.000000000 0000 @@ -34,101 +34,92 @@ */ #include "namespace.h" -#include -#include #include -#include -#include #include #include +#include #include "un-namespace.h" #include "thr_private.h" -#if defined(_PTHREADS_INVARIANTS) -#define MUTEX_INIT_LINK(m) do { \ - (m)->m_qe.tqe_prev = NULL; \ - (m)->m_qe.tqe_next = NULL; \ -} while (0) -#define MUTEX_ASSERT_IS_OWNED(m) do { \ - if (__predict_false((m)->m_qe.tqe_prev == NULL))\ - PANIC("mutex is not on list"); \ -} while (0) -#define MUTEX_ASSERT_NOT_OWNED(m) do { \ - if (__predict_false((m)->m_qe.tqe_prev != NULL || \ - (m)->m_qe.tqe_next != NULL)) \ - PANIC("mutex is on list"); \ -} while (0) -#else -#define MUTEX_INIT_LINK(m) -#define MUTEX_ASSERT_IS_OWNED(m) -#define MUTEX_ASSERT_NOT_OWNED(m) -#endif - /* - * For adaptive mutexes, how many times to spin doing trylock2 - * before entering the kernel to block + * For adaptive mutexes, how many times to spin doing trylock before entering + * the kernel to block */ #define MUTEX_ADAPTIVE_SPINS 2000 -/* - * Prototypes - */ -int __pthread_mutex_init(pthread_mutex_t *mutex, - const pthread_mutexattr_t *mutex_attr); -int __pthread_mutex_trylock(pthread_mutex_t *mutex); -int __pthread_mutex_lock(pthread_mutex_t *mutex); -int __pthread_mutex_timedlock(pthread_mutex_t *mutex, - const struct timespec *abstime); -int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); -int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); -int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); -int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); -int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); -int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); -int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); +/* Mutex types */ +#define MUTEX_TYPE(m) ((m)->__flags & (UMUTEX_PRIO_PROTECT2 | \ + UMUTEX_PRIO_INHERIT | UMUTEX_SIMPLE |\ + UMUTEX_ROBUST | USYNC_PROCESS)) + +#define MT_PNONE (UMUTEX_SIMPLE) +#define MT_SHARED_PNONE (USYNC_PROCESS|UMUTEX_SIMPLE) +#define MT_ROB_PNONE (UMUTEX_ROBUST|UMUTEX_SIMPLE) +#define MT_ROB_SHARED_PNONE (UMUTEX_ROBUST|USYNC_PROCESS|UMUTEX_SIMPLE) + +#define MT_PP (UMUTEX_PRIO_PROTECT2|UMUTEX_SIMPLE) +#define MT_SHARED_PP (USYNC_PROCESS|UMUTEX_PRIO_PROTECT2|UMUTEX_SIMPLE) +#define MT_ROB_PP (UMUTEX_ROBUST|UMUTEX_PRIO_PROTECT2|UMUTEX_SIMPLE) 
+#define MT_ROB_SHARED_PP (UMUTEX_ROBUST|USYNC_PROCESS|UMUTEX_PRIO_PROTECT2|UMUTEX_SIMPLE) + +#define MT_PI (UMUTEX_PRIO_INHERIT) +#define MT_SHARED_PI (USYNC_PROCESS|UMUTEX_PRIO_INHERIT) +#define MT_ROB_PI (UMUTEX_ROBUST|UMUTEX_PRIO_INHERIT) +#define MT_ROB_SHARED_PI (UMUTEX_ROBUST|USYNC_PROCESS| \ + UMUTEX_PRIO_INHERIT) + +#define M_TRY 0x01 +#define M_NOCEILING 0x02 +#define M_KEEPSTATE 0x04 + +static struct umutex stalled_mutex = {.m_owner = 1 }; +static struct mutex_link * volatile mutex_link_freeq; + +#define PTR_CAST(type, var) ((type)(__uintptr_t)(void *)(var)) -static int mutex_self_trylock(pthread_mutex_t); -static int mutex_self_lock(pthread_mutex_t, - const struct timespec *abstime); -static int mutex_unlock_common(struct pthread_mutex *, int); -static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, - const struct timespec *); +static int mutex_self_lock(struct pthread_mutex *, + const struct timespec *, int); +static int mutex_lock_common(struct pthread_mutex *, + const struct timespec *, int); +static int mutex_unlock_common(struct pthread_mutex *, int, int *); +static void remember_lock(struct pthread *, struct pthread_mutex *); +static void forget_lock(struct pthread *, struct pthread_mutex *); -__weak_reference(__pthread_mutex_init, pthread_mutex_init); -__strong_reference(__pthread_mutex_init, _pthread_mutex_init); -__weak_reference(__pthread_mutex_lock, pthread_mutex_lock); -__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock); -__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); -__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); -__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); -__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); +int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); +int _pthread_mutex_getspinloops_np(pthread_mutex_t *, int *); +int _pthread_mutex_setspinloops_np(pthread_mutex_t *, int); +int _pthread_mutex_setyieldloops_np(pthread_mutex_t *, int); +int _pthread_mutex_getyieldloops_np(pthread_mutex_t *, int *); +int _pthread_mutex_setyieldloops_np(pthread_mutex_t *, int); /* Single underscore versions provided for libc internal usage: */ /* No difference between libc and application usage of these: */ +__weak_reference(_pthread_mutex_init, pthread_mutex_init); +__weak_reference(_pthread_mutex_lock, pthread_mutex_lock); +__weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock); +__weak_reference(_pthread_mutex_trylock, pthread_mutex_trylock); __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); - __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); - -__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); -__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); +__weak_reference(_pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); - -__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); -__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); +__weak_reference(_pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); 
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); +__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent); static int -mutex_init(pthread_mutex_t *mutex, - const struct pthread_mutex_attr *mutex_attr, - void *(calloc_cb)(size_t, size_t)) +mutex_init(struct pthread_mutex *mp, + const struct pthread_mutex_attr *mutex_attr) { const struct pthread_mutex_attr *attr; - struct pthread_mutex *pmutex; + + /* Must align at integer boundary */ + if (((uintptr_t)mp) & 0x03) + return (EINVAL); if (mutex_attr == NULL) { attr = &_pthread_mutexattr_default; @@ -141,657 +132,1299 @@ attr->m_protocol > PTHREAD_PRIO_PROTECT) return (EINVAL); } - if ((pmutex = (pthread_mutex_t) - calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) - return (ENOMEM); + + bzero(mp, sizeof(*mp)); - pmutex->m_flags = attr->m_type; - pmutex->m_owner = NULL; - pmutex->m_count = 0; - pmutex->m_spinloops = 0; - pmutex->m_yieldloops = 0; - MUTEX_INIT_LINK(pmutex); - switch(attr->m_protocol) { + switch (attr->m_protocol) { case PTHREAD_PRIO_NONE: - pmutex->m_lock.m_owner = UMUTEX_UNOWNED; - pmutex->m_lock.m_flags = 0; + mp->__flags |= UMUTEX_SIMPLE; break; case PTHREAD_PRIO_INHERIT: - pmutex->m_lock.m_owner = UMUTEX_UNOWNED; - pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; + mp->__flags |= UMUTEX_PRIO_INHERIT; break; case PTHREAD_PRIO_PROTECT: - pmutex->m_lock.m_owner = UMUTEX_CONTESTED; - pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; - pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; + mp->__flags |= UMUTEX_SIMPLE|UMUTEX_PRIO_PROTECT2; + mp->__ceiling = attr->m_ceiling; break; + default: + return (EINVAL); } - if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { - pmutex->m_spinloops = - _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS; - pmutex->m_yieldloops = _thr_yieldloops; + switch (attr->m_type) { + case PTHREAD_MUTEX_ERRORCHECK: + mp->__flags |= UMUTEX_ERROR_CHECK; + break; + case PTHREAD_MUTEX_RECURSIVE: + mp->__flags |= UMUTEX_RECURSIVE; + break; + case PTHREAD_MUTEX_ADAPTIVE_NP: + mp->__flags |= UMUTEX_ADAPTIVE_SPIN; + break; + case PTHREAD_MUTEX_NORMAL: + break; + default: + return (EINVAL); } - *mutex = pmutex; + if (attr->m_pshared == PTHREAD_PROCESS_SHARED) + mp->__flags |= USYNC_PROCESS; + + if (attr->m_robust == PTHREAD_MUTEX_ROBUST) + mp->__flags |= UMUTEX_ROBUST; + + mp->__magic = _PTHREAD_MUTEX_MAGIC; + return (0); +} + +int +_pthread_mutex_init(pthread_mutex_t *mutex, + const pthread_mutexattr_t *mutex_attr) +{ + return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL); +} + +/* This function is used internally by malloc. 
*/ +int +_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t) __unused) +{ + static const struct pthread_mutex_attr attr = { + .m_type = PTHREAD_MUTEX_NORMAL, + .m_protocol = PTHREAD_PRIO_NONE, + .m_ceiling = 0 + }; + + return mutex_init(mutex, &attr); +} + +int +_pthread_mutex_destroy(pthread_mutex_t *mp) +{ + mp->__magic = 0; return (0); } -static int -init_static(struct pthread *thread, pthread_mutex_t *mutex) +static struct mutex_link * +mutex_link_alloc(void) { - int ret; + struct mutex_link *l, *l2, *p; + int i, pagesize; - THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); +restart: + l = mutex_link_freeq; + if (l == NULL) { + pagesize = getpagesize(); + p = (struct mutex_link *)mmap(NULL, pagesize, + PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0); + for (i = 2; i < (int)(pagesize/sizeof(struct mutex_link)); ++i) + p[i-1].next = &p[i]; + do { + l = mutex_link_freeq; + p[i-1].next = l; + } while (!atomic_cmpset_ptr( + PTR_CAST(intptr_t *, &mutex_link_freeq), + (intptr_t)l, (intptr_t)&p[1])); + return (&p[0]); + } + l2 = l->next; + if (atomic_cmpset_ptr(PTR_CAST(intptr_t *, &mutex_link_freeq), + (intptr_t)l, (intptr_t)l2)) { + l->mp = NULL; + return (l); + } + goto restart; +} - if (*mutex == THR_MUTEX_INITIALIZER) - ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc); - else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) - ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); - else - ret = 0; - THR_LOCK_RELEASE(thread, &_mutex_static_lock); +static void +mutex_link_free(struct mutex_link *l) +{ + struct mutex_link *l2; - return (ret); + do { + l2 = l->next = mutex_link_freeq; + } while(!atomic_cmpset_rel_ptr(PTR_CAST(intptr_t *, &mutex_link_freeq), + (intptr_t)l2, (intptr_t)l)); } static void -set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) +enqueue_pp_mutex(struct pthread *curthread, struct pthread_mutex *mp) +{ + struct mutex_link *l, *cur, *prev; + + l = mutex_link_alloc(); + l->mp = mp; + + for (prev = NULL, cur = curthread->pp_mutexes; + cur != NULL && cur->mp->__ceiling > mp->__ceiling; + prev = cur, cur = cur->next) + ; + + if (prev == NULL) { + l->next = curthread->pp_mutexes; + curthread->pp_mutexes = l; + } else { + l->next = cur; + prev->next = l; + } +} + +void +_dequeue_pp_mutex(struct pthread *curthread, struct pthread_mutex *mp) { - struct pthread_mutex *m2; + struct mutex_link *cur, *prev; - m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue); - if (m2 != NULL) - m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; - else - m->m_lock.m_ceilings[1] = -1; + for (prev = NULL, cur = curthread->pp_mutexes; + cur != NULL && cur->mp != mp; + prev = cur, cur = cur->next) { + if (cur->mp == mp) { + if (prev == NULL) + curthread->pp_mutexes = cur->next; + else + prev->next = cur->next; + mutex_link_free(cur); + break; + } + } } int -__pthread_mutex_init(pthread_mutex_t *mutex, - const pthread_mutexattr_t *mutex_attr) +_highest_pp_ceiling(struct pthread *curthread) { - return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc); + if (curthread->pp_mutexes == NULL) + return (-1); + return (curthread->pp_mutexes->mp->__ceiling); } -/* This function is used internally by malloc. 
*/ -int -_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)) +static void +remember_lock(struct pthread *curthread, struct pthread_mutex *mp) { - static const struct pthread_mutex_attr attr = { - .m_type = PTHREAD_MUTEX_NORMAL, - .m_protocol = PTHREAD_PRIO_NONE, - .m_ceiling = 0 - }; - int ret; + struct mutex_link *l; - ret = mutex_init(mutex, &attr, calloc_cb); - if (ret == 0) - (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE; - return (ret); + l = mutex_link_alloc(); + l->mp = mp; + l->next = curthread->robust_mutexes; + curthread->robust_mutexes = l; } -void -_mutex_fork(struct pthread *curthread) +static void +forget_lock(struct pthread *curthread, struct pthread_mutex *mp) { - struct pthread_mutex *m; + struct mutex_link *cur, *prev; - /* - * Fix mutex ownership for child process. - * note that process shared mutex should not - * be inherited because owner is forking thread - * which is in parent process, they should be - * removed from the owned mutex list, current, - * process shared mutex is not supported, so I - * am not worried. - */ + for (prev = NULL, cur = curthread->robust_mutexes; + cur != NULL; prev = cur, cur = cur->next) { + if (cur->mp == mp) { + if (prev == NULL) + curthread->robust_mutexes = cur->next; + else + prev->next = cur->next; + mutex_link_free(cur); + break; + } + } +} - TAILQ_FOREACH(m, &curthread->mutexq, m_qe) - m->m_lock.m_owner = TID(curthread); - TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe) - m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED; +static inline int +trylock(struct pthread_mutex *mp, uint32_t id) +{ + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, id) || + (mp->__lockword == UMUTEX_CONTESTED && + atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_CONTESTED, + id|UMUTEX_CONTESTED))) { + return (0); + } + return (EBUSY); } -int -_pthread_mutex_destroy(pthread_mutex_t *mutex) +static int +timedlock(struct pthread_mutex *mp, uint32_t id, + const struct timespec *abstime) { - pthread_mutex_t m; - int ret; + int error; + int count; + uint32_t owner; + + if (abstime != NULL && !_validate_timespec(abstime)) + return (EINVAL); + + if (_thr_is_smp && (mp->__flags & UMUTEX_ADAPTIVE_SPIN) != 0) { + count = _thr_spinloops; + while (count--) { + owner = mp->__lockword; + if ((owner & ~UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_32(&mp->__lockword, + owner, id|owner)) + return (0); + } + CPU_SPINWAIT; + } + } - m = *mutex; - if (m < THR_MUTEX_DESTROYED) { - ret = 0; - } else if (m == THR_MUTEX_DESTROYED) { - ret = EINVAL; - } else { - if (m->m_owner != NULL) { - ret = EBUSY; - } else { - *mutex = THR_MUTEX_DESTROYED; - MUTEX_ASSERT_NOT_OWNED(m); - free(m); - ret = 0; + for (;;) { + error = __thr_umutex_wait(UMTX(mp), abstime); + if (trylock(mp, id) == 0) { + error = 0; + break; } + if ((abstime != NULL && error == ETIMEDOUT) || + (error != 0 && error != EINTR)) + break; } + return (error); +} + +static inline int +unlock(struct pthread_mutex *mp, int *defer) +{ + uint32_t owner, flags; + + flags = mp->__flags; + do { + owner = mp->__lockword; + } while(!atomic_cmpset_rel_32(&mp->__lockword, owner, UMUTEX_UNOWNED)); - return (ret); + if ((owner & UMUTEX_CONTESTED) != 0) { + if (__predict_true(defer == NULL)) + __thr_umutex_wake(UMTX(mp), flags); + else + *defer = 1; + } + return (0); } -#define ENQUEUE_MUTEX(curthread, m) \ - do { \ - (m)->m_owner = curthread; \ - /* Add to the list of owned mutexes: */ \ - MUTEX_ASSERT_NOT_OWNED((m)); \ - if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \ - 
TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\ - else \ - TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\ - } while (0) +static int unlock_rob(struct pthread_mutex *, int, int *); -#define DEQUEUE_MUTEX(curthread, m) \ - (m)->m_owner = NULL; \ - MUTEX_ASSERT_IS_OWNED(m); \ - if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \ - TAILQ_REMOVE(&curthread->mutexq, (m), m_qe); \ - else { \ - TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe); \ - set_inherited_priority(curthread, m); \ - } \ - MUTEX_INIT_LINK(m); +static int +trylock_rob(struct pthread_mutex *mp, uint32_t id) +{ + int error; -#define CHECK_AND_INIT_MUTEX \ - if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \ - if (m == THR_MUTEX_DESTROYED) \ - return (EINVAL); \ - int ret; \ - ret = init_static(_get_curthread(), mutex); \ - if (ret) \ - return (ret); \ - m = *mutex; \ + error = trylock(mp, id); + if (error == 0) { + if (mp->__robstate == ROBST_OWNER_DEAD) + return (EOWNERDEAD); + if (mp->__robstate == ROBST_NOTRECOVERABLE) { + unlock_rob(mp, 1, NULL); + return (ENOTRECOVERABLE); + } } + return (error); +} static int -mutex_trylock_common(pthread_mutex_t *mutex) +timedlock_rob(struct pthread_mutex *mp, uint32_t id, + const struct timespec *abstime) { - struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m = *mutex; - uint32_t id; - int ret; + int error; + + error = timedlock(mp, id, abstime); + if (error == 0) { + if (mp->__robstate == ROBST_OWNER_DEAD) + return (EOWNERDEAD); + if (mp->__robstate == ROBST_NOTRECOVERABLE) { + unlock_rob(mp, 1, NULL); + return (ENOTRECOVERABLE); + } + } + return (error); +} - id = TID(curthread); - if (m->m_flags & PMUTEX_FLAG_PRIVATE) - THR_CRITICAL_ENTER(curthread); - ret = _thr_umutex_trylock(&m->m_lock, id); - if (__predict_true(ret == 0)) { - ENQUEUE_MUTEX(curthread, m); - } else if (m->m_owner == curthread) { - ret = mutex_self_trylock(m); - } /* else {} */ - if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE)) - THR_CRITICAL_LEAVE(curthread); - return (ret); +static int +unlock_rob(struct pthread_mutex *mp, int keepstate, int *defer) +{ + if (!keepstate) { + if (mp->__robstate == ROBST_OWNER_DEAD) + mp->__robstate = ROBST_NOTRECOVERABLE; + } + return (unlock(mp, defer)); } -int -__pthread_mutex_trylock(pthread_mutex_t *mutex) +static inline int +unlock_shared(struct pthread_mutex *mp) { - struct pthread_mutex *m; + if (atomic_cmpset_rel_32(&mp->__lockword, UMUTEX_SIMPLE_OWNER, + UMUTEX_UNOWNED)) + return (0); + /* Do fully unlocking in kernel. */ + return (__thr_umutex_unlock(UMTX(mp), 0)); +} - CHECK_AND_INIT_MUTEX +static inline int +trylock_pi(struct pthread_mutex *mp, int trykernel) +{ + uint32_t id, owner; - return (mutex_trylock_common(mutex)); + id = TID(_get_curthread()); + owner = mp->__lockword; + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, id)) + return (0); + if (trykernel && owner == UMUTEX_CONTESTED) + return (__thr_umutex_trylock(UMTX(mp))); + return (EBUSY); } static int -mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, - const struct timespec *abstime) +unlock_pi(struct pthread_mutex *mp) { - uint32_t id, owner; - int count; - int ret; + uint32_t id; - if (m->m_owner == curthread) - return mutex_self_lock(m, abstime); + id = TID(_get_curthread()); + if (atomic_cmpset_rel_32(&mp->__lockword, id, UMUTEX_UNOWNED)) + return (0); + /* Do fully unlocking in kernel. 
*/ + return (__thr_umutex_unlock((struct umutex *)&mp->__lockword, 0)); +} - id = TID(curthread); - /* - * For adaptive mutexes, spin for a bit in the expectation - * that if the application requests this mutex type then - * the lock is likely to be released quickly and it is - * faster than entering the kernel - */ - if (__predict_false( - (m->m_lock.m_flags & - (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) - goto sleep_in_kernel; +static int +unlock_pi_rob(struct pthread_mutex *, int); - if (!_thr_is_smp) - goto yield_loop; +static int +trylock_pi_rob(struct pthread_mutex *mp, int trykernel) +{ + int error; - count = m->m_spinloops; - while (count--) { - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { - ret = 0; - goto done; - } + error = trylock_pi(mp, trykernel); + if (error == 0) { + if (mp->__robstate == ROBST_OWNER_DEAD) + return (EOWNERDEAD); + if (mp->__robstate == ROBST_NOTRECOVERABLE) { + unlock_pi_rob(mp, 1); + return (ENOTRECOVERABLE); } - CPU_SPINWAIT; } + return (error); +} -yield_loop: - count = m->m_yieldloops; - while (count--) { - _sched_yield(); - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { - ret = 0; - goto done; - } +static int +unlock_pi_rob(struct pthread_mutex *mp, int keepstate) +{ + uint32_t id; + + if (!keepstate) { + if (mp->__robstate == ROBST_OWNER_DEAD) { + mp->__robstate = ROBST_NOTRECOVERABLE; + keepstate = 1; } } + id = TID(_get_curthread()); + if (atomic_cmpset_rel_32(&mp->__lockword, id, UMUTEX_UNOWNED)) + return (0); + return (__thr_umutex_unlock((struct umutex *)&mp->__lockword, + keepstate)); +} -sleep_in_kernel: - if (abstime == NULL) { - ret = __thr_umutex_lock(&m->m_lock, id); - } else if (__predict_false( - abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000)) { - ret = EINVAL; +static int +mutex_self_lock(struct pthread_mutex *mp, const struct timespec *abstime, + int try) +{ + int error; + + if ((mp->__flags & UMUTEX_ERROR_CHECK) != 0) { + error = EDEADLK; + } else if ((mp->__flags & UMUTEX_RECURSIVE) != 0) { + if (mp->__recurse + 1 != 0) { + mp->__recurse++; + error = 0; + } else + error = EAGAIN; + } else if (try) { + error = EBUSY; } else { - ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); + /* + * What SS2 define as a 'normal' mutex. Intentionally + * deadlock on attempts to get a lock you already own. 
+ */ + if (abstime != NULL && !_validate_timespec(abstime)) + error = EINVAL; + else + error = _thr_umutex_timedlock( + &stalled_mutex, + TID(_get_curthread()), + abstime); } -done: - if (ret == 0) - ENQUEUE_MUTEX(curthread, m); + return (error); +} - return (ret); +static void +print_unknown_mutex(const char *func, struct pthread_mutex *mp) +{ +#ifdef _PTHREADS_INVARIANTS + stderr_debug("%s, unknown mutex type, address:%p, " + "flags:0x%x\n", func, mp, mp->__flags); + PANIC("stop"); +#endif } -static inline int -mutex_lock_common(struct pthread_mutex *m, - const struct timespec *abstime, int cvattach) +static int +mutex_lock_common(struct pthread_mutex *mp, const struct timespec *abstime, int flags) { - struct pthread *curthread = _get_curthread(); - int ret; + struct pthread *curthread = _get_curthread(); + int type; + int error; + int old_ceiling; + int try, noceiling; - if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) - THR_CRITICAL_ENTER(curthread); - if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { - ENQUEUE_MUTEX(curthread, m); - ret = 0; + try = (flags & M_TRY); + noceiling = (flags & M_NOCEILING); + type = MUTEX_TYPE(mp); + /* Use a series of ifs according to their probability. */ + if (__predict_true(type == MT_PNONE)) { + if (__predict_true( + (error = trylock(mp, UMUTEX_SIMPLE_OWNER)) == 0)) { + mp->__ownertd.__pthread = curthread; + return (0); + } + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread) + return mutex_self_lock(mp, abstime, try); + } + if (try) + return (error); + error = timedlock(mp, UMUTEX_SIMPLE_OWNER, abstime); + if (error == 0) + mp->__ownertd.__pthread = curthread; + return (error); + } else if (type == MT_SHARED_PNONE) { + if ((error = trylock(mp, UMUTEX_SIMPLE_OWNER)) == 0) { + mp->__ownerpid = _thr_pid; + mp->__ownertd.__ithread = (uintptr_t)curthread; + return (0); + } + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread && + mp->__ownerpid == (uint32_t)_thr_pid) + return mutex_self_lock(mp, abstime, try); + } + if (try) + return (error); + error = timedlock(mp, UMUTEX_SIMPLE_OWNER, abstime); + if (error == 0) { + mp->__ownerpid = _thr_pid; + mp->__ownertd.__ithread = (uintptr_t)curthread; + } + return (error); + } else if (type == MT_ROB_SHARED_PNONE) { + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread && + mp->__ownerpid == (uint32_t)_thr_pid) + return mutex_self_lock(mp, abstime, try); + } + if (try) { + error = __thr_umutex_trylock(UMTX(mp)); + if (error == 0 || error == EOWNERDEAD) + goto rob_shared_pnone; + return (error); + } + error = __thr_umutex_timedlock(UMTX(mp), abstime); + if (error == 0 || error == EOWNERDEAD) { + rob_shared_pnone: + mp->__ownertd.__pthread = curthread; + mp->__ownerpid = _thr_pid; + mp->__recurse = 0; + } + return (error); + } else if (type == MT_PP) { + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread) + return mutex_self_lock(mp, abstime, try); + } + old_ceiling = curthread->inherited_prio; + if (!noceiling) { + if ((error = _thr_enter_ceiling(curthread, + mp->__ceiling)) != 0) + return (error); + } + if ((error = trylock(mp, UMUTEX_SIMPLE_OWNER)) == 0) { + mp->__ownertd.__pthread = curthread; + if (!noceiling) + enqueue_pp_mutex(curthread, mp); + return (0); + } + if (try) { + if (!noceiling) + _thr_set_ceiling(curthread, old_ceiling); + return (error); + } + 
error = timedlock(mp, UMUTEX_SIMPLE_OWNER, abstime); + if (error == 0) { + mp->__ownertd.__pthread = curthread; + if (!noceiling) + enqueue_pp_mutex(curthread, mp); + } else { + if (!noceiling) + _thr_set_ceiling(curthread, old_ceiling); + } + return (error); + } else if (type == MT_SHARED_PP) { + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread && + mp->__ownerpid == (uint32_t) _thr_pid) + return mutex_self_lock(mp, abstime, try); + } + old_ceiling = curthread->inherited_prio; + if (!noceiling) { + if ((error = _thr_enter_ceiling(curthread, + mp->__ceiling)) != 0) + return (error); + } + if ((error = trylock(mp, UMUTEX_SIMPLE_OWNER)) == 0) { + mp->__ownerpid = _thr_pid; + mp->__ownertd.__ithread = (uintptr_t)curthread; + if (!noceiling) + enqueue_pp_mutex(curthread, mp); + return (0); + } + if (try) { + if (!noceiling) + _thr_set_ceiling(curthread, old_ceiling); + return (error); + } + error = timedlock(mp, UMUTEX_SIMPLE_OWNER, abstime); + if (error == 0) { + mp->__ownerpid = _thr_pid; + mp->__ownertd.__ithread = (uintptr_t)curthread; + if (!noceiling) + enqueue_pp_mutex(curthread, mp); + } else { + if (!noceiling) + _thr_set_ceiling(curthread, old_ceiling); + } + return (error); + } else if (type == MT_PI) { + if ((error = trylock_pi(mp, try)) == 0) { + mp->__ownertd.__pthread = curthread; + return (0); + } + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread) + return mutex_self_lock(mp, abstime, try); + } + if (try) + return (error); + if ((error = __thr_umutex_timedlock(UMTX(mp), abstime)) == 0) + mp->__ownertd.__pthread = curthread; + return (error); + } else if (type == MT_SHARED_PI) { + if ((error = trylock_pi(mp, try)) == 0) { + mp->__ownerpid = _thr_pid; + mp->__ownertd.__ithread = (uintptr_t)curthread; + return (0); + } + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread && + mp->__ownerpid == (uint32_t) _thr_pid) + return mutex_self_lock(mp, abstime, try); + } + if (try) + return (error); + if ((error = __thr_umutex_timedlock(UMTX(mp), abstime)) == 0) { + mp->__ownerpid = _thr_pid; + mp->__ownertd.__ithread = (uintptr_t)curthread; + } + return (error); + } else if (type == MT_ROB_PNONE) { + error = trylock_rob(mp, UMUTEX_SIMPLE_OWNER); + if (error == 0 || error == EOWNERDEAD) + goto rob_pnone; + if (error != EBUSY) + return (error); + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread) + return mutex_self_lock(mp, abstime, try); + } + if (try) + return (error); + error = timedlock_rob(mp, UMUTEX_SIMPLE_OWNER, abstime); + if (error == 0 || error == EOWNERDEAD) { + rob_pnone: + mp->__ownertd.__pthread = curthread; + mp->__recurse = 0; + remember_lock(curthread, mp); + } + return (error); + } else if (type == MT_ROB_PP) { + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread) + return mutex_self_lock(mp, abstime, try); + } + old_ceiling = curthread->inherited_prio; + if (!noceiling) { + if ((error = _thr_enter_ceiling(curthread, + mp->__ceiling)) != 0) + return (error); + } + error = trylock_rob(mp, UMUTEX_SIMPLE_OWNER); + if (error == 0 || error == EOWNERDEAD) + goto rob_pp; + if (try || error != EBUSY) { + if (!noceiling) + _thr_set_ceiling(curthread, old_ceiling); + return (error); + } + error = timedlock_rob(mp, UMUTEX_SIMPLE_OWNER, abstime); + if (error == 0 || error == 
EOWNERDEAD) { + rob_pp: + mp->__ownertd.__pthread = curthread; + mp->__saved_ceiling = old_ceiling; + mp->__recurse = 0; + remember_lock(curthread, mp); + if (!noceiling) + enqueue_pp_mutex(curthread, mp); + } else { + if (!noceiling) + _thr_set_ceiling(curthread, old_ceiling); + } + return (error); + } else if (type == MT_ROB_PI) { + error = trylock_pi_rob(mp, try); + if (error == 0 || error == EOWNERDEAD) + goto rob_pi; + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread) + return mutex_self_lock(mp, abstime, try); + } + if (try || error != EBUSY) + return (error); + error = __thr_umutex_timedlock(UMTX(mp), abstime); + if (error == 0 || error == EOWNERDEAD) { + rob_pi: + mp->__ownertd.__pthread = curthread; + mp->__recurse = 0; + remember_lock(curthread, mp); + return (error); + } + return (error); + } else if (type == MT_ROB_SHARED_PP || type == MT_ROB_SHARED_PI) { + if ((mp->__flags & + (UMUTEX_ERROR_CHECK|UMUTEX_RECURSIVE)) != 0) { + if (mp->__ownertd.__pthread == curthread && + mp->__ownerpid == (uint32_t)_thr_pid) + return mutex_self_lock(mp, abstime, try); + } + old_ceiling = curthread->inherited_prio; + if (type == MT_ROB_SHARED_PP && !noceiling) { + if ((error = _thr_enter_ceiling(curthread, + mp->__ceiling)) != 0) + return (error); + } + if (try) { + error = __thr_umutex_trylock(UMTX(mp)); + if (error == 0 || error == EOWNERDEAD) + goto rob_shared; + if (type == MT_ROB_SHARED_PP && !noceiling) + _thr_set_ceiling(curthread, old_ceiling); + return (error); + } + error = __thr_umutex_timedlock(UMTX(mp), abstime); + if (error == 0 || error == EOWNERDEAD) { + rob_shared: + mp->__ownertd.__pthread = curthread; + mp->__ownerpid = _thr_pid; + mp->__recurse = 0; + if (type == MT_ROB_SHARED_PP && !noceiling) + enqueue_pp_mutex(curthread, mp); + } else { + if (type == MT_ROB_SHARED_PP && !noceiling) + _thr_set_ceiling(curthread, old_ceiling); + } + return (error); } else { - ret = mutex_lock_sleep(curthread, m, abstime); + print_unknown_mutex(__func__, mp); + return (EINVAL); } - if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach) - THR_CRITICAL_LEAVE(curthread); - return (ret); + + return (0); +} + +int +_pthread_mutex_trylock(pthread_mutex_t *mp) +{ + return mutex_lock_common(mp, NULL, M_TRY); } int -__pthread_mutex_lock(pthread_mutex_t *mutex) +_pthread_mutex_lock(pthread_mutex_t *mp) { - struct pthread_mutex *m; _thr_check_init(); - CHECK_AND_INIT_MUTEX + return (mutex_lock_common(mp, NULL, 0)); +} + +int +_pthread_mutex_timedlock(pthread_mutex_t *mp, const struct timespec *abstime) +{ + + _thr_check_init(); - return (mutex_lock_common(m, NULL, 0)); + return (mutex_lock_common(mp, abstime, 0)); } int -__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) +_mutex_owned(const struct pthread_mutex *mp) +{ + if (mp->__ownertd.__pthread != _get_curthread()) + return (0); + if ((mp->__flags & USYNC_PROCESS) != 0) + return (mp->__ownerpid == (uint32_t) _thr_pid); + return (1); +} + +static int +mutex_unlock_common(struct pthread_mutex *mp, int flags, int *defer) { - struct pthread_mutex *m; + struct pthread *curthread = _get_curthread(); + int type; + int error; + int ceiling; + int keepstate; + int noceiling; + + if (__predict_false(mp->__ownertd.__pthread != curthread)) + return (EPERM); + + /* Use a series of ifs according to their probability. 
*/ + if (__predict_true((mp->__flags & (USYNC_PROCESS|UMUTEX_RECURSIVE| + UMUTEX_PRIO_PROTECT2 |UMUTEX_PRIO_INHERIT|UMUTEX_ROBUST| + UMUTEX_RECURSIVE)) == 0)) { + /* Optimize normal mutex */ + mp->__ownertd.__pthread = NULL; + if (__predict_true(mp->__userf == 0)) + return unlock(mp, defer); + /* Need flush buffered waiters */ + mp->__userf = 0; + error = unlock(mp, defer); + if (defer == NULL) + _thr_flush_defer(curthread); + return (error); + } + + if (__predict_false((mp->__flags & USYNC_PROCESS) != 0)) { + if (mp->__ownerpid != (uint32_t)_thr_pid) + return (EPERM); + } - _thr_check_init(); + if (__predict_false((mp->__flags & UMUTEX_RECURSIVE) != 0)) { + if (mp->__recurse != 0) { + mp->__recurse--; + return (0); + } + } - CHECK_AND_INIT_MUTEX + keepstate = (flags & M_KEEPSTATE); + noceiling = (flags & M_NOCEILING); + type = MUTEX_TYPE(mp); + if (type == MT_PNONE) { + mp->__ownertd.__pthread = NULL; + if (__predict_true(mp->__userf == 0)) + return unlock(mp, defer); + mp->__userf = 0; + error = unlock(mp, defer); + if (defer == NULL) + _thr_flush_defer(curthread); + return (error); + } else if (type == MT_SHARED_PNONE) { + mp->__ownertd.__ithread = 0; + mp->__ownerpid = 0; + return (unlock_shared(mp)); + } else if (type == MT_ROB_SHARED_PNONE) { + mp->__ownertd.__ithread = 0; + mp->__ownerpid = 0; + error = __thr_umutex_unlock(UMTX(mp), keepstate); + return (error); + } else if (type == MT_PP) { + mp->__ownertd.__pthread = NULL; + error = unlock(mp, NULL); + if (!noceiling) { + ceiling = _highest_pp_ceiling(curthread); + _thr_set_ceiling(curthread, ceiling); + _dequeue_pp_mutex(curthread, mp); + } + return (error); + } else if (type == MT_SHARED_PP) { + mp->__ownertd.__ithread = 0; + mp->__ownerpid = 0; + error = unlock(mp, NULL); + if (!noceiling) { + ceiling = _highest_pp_ceiling(curthread); + _thr_set_ceiling(curthread, ceiling); + _dequeue_pp_mutex(curthread, mp); + } + return (error); + } else if (type == MT_PI) { + mp->__ownertd.__pthread = NULL; + return unlock_pi(mp); + } else if (type == MT_SHARED_PI) { + mp->__ownertd.__ithread = 0; + mp->__ownerpid = 0; + return unlock_pi(mp); + } else if (type == MT_ROB_PNONE) { + mp->__ownertd.__pthread = NULL; + if (__predict_true(mp->__userf == 0)) + error = unlock_rob(mp, keepstate, defer); + else { + mp->__userf = 0; + error = unlock_rob(mp, keepstate, defer); + if (defer == NULL) + _thr_flush_defer(curthread); + } + forget_lock(curthread, mp); + return (error); + } else if (type == MT_ROB_PP) { + mp->__ownertd.__pthread = NULL; + error = unlock_rob(mp, keepstate, NULL); + forget_lock(curthread, mp); + if (!noceiling) { + ceiling = _highest_pp_ceiling(curthread); + _thr_set_ceiling(curthread, ceiling); + _dequeue_pp_mutex(curthread, mp); + } + return (error); + } else if (type == MT_ROB_PI) { + mp->__ownertd.__pthread = NULL; + error = unlock_pi_rob(mp, keepstate); + forget_lock(curthread, mp); + return (error); + } else if (type == MT_ROB_SHARED_PP || type == MT_ROB_SHARED_PI) { + mp->__ownertd.__ithread = 0; + mp->__ownerpid = 0; + error = __thr_umutex_unlock(UMTX(mp), keepstate); + if (!noceiling && type == MT_ROB_SHARED_PP) { + ceiling = _highest_pp_ceiling(curthread); + _thr_set_ceiling(curthread, ceiling); + _dequeue_pp_mutex(curthread, mp); + } + return (error); + } else { + print_unknown_mutex(__func__, mp); + return (EINVAL); + } - return (mutex_lock_common(m, abstime, 0)); + return (0); } int -_pthread_mutex_unlock(pthread_mutex_t *mutex) +_pthread_mutex_unlock(pthread_mutex_t *mp) { - struct pthread_mutex *mp; - - mp = 
*mutex; - return (mutex_unlock_common(mp, 0)); + return (mutex_unlock_common(mp, 0, NULL)); } int -_mutex_cv_lock(struct pthread_mutex *m, int count) +_pthread_mutex_getprioceiling(pthread_mutex_t *mp, int *prioceiling) { int error; - error = mutex_lock_common(m, NULL, 1); - if (error == 0) - m->m_count = count; + if ((mp->__flags & UMUTEX_PRIO_PROTECT2) == 0) + error = EINVAL; + else { + *prioceiling = mp->__ceiling; + error = 0; + } + return (error); } int -_mutex_cv_unlock(struct pthread_mutex *m, int *count) +_pthread_mutex_setprioceiling(pthread_mutex_t *mp, + int ceiling, int *old_ceiling) +{ + struct pthread *curthread = _get_curthread(); + int error = 0; + + if ((mp->__flags & UMUTEX_PRIO_PROTECT2) == 0) + return (EINVAL); + if (ceiling > THR_MAX_RR_PRIORITY || ceiling < 0) + return (EINVAL); + if (_mutex_owned(mp)) { + if (old_ceiling != NULL) + *old_ceiling = mp->__ceiling; + _dequeue_pp_mutex(curthread, mp); + mp->__ceiling = ceiling; + enqueue_pp_mutex(curthread, mp); + ceiling = _highest_pp_ceiling(curthread); + if (ceiling > curthread->inherited_prio) + _thr_enter_ceiling(curthread, ceiling); + } else { + /* + * POSIX says the process of locking the mutex need not + * adhere to the priority protect protocol. + */ + error = mutex_lock_common(mp, NULL, M_NOCEILING); + if (error == 0 || error == EOWNERDEAD) { + if (old_ceiling != NULL) + *old_ceiling = mp->__ceiling; + mp->__ceiling = ceiling; + mutex_unlock_common(mp, M_KEEPSTATE|M_NOCEILING, NULL); + } + } + return (error); +} + +/* Unlock a robust mutex when thread is exiting */ +static void +mutex_forced_unlock(struct pthread_mutex *mp) +{ + if (mp->__robstate == ROBST_NORMAL) + mp->__robstate = ROBST_OWNER_DEAD; + mutex_unlock_common(mp, M_KEEPSTATE, NULL); +} + +void +_mutex_thread_exit(struct pthread *curthread) +{ + struct mutex_link *l; + + while ((l = curthread->robust_mutexes) != NULL) { + mutex_forced_unlock(l->mp); + /* unlock failure? */ + if (l == curthread->robust_mutexes) + forget_lock(curthread, l->mp); + } +} + +void +_mutex_fork_child(struct pthread *curthread) { + struct mutex_link *cur, *prev; - /* - * Clear the count in case this is a recursive mutex. 
- */ - *count = m->m_count; - m->m_count = 0; - (void)mutex_unlock_common(m, 1); - return (0); + /* Remove process-shared PP mutex */ + prev = NULL; + cur = curthread->pp_mutexes; + while (cur != NULL) { + if ((cur->mp->__flags & USYNC_PROCESS) == 0) { + prev = cur; + cur = cur->next; + continue; + } + if (prev == NULL) { + curthread->pp_mutexes = cur->next; + mutex_link_free(cur); + cur = curthread->pp_mutexes; + } else { + prev->next = cur->next; + mutex_link_free(cur); + cur = prev->next; + } + } +#if 0 + _thr_set_ceiling(curthread, _highest_pp_ceiling(curthread)); +#endif } int -_mutex_cv_attach(struct pthread_mutex *m, int count) +_mutex_cv_lock(pthread_mutex_t *mp, int recurse) { - struct pthread *curthread = _get_curthread(); + int error; + + error = _pthread_mutex_lock(mp); + if (error == 0 || error == EOWNERDEAD) + mp->__recurse = recurse; + return (error); +} - ENQUEUE_MUTEX(curthread, m); - m->m_count = count; - return (0); +int +_mutex_cv_unlock(pthread_mutex_t *mp, int *recurse, int *defer) +{ + *recurse = mp->__recurse; + mp->__recurse = 0; + return (mutex_unlock_common(mp, 0, defer)); } int -_mutex_cv_detach(struct pthread_mutex *mp, int *recurse) +_pthread_mutex_consistent(pthread_mutex_t *mp) { - struct pthread *curthread = _get_curthread(); - int defered; - int error; + if ((mp->__flags & UMUTEX_ROBUST) != 0) { + mp->__robstate = ROBST_NORMAL; + return (0); + } + return (EINVAL); +} - if ((error = _mutex_owned(curthread, mp)) != 0) - return (error); +int +_pthread_mutex_getspinloops_np(pthread_mutex_t *mp, int *count) +{ - /* - * Clear the count in case this is a recursive mutex. - */ - *recurse = mp->m_count; - mp->m_count = 0; - DEQUEUE_MUTEX(curthread, mp); + *count = _thr_spinloops; + return (0); +} - /* Will this happen in real-world ? 
*/ - if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) { - defered = 1; - mp->m_flags &= ~PMUTEX_FLAG_DEFERED; - } else - defered = 0; +int +_pthread_mutex_setspinloops_np(pthread_mutex_t *mp, int count) +{ - if (defered) { - _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); - curthread->nwaiter_defer = 0; - } return (0); } -static int -mutex_self_trylock(struct pthread_mutex *m) +int +_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) { - int ret; + *count = _thr_yieldloops; + return (0); +} - switch (PMUTEX_TYPE(m->m_flags)) { - case PTHREAD_MUTEX_ERRORCHECK: - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_ADAPTIVE_NP: - ret = EBUSY; - break; +int +_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) +{ + return (0); +} - case PTHREAD_MUTEX_RECURSIVE: - /* Increment the lock count: */ - if (m->m_count + 1 > 0) { - m->m_count++; - ret = 0; - } else - ret = EAGAIN; - break; +int +_pthread_mutex_isowned_np(pthread_mutex_t *mp) +{ + return _mutex_owned(mp); +} - default: - /* Trap invalid mutex types; */ - ret = EINVAL; - } +int +_mutex_owned_old(pthread_mutex_old_t *mutex) +{ + struct pthread_mutex *mp; - return (ret); + mp = *mutex; + if (mp < THR_MUTEX_DESTROYED) + return (0); + if (mp == THR_MUTEX_DESTROYED) + return (0); + return (_mutex_owned(mp)); } static int -mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) +mutex_init_old(pthread_mutex_old_t *mutex, + const struct pthread_mutex_attr *mutex_attr) { - struct timespec ts1, ts2; - int ret; + struct pthread_mutex *mp; + int error; - switch (PMUTEX_TYPE(m->m_flags)) { - case PTHREAD_MUTEX_ERRORCHECK: - case PTHREAD_MUTEX_ADAPTIVE_NP: - if (abstime) { - if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000) { - ret = EINVAL; - } else { - clock_gettime(CLOCK_REALTIME, &ts1); - TIMESPEC_SUB(&ts2, abstime, &ts1); - __sys_nanosleep(&ts2, NULL); - ret = ETIMEDOUT; - } - } else { - /* - * POSIX specifies that mutexes should return - * EDEADLK if a recursive lock is detected. - */ - ret = EDEADLK; - } - break; + if ((mp = (struct pthread_mutex *) + lmalloc(sizeof(struct pthread_mutex))) == NULL) { + return (ENOMEM); + } + error = mutex_init(mp, mutex_attr); + if (error != 0) + lfree(mp); + else + *mutex = mp; + return (error); +} - case PTHREAD_MUTEX_NORMAL: - /* - * What SS2 define as a 'normal' mutex. Intentionally - * deadlock on attempts to get a lock you already own. 
- */ - ret = 0; - if (abstime) { - if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000) { - ret = EINVAL; - } else { - clock_gettime(CLOCK_REALTIME, &ts1); - TIMESPEC_SUB(&ts2, abstime, &ts1); - __sys_nanosleep(&ts2, NULL); - ret = ETIMEDOUT; - } - } else { - ts1.tv_sec = 30; - ts1.tv_nsec = 0; - for (;;) - __sys_nanosleep(&ts1, NULL); - } - break; +static int +init_static(struct pthread *thread, pthread_mutex_old_t *mutex) +{ + int error; - case PTHREAD_MUTEX_RECURSIVE: - /* Increment the lock count: */ - if (m->m_count + 1 > 0) { - m->m_count++; - ret = 0; - } else - ret = EAGAIN; - break; - - default: - /* Trap invalid mutex types; */ - ret = EINVAL; + THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); + + if (*mutex == THR_MUTEX_INITIALIZER) { + error = mutex_init_old(mutex, &_pthread_mutexattr_default); + } else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) { + error = mutex_init_old(mutex, + &_pthread_mutexattr_adaptive_default); } - - return (ret); + else + error = 0; + THR_LOCK_RELEASE(thread, &_mutex_static_lock); + return (error); } static int -mutex_unlock_common(struct pthread_mutex *m, int cv) +check_and_init_mutex(pthread_mutex_old_t *mutex) { - struct pthread *curthread = _get_curthread(); - uint32_t id; - int defered; + struct pthread_mutex *mp; - if (__predict_false(m <= THR_MUTEX_DESTROYED)) { - if (m == THR_MUTEX_DESTROYED) + if (__predict_false((mp = *mutex) <= THR_MUTEX_DESTROYED)) { + if (mp == THR_MUTEX_DESTROYED) return (EINVAL); - return (EPERM); + int error; + error = init_static(_get_curthread(), mutex); + if (error) + return (error); + mp = *mutex; } + return (0); +} - /* - * Check if the running thread is not the owner of the mutex. - */ - if (__predict_false(m->m_owner != curthread)) - return (EPERM); +int +_pthread_mutex_destroy_1_0(pthread_mutex_old_t *mutex) +{ + pthread_mutex_t *mp; + int error; - id = TID(curthread); - if (__predict_false( - PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && - m->m_count > 0)) { - m->m_count--; + mp = *mutex; + if (mp < THR_MUTEX_DESTROYED) { + error = 0; + } else if (mp == THR_MUTEX_DESTROYED) { + error = EINVAL; } else { - if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) { - defered = 1; - m->m_flags &= ~PMUTEX_FLAG_DEFERED; - } else - defered = 0; + *mutex = THR_MUTEX_DESTROYED; + lfree(mp); + error = 0; + } + + return (error); +} + +int +_pthread_mutex_init_1_0(pthread_mutex_old_t *mutex, + const pthread_mutexattr_t *mutex_attr) +{ + return mutex_init_old(mutex, mutex_attr ? 
*mutex_attr : NULL); +} + +int +_pthread_mutex_trylock_1_0(pthread_mutex_old_t *mutex) +{ + int error; - DEQUEUE_MUTEX(curthread, m); - _thr_umutex_unlock(&m->m_lock, id); + error = check_and_init_mutex(mutex); + if (error != 0) + return (error); - if (curthread->will_sleep == 0 && defered) { - _thr_wake_all(curthread->defer_waiters, - curthread->nwaiter_defer); - curthread->nwaiter_defer = 0; - } - } - if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE) - THR_CRITICAL_LEAVE(curthread); - return (0); + return (mutex_lock_common(*mutex, NULL, M_TRY)); } int -_pthread_mutex_getprioceiling(pthread_mutex_t *mutex, - int *prioceiling) +_pthread_mutex_lock_1_0(pthread_mutex_old_t *mutex) { - struct pthread_mutex *m; - int ret; + int error; - m = *mutex; - if ((m <= THR_MUTEX_DESTROYED) || - (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) - ret = EINVAL; - else { - *prioceiling = m->m_lock.m_ceilings[0]; - ret = 0; - } + _thr_check_init(); - return (ret); + error = check_and_init_mutex(mutex); + if (error != 0) + return (error); + return (mutex_lock_common(*mutex, NULL, 0)); } int -_pthread_mutex_setprioceiling(pthread_mutex_t *mutex, - int ceiling, int *old_ceiling) +_pthread_mutex_timedlock_1_0(pthread_mutex_old_t *mutex, + const struct timespec *abstime) { - struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m, *m1, *m2; - int ret; + int error; + + _thr_check_init(); + + error = check_and_init_mutex(mutex); + if (error != 0) + return (error); - m = *mutex; - if ((m <= THR_MUTEX_DESTROYED) || - (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) - return (EINVAL); + return (mutex_lock_common(*mutex, abstime, 0)); +} - ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); - if (ret != 0) - return (ret); +int +_pthread_mutex_unlock_1_0(pthread_mutex_old_t *mutex) +{ + struct pthread_mutex *mp; - if (m->m_owner == curthread) { - MUTEX_ASSERT_IS_OWNED(m); - m1 = TAILQ_PREV(m, mutex_queue, m_qe); - m2 = TAILQ_NEXT(m, m_qe); - if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || - (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { - TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); - TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) { - if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { - TAILQ_INSERT_BEFORE(m2, m, m_qe); - return (0); - } - } - TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe); - } + mp = *mutex; + if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { + if (mp == THR_MUTEX_DESTROYED) + return (EINVAL); + return (EPERM); } - return (0); + return (mutex_unlock_common(mp, 0, NULL)); } int -_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) +_pthread_mutex_getspinloops_np_1_1(pthread_mutex_old_t *mutex, int *count) { - struct pthread_mutex *m; + int error; - CHECK_AND_INIT_MUTEX + error = check_and_init_mutex(mutex); + if (error != 0) + return (error); - *count = m->m_spinloops; + *count = _thr_spinloops; return (0); } int -__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) +_pthread_mutex_setspinloops_np_1_1(pthread_mutex_old_t *mutex, int count) { - struct pthread_mutex *m; + int error; - CHECK_AND_INIT_MUTEX - - m->m_spinloops = count; + error = check_and_init_mutex(mutex); + if (error != 0) + return (error); return (0); } int -_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) +_pthread_mutex_getyieldloops_np_1_1(pthread_mutex_old_t *mutex, int *count) { - struct pthread_mutex *m; - CHECK_AND_INIT_MUTEX - - *count = m->m_yieldloops; + *count = _thr_yieldloops; return (0); } int 
-__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
+_pthread_mutex_setyieldloops_np_1_1(pthread_mutex_old_t *mutex, int count)
 {
-	struct pthread_mutex *m;
+	int error;
 
-	CHECK_AND_INIT_MUTEX
+	error = check_and_init_mutex(mutex);
+	if (error != 0)
+		return (error);
+	return (0);
+}
 
-	m->m_yieldloops = count;
-	return (0);
+int
+_pthread_mutex_isowned_np_1_1(pthread_mutex_old_t *mutex)
+{
+	return (_mutex_owned_old(mutex));
 }
 
 int
-_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
+_pthread_mutex_getprioceiling_1_0(pthread_mutex_old_t *mutex,
+	int *prioceiling)
 {
-	struct pthread_mutex *m;
+	struct pthread_mutex *mp;
+	int error;
 
-	m = *mutex;
-	if (m <= THR_MUTEX_DESTROYED)
-		return (0);
-	return (m->m_owner == _get_curthread());
+	mp = *mutex;
+	if ((mp <= THR_MUTEX_DESTROYED) ||
+	    (mp->__flags & UMUTEX_PRIO_PROTECT2) == 0)
+		error = EINVAL;
+	else {
+		*prioceiling = mp->__ceiling;
+		error = 0;
+	}
+	return (error);
 }
 
 int
-_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
+_pthread_mutex_setprioceiling_1_0(pthread_mutex_old_t *mutex,
+	int ceiling, int *old_ceiling)
 {
-	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
-		if (mp == THR_MUTEX_DESTROYED)
-			return (EINVAL);
-		return (EPERM);
-	}
-	if (mp->m_owner != curthread)
-		return (EPERM);
-	return (0);
+	struct pthread_mutex *mp;
+
+	mp = *mutex;
+	if ((mp <= THR_MUTEX_DESTROYED) ||
+	    (mp->__flags & UMUTEX_PRIO_PROTECT2) == 0)
+		return (EINVAL);
+
+	return (_pthread_mutex_setprioceiling(mp, ceiling, old_ceiling));
 }
+
+FB10_COMPAT(_pthread_mutex_destroy_1_0, pthread_mutex_destroy);
+FB10_COMPAT(_pthread_mutex_getprioceiling_1_0, pthread_mutex_getprioceiling);
+FB10_COMPAT(_pthread_mutex_init_1_0, pthread_mutex_init);
+FB10_COMPAT(_pthread_mutex_lock_1_0, pthread_mutex_lock);
+FB10_COMPAT(_pthread_mutex_setprioceiling_1_0, pthread_mutex_setprioceiling);
+FB10_COMPAT(_pthread_mutex_timedlock_1_0, pthread_mutex_timedlock);
+FB10_COMPAT(_pthread_mutex_trylock_1_0, pthread_mutex_trylock);
+FB10_COMPAT(_pthread_mutex_unlock_1_0, pthread_mutex_unlock);
+
+FB11_COMPAT(_pthread_mutex_isowned_np_1_1, pthread_mutex_isowned_np);
+FB11_COMPAT(_pthread_mutex_getspinloops_np_1_1, pthread_mutex_getspinloops_np);
+FB11_COMPAT(_pthread_mutex_getyieldloops_np_1_1, pthread_mutex_getyieldloops_np);
+FB11_COMPAT(_pthread_mutex_setspinloops_np_1_1, pthread_mutex_setspinloops_np);
+FB11_COMPAT(_pthread_mutex_setyieldloops_np_1_1, pthread_mutex_setyieldloops_np);
--- src/lib/libthr/thread/thr_mutexattr.c	2008-03-20 12:41:24.000000000 0000
+++ src/lib/libthr/thread/thr_mutexattr.c	2012-05-02 02:09:13.000000000 0000
@@ -81,6 +81,11 @@
 __weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol);
 __weak_reference(_pthread_mutexattr_getprioceiling, pthread_mutexattr_getprioceiling);
 __weak_reference(_pthread_mutexattr_setprioceiling, pthread_mutexattr_setprioceiling);
+__weak_reference(_pthread_mutexattr_getrobust, pthread_mutexattr_getrobust);
+__weak_reference(_pthread_mutexattr_setrobust, pthread_mutexattr_setrobust);
+
+int _pthread_mutexattr_setpshared_1_0(pthread_mutexattr_t *attr, int pshared);
+FB10_COMPAT(_pthread_mutexattr_setpshared_1_0, pthread_mutexattr_setpshared);
 
 int
 _pthread_mutexattr_init(pthread_mutexattr_t *attr)
@@ -89,7 +94,7 @@
 	pthread_mutexattr_t pattr;
 
 	if ((pattr = (pthread_mutexattr_t)
-	    malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
+	    lmalloc(sizeof(struct pthread_mutex_attr))) == NULL) {
 		ret = ENOMEM;
 	} else {
 		memcpy(pattr, &_pthread_mutexattr_default,
@@ -162,7 +167,7 @@
 	if (attr == NULL || *attr == NULL) {
 		ret = EINVAL;
 	} else {
-		free(*attr);
+		lfree(*attr);
 		*attr = NULL;
 		ret = 0;
 	}
@@ -177,7 +182,7 @@
 	if (attr == NULL || *attr == NULL)
 		return (EINVAL);
 
-	*pshared = PTHREAD_PROCESS_PRIVATE;
+	*pshared = (*attr)->m_pshared;
 	return (0);
 }
 
@@ -189,9 +194,11 @@
 		return (EINVAL);
 
-	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
-	if (pshared != PTHREAD_PROCESS_PRIVATE)
+	/* Both private and process-shared mutexes are supported. */
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
 		return (EINVAL);
 
+	(*attr)->m_pshared = pshared;
 	return (0);
 }
 
@@ -218,7 +225,6 @@
 		ret = EINVAL;
 	else {
 		(*mattr)->m_protocol = protocol;
-		(*mattr)->m_ceiling = THR_MAX_RR_PRIORITY;
 	}
 	return(ret);
 }
@@ -247,9 +253,57 @@
 		ret = EINVAL;
 	else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
 		ret = EINVAL;
+	else if (prioceiling < 0 || prioceiling > THR_MAX_RR_PRIORITY)
+		ret = EINVAL;
 	else
 		(*mattr)->m_ceiling = prioceiling;
 	return(ret);
 }
+
+int
+_pthread_mutexattr_getrobust(const pthread_mutexattr_t *mattr,
+	int *robust)
+{
+	int error;
+
+	if ((mattr == NULL) || (*mattr == NULL))
+		error = EINVAL;
+	else {
+		*robust = (*mattr)->m_robust;
+		error = 0;
+	}
+	return (error);
+}
+
+int
+_pthread_mutexattr_setrobust(pthread_mutexattr_t *mattr,
+	int robust)
+{
+	int error;
+
+	if ((mattr == NULL) || (*mattr == NULL))
+		error = EINVAL;
+	else if (robust == PTHREAD_MUTEX_STALLED ||
+	    robust == PTHREAD_MUTEX_ROBUST) {
+		(*mattr)->m_robust = robust;
+		error = 0;
+	} else {
+		error = EINVAL;
+	}
+	return (error);
+}
+
+int
+_pthread_mutexattr_setpshared_1_0(pthread_mutexattr_t *attr, int pshared)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
+	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
+	if (pshared != PTHREAD_PROCESS_PRIVATE)
+		return (EINVAL);
+
+	(*attr)->m_pshared = pshared;
+	return (0);
+}
--- src/lib/libthr/thread/thr_once.c	2011-04-20 14:35:39.000000000 0000
+++ src/lib/libthr/thread/thr_once.c	2012-05-02 02:09:13.000000000 0000
@@ -75,9 +75,9 @@
 			break;
 		} else if (state == ONCE_IN_PROGRESS) {
 			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_WAIT))
-				_thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, NULL, 0);
+				_thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, 0);
 		} else if (state == ONCE_WAIT) {
-			_thr_umtx_wait_uint(&once_control->state, state, NULL, 0);
+			_thr_umtx_wait_uint(&once_control->state, state, 0);
 		} else
 			return (EINVAL);
 	}
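[Sketch] The once path above parks waiters directly on the state word. A standalone model of the same state machine, assuming C11 atomics and using sched_yield() as a stand-in for the umtx wait/wake calls; every name here is illustrative, not part of the patch:

/*
 * Hypothetical restatement of the pthread_once state machine.
 * sched_yield() approximates _thr_umtx_wait_uint(); a real build
 * would block in the kernel on the state word instead.
 */
#include <sched.h>
#include <stdatomic.h>

enum { ONCE_NEVER_DONE, ONCE_IN_PROGRESS, ONCE_WAIT, ONCE_DONE };

static void
once_sketch(atomic_int *state, void (*init_routine)(void))
{
	int s;

	for (;;) {
		s = atomic_load(state);
		if (s == ONCE_DONE)
			return;
		if (s == ONCE_NEVER_DONE &&
		    atomic_compare_exchange_strong(state, &s, ONCE_IN_PROGRESS)) {
			init_routine();
			/* ONCE_WAIT means someone may be sleeping on the word. */
			if (atomic_exchange(state, ONCE_DONE) == ONCE_WAIT)
				;	/* real code would wake all waiters here */
			return;
		}
		sched_yield();	/* real code: sleep until the word changes */
	}
}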
--- src/lib/libthr/thread/thr_printf.c	2005-04-02 01:36:12.000000000 0000
+++ src/lib/libthr/thread/thr_printf.c	2012-05-02 02:09:13.000000000 0000
@@ -33,8 +33,17 @@
 
 #include "thr_private.h"
 
-static void	pchar(int fd, char c);
-static void	pstr(int fd, const char *s);
+#define	BUFSZ	80
+
+struct output {
+	char	buf[BUFSZ];
+	int	pos;
+	int	fd;
+};
+
+static void	pchar(struct output *, char c);
+static void	pstr(struct output *, const char *s);
+static void	pflush(struct output *);
 
 /*
  * Write formatted output to stdout, in a thread-safe manner.
@@ -58,7 +67,10 @@
 	int c;
 	long d;
 	int islong;
+	struct output out;
 
+	out.pos = 0;
+	out.fd = fd;
 	va_start(ap, fmt);
 	while ((c = *fmt++)) {
 		islong = 0;
@@ -68,10 +80,10 @@
 				goto out;
 			switch (c) {
 			case 'c':
-				pchar(fd, va_arg(ap, int));
+				pchar(&out, va_arg(ap, int));
 				continue;
 			case 's':
-				pstr(fd, va_arg(ap, char *));
+				pstr(&out, va_arg(ap, char *));
 				continue;
 			case 'l':
 				islong = 1;
@@ -88,7 +100,7 @@
 				else
 					d = va_arg(ap, unsigned);
 				if (d < 0) {
-					pchar(fd, '-');
+					pchar(&out, '-');
 					u = (unsigned long)(d * -1);
 				} else
 					u = (unsigned long)d;
@@ -103,33 +115,48 @@
 					*s++ = digits[u % r];
 				} while (u /= r);
 				while (--s >= buf)
-					pchar(fd, *s);
+					pchar(&out, *s);
 				continue;
 			}
 		}
-		pchar(fd, c);
+		pchar(&out, c);
 	}
 out:
 	va_end(ap);
+	pflush(&out);
 }
 
 /*
  * Write a single character to stdout, in a thread-safe manner.
  */
 static void
-pchar(int fd, char c)
+pchar(struct output *out, char c)
 {
-	__sys_write(fd, &c, 1);
+	out->buf[out->pos++] = c;
+	if (out->pos == BUFSZ) {
+		__sys_write(out->fd, out->buf, BUFSZ);
+		out->pos = 0;
+	}
 }
 
 /*
  * Write a string to stdout, in a thread-safe manner.
  */
 static void
-pstr(int fd, const char *s)
+pstr(struct output *out, const char *s)
 {
+	int i, len = strlen(s);
 
-	__sys_write(fd, s, strlen(s));
+	for (i = 0; i < len; ++i)
+		pchar(out, s[i]);
 }
 
+static void
+pflush(struct output *out)
+{
+	if (out->pos != 0) {
+		__sys_write(out->fd, out->buf, out->pos);
+		out->pos = 0;
+	}
+}
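[Sketch] The change above batches single-character writes into one write(2) per 80 bytes instead of one syscall per byte. The same pattern in isolation, assuming only POSIX write(); BUFSZ and the struct mirror the patch, the function names are illustrative:

#include <string.h>
#include <unistd.h>

#define	BUFSZ	80

struct output {
	char	buf[BUFSZ];
	int	pos;
	int	fd;
};

/* Emit whatever is buffered with a single syscall. */
static void
out_flush(struct output *out)
{
	if (out->pos > 0) {
		(void)write(out->fd, out->buf, out->pos);
		out->pos = 0;
	}
}

/* Buffer one byte, flushing only when the buffer fills. */
static void
out_char(struct output *out, char c)
{
	out->buf[out->pos++] = c;
	if (out->pos == BUFSZ)
		out_flush(out);
}

static void
out_str(struct output *out, const char *s)
{
	size_t i, len = strlen(s);

	for (i = 0; i < len; i++)
		out_char(out, s[i]);
}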
--- src/lib/libthr/thread/thr_private.h	2012-05-03 09:35:39.000000000 0000
+++ src/lib/libthr/thread/thr_private.h	2012-05-16 06:19:29.000000000 0000
@@ -53,7 +53,9 @@
 #include 
 
 #define	SYM_FB10(sym)		__CONCAT(sym, _fb10)
+#define	SYM_FB11(sym)		__CONCAT(sym, _fb11)
 #define	SYM_FBP10(sym)		__CONCAT(sym, _fbp10)
+#define	SYM_FBP11(sym)		__CONCAT(sym, _fbp11)
 #define	WEAK_REF(sym, alias)	__weak_reference(sym, alias)
 #define	SYM_COMPAT(sym, impl, ver)	__sym_compat(sym, impl, ver)
 #define	SYM_DEFAULT(sym, impl, ver)	__sym_default(sym, impl, ver)
@@ -62,6 +64,10 @@
 	WEAK_REF(func, SYM_FB10(sym));		\
 	SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0)
 
+#define	FB11_COMPAT(func, sym)			\
+	WEAK_REF(func, SYM_FB11(sym));		\
+	SYM_COMPAT(sym, SYM_FB11(sym), FBSD_1.1)
+
 #define	FB10_COMPAT_PRIVATE(func, sym)		\
 	WEAK_REF(func, SYM_FBP10(sym));		\
 	SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0)
@@ -77,9 +83,12 @@
 typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist;
 typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;
-TAILQ_HEAD(mutex_queue, pthread_mutex);
+
+struct mutex_link {
+	struct mutex_link	*next;
+	struct pthread_mutex	*mp;
+};
 
-/* Signal to do cancellation */
 #define	SIGCANCEL	SIGTHR
 
 /*
@@ -135,75 +144,27 @@
 #define	THR_RWLOCK_INITIALIZER	((struct pthread_rwlock *)NULL)
 #define	THR_RWLOCK_DESTROYED	((struct pthread_rwlock *)1)
 
-#define PMUTEX_FLAG_TYPE_MASK	0x0ff
-#define PMUTEX_FLAG_PRIVATE	0x100
-#define PMUTEX_FLAG_DEFERED	0x200
-#define PMUTEX_TYPE(mtxflags)	((mtxflags) & PMUTEX_FLAG_TYPE_MASK)
+#define	MAX_DEFER_WAITERS	50
 
-#define	MAX_DEFER_WAITERS	50
+#define	UMTX(mp)	((struct umutex *)&(mp)->__lockword)
 
-struct pthread_mutex {
-	/*
-	 * Lock for accesses to this structure.
-	 */
-	struct umutex	m_lock;
-	int		m_flags;
-	struct pthread	*m_owner;
-	int		m_count;
-	int		m_spinloops;
-	int		m_yieldloops;
-	/*
-	 * Link for all mutexes a thread currently owns.
-	 */
-	TAILQ_ENTRY(pthread_mutex)	m_qe;
-};
-
 struct pthread_mutex_attr {
 	enum pthread_mutextype	m_type;
 	int	m_protocol;
 	int	m_ceiling;
+	int	m_pshared;
+	int	m_robust;
 };
 
-#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
-	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
-
-struct pthread_cond {
-	__uint32_t	__has_user_waiters;
-	__uint32_t	__has_kern_waiters;
-	__uint32_t	__flags;
-	__uint32_t	__clock_id;
-};
-
 struct pthread_cond_attr {
 	int	c_pshared;
 	int	c_clockid;
 };
 
-struct pthread_barrier {
-	struct umutex	b_lock;
-	struct ucond	b_cv;
-	int64_t		b_cycle;
-	int		b_count;
-	int		b_waiters;
-	int		b_refcount;
-	int		b_destroying;
-};
-
 struct pthread_barrierattr {
 	int	pshared;
 };
 
-struct pthread_spinlock {
-	struct umutex	s_lock;
-};
-
-/*
- * Flags for condition variables.
- */
-#define COND_FLAGS_PRIVATE	0x01
-#define COND_FLAGS_INITED	0x02
-#define COND_FLAGS_BUSY		0x04
-
 /*
  * Cleanup definitions.
  */
@@ -288,34 +249,13 @@
 /*
  * Define priorities returned by kernel.
  */
-#define THR_MIN_PRIORITY	(_thr_priorities[SCHED_OTHER-1].pri_min)
-#define THR_MAX_PRIORITY	(_thr_priorities[SCHED_OTHER-1].pri_max)
-#define THR_DEF_PRIORITY	(_thr_priorities[SCHED_OTHER-1].pri_default)
-
-#define THR_MIN_RR_PRIORITY	(_thr_priorities[SCHED_RR-1].pri_min)
-#define THR_MAX_RR_PRIORITY	(_thr_priorities[SCHED_RR-1].pri_max)
-#define THR_DEF_RR_PRIORITY	(_thr_priorities[SCHED_RR-1].pri_default)
-
-/* XXX The SCHED_FIFO should have same priority range as SCHED_RR */
-#define THR_MIN_FIFO_PRIORITY	(_thr_priorities[SCHED_FIFO_1].pri_min)
-#define THR_MAX_FIFO_PRIORITY	(_thr_priorities[SCHED_FIFO-1].pri_max)
-#define THR_DEF_FIFO_PRIORITY	(_thr_priorities[SCHED_FIFO-1].pri_default)
+#define	THR_MIN_RR_PRIORITY	0
+#define	THR_MAX_RR_PRIORITY	31
 
-struct pthread_prio {
-	int	pri_min;
-	int	pri_max;
-	int	pri_default;
-};
-
 struct pthread_rwlockattr {
 	int	pshared;
 };
 
-struct pthread_rwlock {
-	struct urwlock	lock;
-	struct pthread	*owner;
-};
-
 /*
  * Thread states.
  */
@@ -380,7 +320,7 @@
 	LIST_ENTRY(pthread)	hle;
 
 	/* Sleep queue entry */
-	TAILQ_ENTRY(pthread)	wle;
+	TAILQ_ENTRY(pthread)	wle;
 
 	/* Threads reference count. */
 	int refcount;
@@ -464,12 +404,9 @@
 #define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
 #define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
 
-	/* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */
-	struct mutex_queue	mutexq;
+	struct mutex_link	*pp_mutexes;
+	struct mutex_link	*robust_mutexes;
 
-	/* Queue of all owned PRIO_PROTECT mutexes. */
-	struct mutex_queue	pp_mutexq;
-
 	void				*ret;
 	struct pthread_specific_elem	*specific;
 	int				specific_data_count;
@@ -493,6 +430,12 @@
 	int			unwind_disabled;
 #endif
 
+	/* Stack pointer at entry to the outermost signal handler */
+	char			*sig_stackptr;
+
+	/* Alternative signal stack */
+	stack_t			sig_altstack;
+
 	/*
 	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
 	 */
@@ -506,7 +449,7 @@
 	/* Event mask */
 	int			event_mask;
 
-	/* Event */
+	/* Debugging event */
 	td_event_msg_t		event_buf;
 
 	/* Wait channel */
@@ -515,11 +458,17 @@
 	/* Referenced mutex. */
 	struct pthread_mutex	*mutex_obj;
 
-	/* Thread will sleep. */
-	int			will_sleep;
+	/* Delay a wake-up on mutex */
+	unsigned int		delay_wake_mutex;
+
+	/* Should do a wake-up on mutex */
+	unsigned int		wake_mutex;
+
+	/* Deferred mutex */
+	struct pthread_mutex	*defered_mutex;
 
 	/* Number of threads deferred. */
-	int			nwaiter_defer;
+	int			ndefer;
 
 	/* Deferred threads from pthread_cond_signal. */
 	unsigned int		*defer_waiters[MAX_DEFER_WAITERS];
@@ -531,6 +480,9 @@
 	/* Sleep queue */
 	struct sleepqueue	*sleepqueue;
 
+	/* Latest inherited priority */
+	int			inherited_prio;
+
 };
 
 #define THR_SHOULD_GC(thrd) 	\
@@ -559,6 +511,9 @@
 #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo)	\
 	_thr_umutex_timedlock((lck), TID(thrd), (timo))
 
+#define THR_UMUTEX_LOCK_SPIN(thrd, lck)		\
+	_thr_umutex_lock_spin((lck), TID(thrd))
+
 #define THR_UMUTEX_UNLOCK(thrd, lck)		\
 	_thr_umutex_unlock((lck), TID(thrd))
 
@@ -699,8 +654,6 @@
 /* Default condition variable attributes: */
 extern struct pthread_cond_attr _pthread_condattr_default __hidden;
 
-extern struct pthread_prio _thr_priorities[] __hidden;
-
 extern pid_t	_thr_pid __hidden;
 extern int	_thr_is_smp __hidden;
 
@@ -715,6 +668,7 @@
 /* Garbage thread count. */
 extern int	_gc_count __hidden;
 
+extern struct umutex	_lmalloc_lock __hidden;
 extern struct umutex	_mutex_static_lock __hidden;
 extern struct umutex	_cond_static_lock __hidden;
 extern struct umutex	_rwlock_static_lock __hidden;
@@ -727,13 +681,13 @@
 */
 __BEGIN_DECLS
 int	_thr_setthreaded(int) __hidden;
-int	_mutex_cv_lock(struct pthread_mutex *, int count) __hidden;
-int	_mutex_cv_unlock(struct pthread_mutex *, int *count) __hidden;
-int	_mutex_cv_attach(struct pthread_mutex *, int count) __hidden;
-int	_mutex_cv_detach(struct pthread_mutex *, int *count) __hidden;
-int	_mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden;
-int	_mutex_reinit(pthread_mutex_t *) __hidden;
-void	_mutex_fork(struct pthread *curthread) __hidden;
+int	_mutex_owned(const pthread_mutex_t *) __hidden;
+void	_mutex_thread_exit(struct pthread *);
+void	_mutex_fork_child(struct pthread *curthread) __hidden;
+int	_mutex_cv_lock(pthread_mutex_t *, int) __hidden;
+int	_mutex_cv_unlock(pthread_mutex_t *, int *, int *) __hidden;
+int	_highest_pp_ceiling(struct pthread *) __hidden;
+void	_dequeue_pp_mutex(struct pthread *, struct pthread_mutex *);
 void	_libpthread_init(struct pthread *) __hidden;
 struct pthread *_thr_alloc(struct pthread *) __hidden;
 void	_thread_exit(const char *, int, const char *) __hidden __dead2;
@@ -774,6 +728,8 @@
 void	_thr_report_death(struct pthread *curthread) __hidden;
 int	_thr_getscheduler(lwpid_t, int *, struct sched_param *) __hidden;
 int	_thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden;
+int	_thr_enter_ceiling(struct pthread *, int);
+void	_thr_set_ceiling(struct pthread *, int);
 void	_thr_signal_prefork(void) __hidden;
 void	_thr_signal_postfork(void) __hidden;
 void	_thr_signal_postfork_child(void) __hidden;
@@ -854,6 +810,14 @@
 		_libpthread_init(NULL);
 }
 
+static inline int
+_validate_timespec(const struct timespec *tsp)
+{
+	if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
+		return (0);
+	return (1);
+}
+
 struct wake_addr *_thr_alloc_wake_addr(void);
 void	_thr_release_wake_addr(struct wake_addr *);
 int	_thr_sleep(struct pthread *, int, const struct timespec *);
@@ -881,6 +845,24 @@
 void _thr_wake_all(unsigned int *waddrs[], int) __hidden;
 
+static inline void
+_thr_flush_defer(struct pthread *curthread)
+{
+	if (curthread->ndefer > 0) {
+		_thr_wake_all(curthread->defer_waiters,
+		    curthread->ndefer);
+		curthread->ndefer = 0;
+	}
+}
+
+static inline void
+_thr_save_waiter(struct pthread *curthread, unsigned int *waddr)
+{
+	if (curthread->ndefer >= MAX_DEFER_WAITERS)
+		_thr_flush_defer(curthread);
+	curthread->defer_waiters[curthread->ndefer++] = waddr;
+}
+
 static inline struct pthread *
 _sleepq_first(struct sleepqueue *sq)
 {
@@ -904,6 +886,52 @@
 void	_thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden;
 void	_thr_stack_fix_protection(struct pthread *thrd);
 
+typedef struct pthread_mutex *pthread_mutex_old_t;
+typedef struct pthread_cond *pthread_cond_old_t;
+typedef struct pthread_rwlock *pthread_rwlock_old_t;
+int	_mutex_owned_old(pthread_mutex_old_t *) __hidden;
+
+/* Compatibility functions for old library versions. */
+int	_pthread_mutex_destroy_1_0(pthread_mutex_old_t *);
+int	_pthread_mutex_init_1_0(pthread_mutex_old_t *, const pthread_mutexattr_t *);
+int	_pthread_mutex_trylock_1_0(pthread_mutex_old_t *);
+int	_pthread_mutex_lock_1_0(pthread_mutex_old_t *);
+int	_pthread_mutex_timedlock_1_0(pthread_mutex_old_t *, const struct timespec *);
+int	_pthread_mutex_unlock_1_0(pthread_mutex_old_t *);
+int	_pthread_mutex_getprioceiling_1_0(pthread_mutex_old_t *, int *);
+int	_pthread_mutex_setprioceiling_1_0(pthread_mutex_old_t *, int, int *);
+int	_pthread_mutex_getspinloops_np_1_1(pthread_mutex_old_t *, int *);
+int	_pthread_mutex_setspinloops_np_1_1(pthread_mutex_old_t *, int);
+int	_pthread_mutex_getyieldloops_np_1_1(pthread_mutex_old_t *, int *);
+int	_pthread_mutex_setyieldloops_np_1_1(pthread_mutex_old_t *, int);
+int	_pthread_mutex_isowned_np_1_1(pthread_mutex_old_t *);
+
+int	_pthread_cond_init_1_0(pthread_cond_old_t *, const pthread_condattr_t *);
+int	_pthread_cond_signal_1_0(pthread_cond_old_t *);
+int	_pthread_cond_destroy_1_0(pthread_cond_old_t *);
+int	_pthread_cond_wait_1_0(pthread_cond_old_t *, pthread_mutex_old_t *);
+int	_pthread_cond_timedwait_1_0(pthread_cond_old_t *, pthread_mutex_old_t *,
+		const struct timespec *);
+int	_pthread_cond_broadcast_1_0(pthread_cond_old_t *);
+
+int	_pthread_rwlock_destroy_1_0(pthread_rwlock_old_t *);
+int	_pthread_rwlock_init_1_0(pthread_rwlock_old_t *,
+		const pthread_rwlockattr_t *);
+int	_pthread_rwlock_timedrdlock_1_0(pthread_rwlock_old_t *,
+		const struct timespec *);
+int	_pthread_rwlock_timedwrlock_1_0(pthread_rwlock_old_t *,
+		const struct timespec *);
+int	_pthread_rwlock_tryrdlock_1_0(pthread_rwlock_old_t *);
+int	_pthread_rwlock_trywrlock_1_0(pthread_rwlock_old_t *);
+int	_pthread_rwlock_rdlock_1_0(pthread_rwlock_old_t *);
+int	_pthread_rwlock_wrlock_1_0(pthread_rwlock_old_t *);
+int	_pthread_rwlock_unlock_1_0(pthread_rwlock_old_t *);
+
+void	*lmalloc(size_t);
+void	*lcalloc(size_t, size_t);
+void	*lrealloc(void *, size_t);
+void	lfree(void *);
+
 __END_DECLS
 
 #endif /* !_THR_PRIVATE_H */
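[Sketch] The _thr_save_waiter/_thr_flush_defer inline pair above batches wake addresses so many waiters can be released with one wake call. The same idea in a self-contained form, assuming a caller-supplied wake_all() that models _thr_wake_all(); the struct and names are illustrative only:

#define	MAX_DEFER_WAITERS	50

struct deferrer {
	unsigned int	*waiters[MAX_DEFER_WAITERS];
	int		ndefer;
};

/* Stand-in for _thr_wake_all(): wakes every queued address at once. */
extern void wake_all(unsigned int *waddrs[], int count);

static inline void
flush_defer(struct deferrer *d)
{
	if (d->ndefer > 0) {
		wake_all(d->waiters, d->ndefer);
		d->ndefer = 0;
	}
}

static inline void
save_waiter(struct deferrer *d, unsigned int *waddr)
{
	/* Flush early rather than overflow the fixed-size array. */
	if (d->ndefer >= MAX_DEFER_WAITERS)
		flush_defer(d);
	d->waiters[d->ndefer++] = waddr;
}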
--- src/lib/libthr/thread/thr_pspinlock.c	2007-10-16 07:45:51.000000000 0000
+++ src/lib/libthr/thread/thr_pspinlock.c	2012-05-02 02:09:13.000000000 0000
@@ -34,105 +34,135 @@
 
 #include "thr_private.h"
 
-#define SPIN_COUNT 100000
-
 __weak_reference(_pthread_spin_init, pthread_spin_init);
 __weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
 __weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
 __weak_reference(_pthread_spin_lock, pthread_spin_lock);
 __weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
 
+typedef pthread_spinlock_t *pthread_spinlock_old_t;
+int	_pthread_spin_destroy_1_0(pthread_spinlock_old_t *);
+int	_pthread_spin_init_1_0(pthread_spinlock_old_t *, int);
+int	_pthread_spin_lock_1_0(pthread_spinlock_old_t *);
+int	_pthread_spin_trylock_1_0(pthread_spinlock_old_t *);
+int	_pthread_spin_unlock_1_0(pthread_spinlock_old_t *);
+
+int
+_pthread_spin_init(pthread_spinlock_t *lckp, int pshared)
+{
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return (EINVAL);
+	lckp->__lock = 0;
+	return (0);
+}
+
+int
+_pthread_spin_destroy(pthread_spinlock_t *lckp)
+{
+	/* Nothing to do. */
+	return (0);
+}
+
 int
-_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
+_pthread_spin_trylock(pthread_spinlock_t *lckp)
 {
-	struct pthread_spinlock	*lck;
-	int ret;
+	if (atomic_cmpset_acq_32(&lckp->__lock, 0, 1))
+		return (0);
+	return (EBUSY);
+}
 
-	if (lock == NULL || pshared != PTHREAD_PROCESS_PRIVATE)
-		ret = EINVAL;
-	else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL)
-		ret = ENOMEM;
-	else {
-		_thr_umutex_init(&lck->s_lock);
-		*lock = lck;
-		ret = 0;
+int
+_pthread_spin_lock(pthread_spinlock_t *lckp)
+{
+	/*
+	 * No argument checking is done here; the lock path
	 * should be as fast as possible.
	 */
+	if (atomic_cmpset_acq_32(&lckp->__lock, 0, 1))
+		return (0);
+	for (;;) {
+		if (*(volatile int32_t *)&(lckp->__lock) == 0)
+			if (atomic_cmpset_acq_32(&lckp->__lock, 0, 1))
+				break;
+		if (!_thr_is_smp)
+			_pthread_yield();
+		else
+			CPU_SPINWAIT;
 	}
+	return (0);
+}
 
-	return (ret);
+int
+_pthread_spin_unlock(pthread_spinlock_t *lckp)
+{
+	lckp->__lock = 0;
+	wmb();
+	return (0);
 }
 
 int
-_pthread_spin_destroy(pthread_spinlock_t *lock)
+_pthread_spin_init_1_0(pthread_spinlock_old_t *lckpp, int pshared)
 {
-	int ret;
+	pthread_spinlock_t *lckp;
 
-	if (lock == NULL || *lock == NULL)
-		ret = EINVAL;
-	else {
-		free(*lock);
-		*lock = NULL;
-		ret = 0;
-	}
-
-	return (ret);
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return (EINVAL);
+
+	lckp = lmalloc(sizeof(pthread_spinlock_t));
+	if (lckp == NULL)
+		return (ENOMEM);
+	lckp->__lock = 0;
+	*lckpp = lckp;
+	return (0);
 }
 
 int
-_pthread_spin_trylock(pthread_spinlock_t *lock)
+_pthread_spin_destroy_1_0(pthread_spinlock_old_t *lckpp)
 {
-	struct pthread *curthread = _get_curthread();
-	struct pthread_spinlock	*lck;
-	int ret;
+	pthread_spinlock_t *lckp = *lckpp;
 
-	if (lock == NULL || (lck = *lock) == NULL)
-		ret = EINVAL;
-	else
-		ret = THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock);
-	return (ret);
+	if (lckp != NULL) {
+		lfree(lckp);
+		*lckpp = NULL;
+		return (0);
+	} else
+		return (EINVAL);
 }
 
 int
-_pthread_spin_lock(pthread_spinlock_t *lock)
+_pthread_spin_trylock_1_0(pthread_spinlock_old_t *lckpp)
 {
-	struct pthread *curthread = _get_curthread();
-	struct pthread_spinlock	*lck;
-	int ret, count;
+	pthread_spinlock_t *lckp = *lckpp;
 
-	if (lock == NULL || (lck = *lock) == NULL)
-		ret = EINVAL;
-	else {
-		count = SPIN_COUNT;
-		while ((ret = THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock)) != 0) {
-			while (lck->s_lock.m_owner) {
-				if (!_thr_is_smp) {
-					_pthread_yield();
-				} else {
-					CPU_SPINWAIT;
+	if (lckp == NULL)
+		return (EINVAL);
+	return _pthread_spin_trylock(lckp);
+}
 
-					if (--count <= 0) {
-						count = SPIN_COUNT;
-						_pthread_yield();
-					}
-				}
-			}
-		}
-		ret = 0;
-	}
+int
+_pthread_spin_lock_1_0(pthread_spinlock_old_t *lckpp)
+{
+	pthread_spinlock_t *lckp = *lckpp;
 
-	return (ret);
+	if (lckp == NULL)
+		return (EINVAL);
+	return _pthread_spin_lock(lckp);
 }
 
 int
-_pthread_spin_unlock(pthread_spinlock_t *lock)
+_pthread_spin_unlock_1_0(pthread_spinlock_old_t *lckpp)
 {
-	struct pthread *curthread = _get_curthread();
-	struct pthread_spinlock	*lck;
-	int ret;
+	pthread_spinlock_t *lckp = *lckpp;
 
-	if (lock == NULL || (lck = *lock) == NULL)
-		ret = EINVAL;
-	else {
-		ret = THR_UMUTEX_UNLOCK(curthread, &lck->s_lock);
-	}
-	return (ret);
+	if (lckp == NULL)
		return (EINVAL);
+	return _pthread_spin_unlock(lckp);
 }
+
+FB10_COMPAT(_pthread_spin_destroy_1_0, pthread_spin_destroy);
+FB10_COMPAT(_pthread_spin_init_1_0, pthread_spin_init);
+FB10_COMPAT(_pthread_spin_lock_1_0, pthread_spin_lock);
+FB10_COMPAT(_pthread_spin_trylock_1_0, pthread_spin_trylock);
+FB10_COMPAT(_pthread_spin_unlock_1_0, pthread_spin_unlock);
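[Sketch] The new spin_lock fast path restated with C11 atomics so it compiles outside libthr. The smp flag and sched_yield() approximate _thr_is_smp and _pthread_yield(); this is an assumption-laden model, not the ABI type from the patch:

#include <errno.h>
#include <sched.h>
#include <stdatomic.h>

struct spin { atomic_int lock; };

static int
spin_trylock(struct spin *sp)
{
	int expected = 0;

	return (atomic_compare_exchange_strong(&sp->lock, &expected, 1) ?
	    0 : EBUSY);
}

static int
spin_lock(struct spin *sp, int smp)
{
	while (spin_trylock(sp) != 0) {
		/* Spin (or yield on UP) while the lock looks held. */
		while (atomic_load_explicit(&sp->lock, memory_order_relaxed)) {
			if (!smp)
				sched_yield();
		}
	}
	return (0);
}

static int
spin_unlock(struct spin *sp)
{
	atomic_store_explicit(&sp->lock, 0, memory_order_release);
	return (0);
}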
--- src/lib/libthr/thread/thr_rwlock.c	2012-02-27 14:36:22.000000000 0000
+++ src/lib/libthr/thread/thr_rwlock.c	2012-05-04 08:54:36.000000000 0000
@@ -1,4 +1,5 @@
 /*-
+ * Copyright (c) 2010 David Xu
  * Copyright (c) 1998 Alex Nash
  * All rights reserved.
 *
@@ -29,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "namespace.h"
 #include 
@@ -45,89 +47,54 @@
 __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
 __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
 
-#define CHECK_AND_INIT_RWLOCK							\
-	if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) {	\
-		if (prwlock == THR_RWLOCK_INITIALIZER) {			\
-			int ret;						\
-			ret = init_static(_get_curthread(), rwlock);		\
-			if (ret)						\
-				return (ret);					\
-		} else if (prwlock == THR_RWLOCK_DESTROYED) {			\
-			return (EINVAL);					\
-		}								\
-		prwlock = *rwlock;						\
-	}
+#define	RWL_PSHARED(rwp)	((rwp->__flags & USYNC_PROCESS_SHARED) != 0)
 
 /*
 * Prototypes
 */
 
 static int
-rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
+rwlock_init(struct pthread_rwlock *rwp, const pthread_rwlockattr_t *attr)
 {
-	pthread_rwlock_t prwlock;
+
+	memset(rwp, 0, sizeof(*rwp));
+	rwp->__magic = _PTHREAD_RWLOCK_MAGIC;
+	if (attr == NULL || *attr == NULL)
+		return (0);
+	else {
+		if ((*attr)->pshared)
+			rwp->__flags |= USYNC_PROCESS_SHARED;
+	}
 
-	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
-	if (prwlock == NULL)
-		return (ENOMEM);
-	*rwlock = prwlock;
 	return (0);
 }
 
-int
-_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
+static int
+rwlock_destroy_common(struct pthread_rwlock *rwp)
 {
-	pthread_rwlock_t prwlock;
-	int ret;
-
-	prwlock = *rwlock;
-	if (prwlock == THR_RWLOCK_INITIALIZER)
-		ret = 0;
-	else if (prwlock == THR_RWLOCK_DESTROYED)
-		ret = EINVAL;
-	else {
-		*rwlock = THR_RWLOCK_DESTROYED;
-
-		free(prwlock);
-		ret = 0;
-	}
-	return (ret);
+	memset(rwp, 0, sizeof(*rwp));
+	return (0);
 }
 
-static int
-init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
+int
+_pthread_rwlock_destroy (pthread_rwlock_t *rwp)
 {
-	int ret;
-
-	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
-
-	if (*rwlock == THR_RWLOCK_INITIALIZER)
-		ret = rwlock_init(rwlock, NULL);
-	else
-		ret = 0;
-
-	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
-
-	return (ret);
+	return rwlock_destroy_common(rwp);
 }
 
 int
-_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
+_pthread_rwlock_init(pthread_rwlock_t *rwp, const pthread_rwlockattr_t *attr)
 {
-	*rwlock = NULL;
-	return (rwlock_init(rwlock, attr));
+	return (rwlock_init(rwp, attr));
 }
 
 static int
-rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
+rwlock_rdlock_common(struct pthread_rwlock *rwlp, const struct timespec *abstime)
 {
 	struct pthread *curthread = _get_curthread();
-	pthread_rwlock_t prwlock;
 	int flags;
-	int ret;
+	int error;
 
-	CHECK_AND_INIT_RWLOCK
-
 	if (curthread->rdlock_count) {
 		/*
		 * To avoid having to track all the rdlocks held by
@@ -147,58 +114,54 @@
 	}
 
 	/*
-	 * POSIX said the validity of the abstimeout parameter need
+	 * POSIX said the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
-	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
-	if (ret == 0) {
+	error = _thr_rwlock_tryrdlock((struct urwlock *)&rwlp->__state, flags);
+	if (error == 0) {
 		curthread->rdlock_count++;
-		return (ret);
+		return (error);
 	}
 
-	if (__predict_false(abstime &&
-	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
+	if (__predict_false(abstime && !_validate_timespec(abstime)))
 		return (EINVAL);
 
 	for (;;) {
 		/* goto kernel and lock it */
-		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
-		if (ret != EINTR)
+		error = __thr_rwlock_rdlock((struct urwlock *)&rwlp->__state, flags, abstime);
+		if (error != EINTR)
 			break;
 
 		/* if interrupted, try to lock it in userland again. */
-		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
-			ret = 0;
+		if (_thr_rwlock_tryrdlock((struct urwlock *)&rwlp->__state, flags) == 0) {
+			error = 0;
 			break;
 		}
 	}
-	if (ret == 0)
+	if (error == 0)
 		curthread->rdlock_count++;
-	return (ret);
+	return (error);
 }
 
 int
-_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_rdlock (pthread_rwlock_t *rwlp)
 {
-	return (rwlock_rdlock_common(rwlock, NULL));
+	return (rwlock_rdlock_common(rwlp, NULL));
 }
 
 int
-_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
-	 const struct timespec *abstime)
+_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlp,
+	const struct timespec *abstime)
 {
-	return (rwlock_rdlock_common(rwlock, abstime));
+	return (rwlock_rdlock_common(rwlp, abstime));
 }
 
 int
-_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlp)
 {
 	struct pthread *curthread = _get_curthread();
-	pthread_rwlock_t prwlock;
 	int flags;
-	int ret;
-
-	CHECK_AND_INIT_RWLOCK
+	int error;
 
 	if (curthread->rdlock_count) {
 		/*
@@ -218,107 +181,265 @@
 		flags = 0;
 	}
 
-	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
-	if (ret == 0)
+	error = _thr_rwlock_tryrdlock((struct urwlock *)&rwlp->__state, flags);
+	if (error == 0)
 		curthread->rdlock_count++;
-	return (ret);
+	return (error);
+}
+
+static void
+rwlock_setowner(struct pthread_rwlock *rwlp, struct pthread *td)
+{
+	if (!RWL_PSHARED(rwlp))
+		rwlp->__ownerdata.__ownertd = td;
+	else
+		rwlp->__ownerdata.__ownertid = TID(td);
 }
 
 int
-_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlp)
 {
 	struct pthread *curthread = _get_curthread();
-	pthread_rwlock_t prwlock;
-	int ret;
+	int error;
 
-	CHECK_AND_INIT_RWLOCK
-
-	ret = _thr_rwlock_trywrlock(&prwlock->lock);
-	if (ret == 0)
-		prwlock->owner = curthread;
-	return (ret);
+	error = _thr_rwlock_trywrlock((struct urwlock *)&rwlp->__state);
+	if (error == 0)
+		rwlock_setowner(rwlp, curthread);
+	return (error);
 }
 
 static int
-rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
+rwlock_wrlock_common(pthread_rwlock_t *rwlp, const struct timespec *abstime)
 {
 	struct pthread *curthread = _get_curthread();
-	pthread_rwlock_t prwlock;
-	int ret;
-
-	CHECK_AND_INIT_RWLOCK
+	int error;
 
 	/*
-	 * POSIX said the validity of the abstimeout parameter need
+	 * POSIX said the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
-	ret = _thr_rwlock_trywrlock(&prwlock->lock);
-	if (ret == 0) {
-		prwlock->owner = curthread;
-		return (ret);
+	error = _thr_rwlock_trywrlock((struct urwlock *)&rwlp->__state);
+	if (error == 0) {
+		rwlock_setowner(rwlp, curthread);
+		return (error);
 	}
 
-	if (__predict_false(abstime &&
-	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
+	if (__predict_false(abstime && !_validate_timespec(abstime)))
 		return (EINVAL);
 
 	for (;;) {
 		/* goto kernel and lock it */
-		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
-		if (ret == 0) {
-			prwlock->owner = curthread;
+		error = __thr_rwlock_wrlock((struct urwlock *)&rwlp->__state, abstime);
+		if (error == 0) {
+			rwlock_setowner(rwlp, curthread);
 			break;
 		}
 
-		if (ret != EINTR)
+		if (error != EINTR)
 			break;
 
 		/* if interrupted, try to lock it in userland again. */
-		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
-			ret = 0;
-			prwlock->owner = curthread;
+		if (_thr_rwlock_trywrlock((struct urwlock *)&rwlp->__state) == 0) {
+			error = 0;
+			rwlock_setowner(rwlp, curthread);
 			break;
 		}
 	}
-	return (ret);
+	return (error);
 }
 
 int
-_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_wrlock (pthread_rwlock_t *rwlp)
 {
-	return (rwlock_wrlock_common (rwlock, NULL));
+	return (rwlock_wrlock_common(rwlp, NULL));
 }
 
 int
-_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
-    const struct timespec *abstime)
+_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlp,
+    const struct timespec *abstime)
 {
-	return (rwlock_wrlock_common (rwlock, abstime));
+	return (rwlock_wrlock_common(rwlp, abstime));
 }
 
 int
-_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_unlock(pthread_rwlock_t *rwlp)
 {
 	struct pthread *curthread = _get_curthread();
-	pthread_rwlock_t prwlock;
-	int ret;
-	int32_t state;
+	int error;
+	uint32_t state;
+
+	state = rwlp->__state;
+	if (state & URWLOCK_WRITE_OWNER) {
+		if (RWL_PSHARED(rwlp) &&
+		    rwlp->__ownerdata.__ownertid == TID(curthread)) {
+			rwlp->__ownerdata.__ownertid = 0;
+		} else if (!RWL_PSHARED(rwlp) &&
+		    rwlp->__ownerdata.__ownertd == curthread) {
+			rwlp->__ownerdata.__ownertd = NULL;
+		} else
+			return (EPERM);
+	}
+	error = _thr_rwlock_unlock((struct urwlock *)&rwlp->__state);
+	if (error == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
+		curthread->rdlock_count--;
+	return (error);
+}
+
+#define	CHECK_AND_INIT_RWLOCK						\
+	if (__predict_false((rwlp = (*rwlpp)) <= THR_RWLOCK_DESTROYED)) { \
+		if (rwlp == THR_RWLOCK_INITIALIZER) {			\
+			int error;					\
+			error = init_static(_get_curthread(), rwlpp);	\
+			if (error)					\
+				return (error);				\
+		} else if (rwlp == THR_RWLOCK_DESTROYED) {		\
+			return (EINVAL);				\
+		}							\
+		rwlp = *rwlpp;						\
+	}
+
+static int
+rwlock_init_old(pthread_rwlock_old_t *rwlpp, const pthread_rwlockattr_t *attr)
+{
+	struct pthread_rwlock *rwlp;
+	int error;
+
+	rwlp = (struct pthread_rwlock *)lmalloc(sizeof(struct pthread_rwlock));
+	if (rwlp == NULL)
+		return (ENOMEM);
+	error = rwlock_init(rwlp, attr);
+	if (error) {
+		lfree(rwlp);
+		return (error);
+	}
+	*rwlpp = rwlp;
+	return (0);
+}
+
+static int
+init_static(struct pthread *thread, pthread_rwlock_old_t *rwlpp)
+{
+	int error;
+
+	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
+
+	if (*rwlpp == THR_RWLOCK_INITIALIZER)
+		error = rwlock_init_old(rwlpp, NULL);
+	else
+		error = 0;
+
+	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
 
-	prwlock = *rwlock;
+	return (error);
+}
 
-	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
-		return (EINVAL);
+int
+_pthread_rwlock_destroy_1_0(pthread_rwlock_old_t *rwlpp)
+{
+	struct pthread_rwlock *rwlp;
+	int error;
 
-	state = prwlock->lock.rw_state;
-	if (state & URWLOCK_WRITE_OWNER) {
-		if (__predict_false(prwlock->owner != curthread))
-			return (EPERM);
-		prwlock->owner = NULL;
+	rwlp = *rwlpp;
+	if (rwlp == THR_RWLOCK_INITIALIZER)
+		error = 0;
+	else if (rwlp == THR_RWLOCK_DESTROYED)
+		error = EINVAL;
+	else {
+		error = rwlock_destroy_common(rwlp);
+		if (error)
+			return (error);
+		*rwlpp = THR_RWLOCK_DESTROYED;
+		lfree(rwlp);
 	}
+	return (error);
+}
+
+int
+_pthread_rwlock_init_1_0(pthread_rwlock_old_t *rwlpp, const pthread_rwlockattr_t *attr)
+{
+	*rwlpp = NULL;
+	return (rwlock_init_old(rwlpp, attr));
+}
+
+int
+_pthread_rwlock_timedrdlock_1_0(pthread_rwlock_old_t *rwlpp,
+	const struct timespec *abstime)
+{
+	struct pthread_rwlock *rwlp;
+
+	CHECK_AND_INIT_RWLOCK
+
+	return (rwlock_rdlock_common(rwlp, abstime));
+}
+
+int
+_pthread_rwlock_timedwrlock_1_0(pthread_rwlock_old_t *rwlpp,
+	const struct timespec *abstime)
+{
+	struct pthread_rwlock *rwlp;
+
+	CHECK_AND_INIT_RWLOCK
+
+	return (rwlock_wrlock_common(rwlp, abstime));
+}
+
+int
+_pthread_rwlock_tryrdlock_1_0(pthread_rwlock_old_t *rwlpp)
+{
+	struct pthread_rwlock *rwlp;
+
+	CHECK_AND_INIT_RWLOCK
+
+	return _pthread_rwlock_tryrdlock(rwlp);
+}
+
+int
+_pthread_rwlock_trywrlock_1_0(pthread_rwlock_old_t *rwlpp)
+{
+	struct pthread_rwlock *rwlp;
+
+	CHECK_AND_INIT_RWLOCK
+
+	return _pthread_rwlock_trywrlock(rwlp);
+}
+
+int
+_pthread_rwlock_rdlock_1_0(pthread_rwlock_old_t *rwlpp)
+{
+	struct pthread_rwlock *rwlp;
+
+	CHECK_AND_INIT_RWLOCK
+
+	return rwlock_rdlock_common(rwlp, NULL);
+}
+
+int
+_pthread_rwlock_wrlock_1_0(pthread_rwlock_old_t *rwlpp)
+{
+	struct pthread_rwlock *rwlp;
+
+	CHECK_AND_INIT_RWLOCK
+
+	return (rwlock_wrlock_common(rwlp, NULL));
+}
 
-	ret = _thr_rwlock_unlock(&prwlock->lock);
-	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
-		curthread->rdlock_count--;
+int
+_pthread_rwlock_unlock_1_0(pthread_rwlock_old_t *rwlpp)
+{
+	struct pthread_rwlock *rwlp;
 
-	return (ret);
+	rwlp = *rwlpp;
+	if (__predict_false(rwlp <= THR_RWLOCK_DESTROYED))
+		return (EINVAL);
+	return _pthread_rwlock_unlock(rwlp);
 }
+
+FB10_COMPAT(_pthread_rwlock_destroy_1_0, pthread_rwlock_destroy);
+FB10_COMPAT(_pthread_rwlock_init_1_0, pthread_rwlock_init);
+FB10_COMPAT(_pthread_rwlock_rdlock_1_0, pthread_rwlock_rdlock);
+FB10_COMPAT(_pthread_rwlock_timedrdlock_1_0, pthread_rwlock_timedrdlock);
+FB10_COMPAT(_pthread_rwlock_tryrdlock_1_0, pthread_rwlock_tryrdlock);
+FB10_COMPAT(_pthread_rwlock_trywrlock_1_0, pthread_rwlock_trywrlock);
+FB10_COMPAT(_pthread_rwlock_unlock_1_0, pthread_rwlock_unlock);
+FB10_COMPAT(_pthread_rwlock_wrlock_1_0, pthread_rwlock_wrlock);
+FB10_COMPAT(_pthread_rwlock_timedwrlock_1_0, pthread_rwlock_timedwrlock);
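[Sketch] Both lock paths above share one shape: try cheaply in userland, sleep in the kernel, and on EINTR retry the userland path before sleeping again. A generic restatement under the assumption of caller-supplied try_fn/sleep_fn callbacks (hypothetical names):

#include <errno.h>

/*
 * try_fn: non-blocking attempt, returns 0 on success.
 * sleep_fn: blocks in the kernel, may return EINTR.
 */
static int
lock_retry_loop(int (*try_fn)(void *), int (*sleep_fn)(void *), void *lock)
{
	int error;

	if (try_fn(lock) == 0)
		return (0);
	for (;;) {
		error = sleep_fn(lock);
		if (error != EINTR)
			break;
		/* Interrupted: retry the cheap userland path first. */
		if (try_fn(lock) == 0)
			return (0);
	}
	return (error);
}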
--- src/lib/libthr/thread/thr_rwlockattr.c	2010-10-20 02:35:31.000000000 0000
+++ src/lib/libthr/thread/thr_rwlockattr.c	2012-05-02 02:09:13.000000000 0000
@@ -39,6 +39,9 @@
 __weak_reference(_pthread_rwlockattr_init, pthread_rwlockattr_init);
 __weak_reference(_pthread_rwlockattr_setpshared, pthread_rwlockattr_setpshared);
 
+int _pthread_rwlockattr_setpshared_1_0(pthread_rwlockattr_t *, int);
+FB10_COMPAT(_pthread_rwlockattr_setpshared_1_0, pthread_rwlockattr_setpshared);
+
 int
 _pthread_rwlockattr_destroy(pthread_rwlockattr_t *rwlockattr)
 {
@@ -52,7 +55,7 @@
 	if (prwlockattr == NULL)
 		return(EINVAL);
 
-	free(prwlockattr);
+	lfree(prwlockattr);
 
 	return(0);
 }
@@ -61,6 +64,9 @@
 _pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *rwlockattr,
	int *pshared)
 {
+	if (rwlockattr == NULL || *rwlockattr == NULL)
+		return (EINVAL);
+
 	*pshared = (*rwlockattr)->pshared;
 
 	return(0);
@@ -75,7 +81,7 @@
 		return(EINVAL);
 
 	prwlockattr = (pthread_rwlockattr_t)
-	    malloc(sizeof(struct pthread_rwlockattr));
+	    lmalloc(sizeof(struct pthread_rwlockattr));
 
 	if (prwlockattr == NULL)
 		return(ENOMEM);
@@ -89,6 +95,24 @@
 int
 _pthread_rwlockattr_setpshared(pthread_rwlockattr_t *rwlockattr, int pshared)
 {
+	if (rwlockattr == NULL || *rwlockattr == NULL)
+		return (EINVAL);
+
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return(EINVAL);
+
+	(*rwlockattr)->pshared = pshared;
+
+	return(0);
+}
+
+int
+_pthread_rwlockattr_setpshared_1_0(pthread_rwlockattr_t *rwlockattr, int pshared)
+{
+	if (rwlockattr == NULL || *rwlockattr == NULL)
+		return (EINVAL);
+
 	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
 	if (pshared != PTHREAD_PROCESS_PRIVATE)
 		return(EINVAL);
--- src/lib/libthr/thread/thr_setprio.c	2007-01-12 08:40:30.000000000 0000
+++ src/lib/libthr/thread/thr_setprio.c	2012-05-02 02:09:13.000000000 0000
@@ -42,41 +42,37 @@
 {
 	struct pthread *curthread = _get_curthread();
 	struct sched_param param;
-	int ret;
+	int error;
+
+	if (prio < 0 || prio > THR_MAX_RR_PRIORITY)
+		return (EINVAL);
+
+	if (pthread != curthread) {
+		error = _thr_ref_add(curthread, pthread, /*include dead*/0);
+		if (error != 0)
+			return (error);
+	}
 
-	param.sched_priority = prio;
-	if (pthread == curthread) {
-		THR_LOCK(curthread);
-		if (curthread->attr.sched_policy == SCHED_OTHER ||
-		    curthread->attr.prio == prio) {
-			curthread->attr.prio = prio;
-			ret = 0;
-		} else {
-			ret = _thr_setscheduler(curthread->tid,
-			    curthread->attr.sched_policy, &param);
-			if (ret == -1)
-				ret = errno;
-			else
-				curthread->attr.prio = prio;
+	THR_THREAD_LOCK(curthread, pthread);
+	if (pthread->attr.sched_policy == SCHED_OTHER ||
+	    pthread->attr.prio == prio) {
+		pthread->attr.prio = prio;
+		error = 0;
+		goto out;
+	}
+	if (pthread->inherited_prio < prio) {
+		param.sched_priority = prio;
+		error = _thr_setscheduler(pthread->tid,
+		    pthread->attr.sched_policy, &param);
+		if (error == -1) {
+			error = errno;
+			goto out;
 		}
-		THR_UNLOCK(curthread);
-	} else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
-	    == 0) {
-		THR_THREAD_LOCK(curthread, pthread);
-		if (pthread->attr.sched_policy == SCHED_OTHER ||
-		    pthread->attr.prio == prio) {
-			pthread->attr.prio = prio;
-			ret = 0;
-		} else {
-			ret = _thr_setscheduler(pthread->tid,
-			    curthread->attr.sched_policy, &param);
-			if (ret == -1)
-				ret = errno;
-			else
-				pthread->attr.prio = prio;
-		}
-		THR_THREAD_UNLOCK(curthread, pthread);
+	}
+	pthread->attr.prio = prio;
+out:
+	THR_THREAD_UNLOCK(curthread, pthread);
+	if (pthread != curthread)
 		_thr_ref_delete(curthread, pthread);
-	}
-	return (ret);
+	return (error);
 }
--- src/lib/libthr/thread/thr_setschedparam.c	2006-09-21 05:41:02.000000000 0000
+++ src/lib/libthr/thread/thr_setschedparam.c	2012-05-02 02:09:13.000000000 0000
@@ -51,44 +51,49 @@
	const struct sched_param *param)
 {
 	struct pthread *curthread = _get_curthread();
-	int	ret;
+	struct sched_param temp;
+	int error;
+
+	if (policy != SCHED_RR && policy != SCHED_FIFO &&
+	    policy != SCHED_OTHER)
+		return (EINVAL);
+	if (policy != SCHED_OTHER && (param->sched_priority < 0 ||
+	    param->sched_priority > THR_MAX_RR_PRIORITY))
+		return (EINVAL);
+	if (pthread != curthread) {
+		error = _thr_ref_add(curthread, pthread, /*include dead*/0);
+		if (error != 0)
+			return (error);
+	}
+
+	THR_THREAD_LOCK(curthread, pthread);
+	if (pthread->attr.sched_policy == policy &&
+	    (policy == SCHED_OTHER ||
+	    pthread->attr.prio == param->sched_priority)) {
+		pthread->attr.prio = param->sched_priority;
+		error = 0;
+		goto out;
+	}
 
-	if (pthread == curthread) {
-		THR_LOCK(curthread);
-		if (curthread->attr.sched_policy == policy &&
-		    (policy == SCHED_OTHER ||
-		     curthread->attr.prio == param->sched_priority)) {
-			pthread->attr.prio = param->sched_priority;
-			THR_UNLOCK(curthread);
-			return (0);
+	if (pthread->inherited_prio < param->sched_priority) {
+		error = _thr_setscheduler(pthread->tid, policy, param);
+		if (error == -1) {
+			error = errno;
+			goto out;
 		}
-		ret = _thr_setscheduler(curthread->tid, policy, param);
-		if (ret == -1)
-			ret = errno;
-		else {
-			curthread->attr.sched_policy = policy;
-			curthread->attr.prio = param->sched_priority;
+	} else if (pthread->attr.sched_policy != policy) {
+		temp.sched_priority = pthread->inherited_prio;
+		error = _thr_setscheduler(pthread->tid, policy, &temp);
+		if (error == -1) {
+			error = errno;
+			goto out;
 		}
-		THR_UNLOCK(curthread);
-	} else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
-	    == 0) {
-		THR_THREAD_LOCK(curthread, pthread);
-		if (pthread->attr.sched_policy == policy &&
-		    (policy == SCHED_OTHER ||
-		     pthread->attr.prio == param->sched_priority)) {
-			pthread->attr.prio = param->sched_priority;
-			THR_THREAD_UNLOCK(curthread, pthread);
-			return (0);
-		}
-		ret = _thr_setscheduler(pthread->tid, policy, param);
-		if (ret == -1)
-			ret = errno;
-		else {
-			pthread->attr.sched_policy = policy;
-			pthread->attr.prio = param->sched_priority;
-		}
-		THR_THREAD_UNLOCK(curthread, pthread);
+	}
+	pthread->attr.sched_policy = policy;
+	pthread->attr.prio = param->sched_priority;
+out:
+	THR_THREAD_UNLOCK(curthread, pthread);
+	if (pthread != curthread)
 		_thr_ref_delete(curthread, pthread);
-	}
-	return (ret);
+	return (error);
 }
--- src/lib/libthr/thread/thr_sig.c	2012-03-26 17:36:42.000000000 0000
+++ src/lib/libthr/thread/thr_sig.c	2012-05-10 08:29:12.000000000 0000
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -193,6 +194,8 @@
 	int cancel_enable;
 	int in_sigsuspend;
 	int err;
+	char *stackptr = (char *)__builtin_frame_address(0);
+	char *prev_stackptr;
 
 	/* add previous level mask */
 	SIGSETOR(actp->sa_mask, ucp->uc_sigmask);
@@ -220,6 +223,12 @@
 	if (!cancel_async)
 		curthread->cancel_enable = 0;
 
+	prev_stackptr = curthread->sig_stackptr;
+	if (prev_stackptr == 0) {
+		curthread->sig_stackptr = stackptr;
+		curthread->sig_altstack = ucp->uc_stack;
+	}
+
 	/* restore correct mask before calling user handler */
 	__sys_sigprocmask(SIG_SETMASK, &actp->sa_mask, NULL);
@@ -251,6 +260,8 @@
 	/* reschedule cancellation */
 	check_cancel(curthread, &uc2);
+	if (prev_stackptr == 0)
+		curthread->sig_stackptr = 0;
 	errno = err;
 	__sys_sigreturn(&uc2);
 }
@@ -388,7 +399,7 @@
 			break;
 		curthread->flags |= THR_FLAGS_SUSPENDED;
 		THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
-		_thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0);
+		_thr_umtx_wait_uint(&curthread->cycle, cycle, 0);
 		THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
 		curthread->flags &= ~THR_FLAGS_SUSPENDED;
 	}
@@ -725,24 +736,105 @@
 	return (ret);
 }
 
+static void
+check_longjmp(struct pthread *curthread, char *target_stack)
+{
+	if (curthread == NULL)
+		return;
+
+	if (target_stack > curthread->sig_stackptr)
+		curthread->sig_stackptr = 0;
+	else if (target_stack < curthread->sig_stackptr) {
+		if (!(curthread->sig_altstack.ss_flags & SS_DISABLE)) {
+			if (target_stack < curthread->sig_altstack.ss_sp ||
+			    target_stack >= curthread->sig_altstack.ss_sp +
+			    curthread->sig_altstack.ss_size)
+				curthread->sig_stackptr = 0;
+		}
+	}
+}
+
 __weak_reference(_setcontext, setcontext);
 int
 _setcontext(const ucontext_t *ucp)
 {
+	struct pthread *curthread = _get_curthread();
 	ucontext_t uc;
+	char *stackptr;
+	int err;
 
 	(void) memcpy(&uc, ucp, sizeof(uc));
 	remove_thr_signals(&uc.uc_sigmask);
-	return __sys_setcontext(&uc);
+
+	if (curthread != NULL) {
+		stackptr = curthread->sig_stackptr;
+		check_longjmp(curthread, (char *)UCONTEXT_STACKPTR(ucp));
+		err = __sys_setcontext(&uc);
+		if (err != 0)
+			curthread->sig_stackptr = stackptr;
+		return (err);
+	}
+
+	return (__sys_setcontext(&uc));
 }
 
 __weak_reference(_swapcontext, swapcontext);
 int
 _swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
 {
+	struct pthread *curthread = _get_curthread();
 	ucontext_t uc;
+	char *stackptr;
+	int err;
 
 	(void) memcpy(&uc, ucp, sizeof(uc));
 	remove_thr_signals(&uc.uc_sigmask);
-	return __sys_swapcontext(oucp, &uc);
+
+	if (curthread != NULL) {
+		stackptr = curthread->sig_stackptr;
+		check_longjmp(curthread, (char *)UCONTEXT_STACKPTR(ucp));
+		err = __sys_swapcontext(oucp, &uc);
+		if (err != 0)
+			curthread->sig_stackptr = stackptr;
+		return (err);
+	}
+
+	return (__sys_swapcontext(oucp, &uc));
+}
+
+#ifndef JMPBUF_STACKPTR
+#define	JMPBUF_STACKPTR(buf)	0
+#endif
+
+__weak_reference(_thr_longjmp, longjmp);
+__weak_reference(_thr_longjmp, _longjmp);
+
+extern void __longjmp(jmp_buf, int);
+
+void _thr_longjmp(jmp_buf, int);
+void
+_thr_longjmp(jmp_buf env, int val)
+{
+	struct pthread *curthread = _get_curthread();
+
+	if (curthread != NULL)
+		check_longjmp(curthread, (char *)JMPBUF_STACKPTR(env));
+	__longjmp(env, val);
+}
+
+extern void __siglongjmp(jmp_buf, int);
+
+__weak_reference(_thr_siglongjmp, siglongjmp);
+__weak_reference(_thr_siglongjmp, _siglongjmp);
+
+void _thr_siglongjmp(jmp_buf, int);
+void
+_thr_siglongjmp(jmp_buf env, int val)
+{
+	struct pthread *curthread = _get_curthread();
+
+	if (curthread != NULL)
+		check_longjmp(curthread, (char *)JMPBUF_STACKPTR(env));
+	__siglongjmp(env, val);
 }
--- src/lib/libthr/thread/thr_sleepq.c	2012-05-10 09:36:55.000000000 0000
+++ src/lib/libthr/thread/thr_sleepq.c	2012-05-25 06:47:49.000000000 0000
@@ -62,7 +62,7 @@
 {
 	struct sleepqueue *sq;
 
-	sq = calloc(1, sizeof(struct sleepqueue));
+	sq = lcalloc(1, sizeof(struct sleepqueue));
 	TAILQ_INIT(&sq->sq_blocked);
 	SLIST_INIT(&sq->sq_freeq);
 	return (sq);
@@ -71,7 +71,7 @@
 void
 _sleepq_free(struct sleepqueue *sq)
 {
-	free(sq);
+	lfree(sq);
 }
 
 void
--- src/lib/libthr/thread/thr_spec.c	2010-08-27 05:35:27.000000000 0000
+++ src/lib/libthr/thread/thr_spec.c	2012-05-02 02:09:13.000000000 0000
@@ -159,7 +159,7 @@
 		}
 	}
 	THR_LOCK_RELEASE(curthread, &_keytable_lock);
-	free(curthread->specific);
+	lfree(curthread->specific);
 	curthread->specific = NULL;
 	if (curthread->specific_data_count > 0)
 		stderr_debug("Thread %p has exited with leftover "
@@ -173,7 +173,7 @@
 	struct pthread_specific_elem *new_data;
 
 	new_data = (struct pthread_specific_elem *)
-	    calloc(1, sizeof(struct pthread_specific_elem) * PTHREAD_KEYS_MAX);
+	    lcalloc(1, sizeof(struct pthread_specific_elem) * PTHREAD_KEYS_MAX);
 	return (new_data);
 }
--- src/lib/libthr/thread/thr_suspend_np.c	2010-09-13 07:35:35.000000000 0000
+++ src/lib/libthr/thread/thr_suspend_np.c	2012-05-02 02:09:13.000000000 0000
@@ -132,7 +132,7 @@
 		_thr_send_sig(thread, SIGCANCEL);
 	THR_THREAD_UNLOCK(curthread, thread);
 	if (waitok) {
-		_thr_umtx_wait_uint(&thread->cycle, tmp, NULL, 0);
+		_thr_umtx_wait_uint(&thread->cycle, tmp, 0);
 		THR_THREAD_LOCK(curthread, thread);
 	} else {
 		THR_THREAD_LOCK(curthread, thread);
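[Sketch] check_longjmp() above reduces to an address-range test: a jump above the recorded signal-frame pointer leaves the handler, and a jump below it only stays "inside a handler" if it lands on the alternate signal stack. The same test with explicit parameters; all names here are hypothetical:

#include <signal.h>
#include <stddef.h>

/* Returns nonzero when a jump to target exits the signal handler. */
static int
jump_leaves_handler(char *target, char *sig_stackptr, const stack_t *altstack)
{
	char *base;

	if (sig_stackptr == NULL)
		return (0);		/* not inside a handler */
	if (target > sig_stackptr)
		return (1);		/* unwinding past the signal frame */
	if (target < sig_stackptr && !(altstack->ss_flags & SS_DISABLE)) {
		base = (char *)altstack->ss_sp;
		if (target < base || target >= base + altstack->ss_size)
			return (1);	/* off the alternate stack */
	}
	return (0);
}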
--- src/lib/libthr/thread/thr_umtx.c	2012-03-19 00:36:46.000000000 0000
+++ src/lib/libthr/thread/thr_umtx.c	2012-05-14 05:48:42.000000000 0000
@@ -55,22 +55,8 @@
 }
 
 int
-__thr_umutex_lock(struct umutex *mtx, uint32_t id)
+__thr_umutex_lock(struct umutex *mtx)
 {
-	uint32_t owner;
-
-	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
-		for (;;) {
-			/* wait in kernel */
-			_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
-
-			owner = mtx->m_owner;
-			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
-			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
-				return (0);
-		}
-	}
-
 	return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
 }
 
@@ -82,9 +68,9 @@
 	uint32_t owner;
 
 	if (!_thr_is_smp)
-		return __thr_umutex_lock(mtx, id);
+		return __thr_umutex_lock(mtx);
 
-	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
+	if ((mtx->m_flags & (UMUTEX_PRIO_INHERIT|UMUTEX_PRIO_PROTECT)) == 0) {
 		for (;;) {
 			int count = SPINLOOPS;
 			while (count--) {
@@ -108,13 +94,11 @@
 }
 
 int
-__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
-	const struct timespec *abstime)
+__thr_umutex_timedlock(struct umutex *mtx, const struct timespec *abstime)
 {
 	struct _umtx_time *tm_p, timeout;
 	size_t tm_size;
-	uint32_t owner;
-	int ret;
+	int error;
 
 	if (abstime == NULL) {
 		tm_p = NULL;
@@ -128,33 +112,21 @@
 	}
 
 	for (;;) {
-		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
-
-			/* wait in kernel */
-			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
-			    (void *)tm_size, __DECONST(void *, tm_p));
-
-			/* now try to lock it */
-			owner = mtx->m_owner;
-			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
-			    atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
-				return (0);
-		} else {
-			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
-			    (void *)tm_size, __DECONST(void *, tm_p));
-			if (ret == 0)
-				break;
-		}
-		if (ret == ETIMEDOUT)
+		error = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
+		    (void *)tm_size, __DECONST(void *, tm_p));
+		if (error == EINTR)
+			continue;
+		else
 			break;
 	}
-	return (ret);
+	return (error);
 }
 
 int
-__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
+__thr_umutex_unlock(struct umutex *mtx, int keeprobst)
 {
-	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
+	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK,
+	    keeprobst ? UMUTEX_KEEP_STATE : 0, 0, 0);
 }
 
 int
@@ -164,6 +136,32 @@
 }
 
 int
+__thr_umutex_wake(struct umutex *mtx, uint32_t flags)
+{
+	return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
+}
+
+int
+__thr_umutex_wait(struct umutex *mtx, const struct timespec *abstime)
+{
+	struct _umtx_time *tm_p, timeout;
+	size_t tm_size;
+
+	if (abstime == NULL) {
+		tm_p = NULL;
+		tm_size = 0;
+	} else {
+		timeout._clockid = CLOCK_REALTIME;
+		timeout._flags = UMTX_ABSTIME;
+		timeout._timeout = *abstime;
+		tm_p = &timeout;
+		tm_size = sizeof(timeout);
+	}
+	return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
+	    (void *)tm_size, __DECONST(void *, tm_p));
+}
+
+int
 __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling)
 {
@@ -181,14 +179,10 @@
 }
 
 int
-_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
+_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, int shared)
 {
-	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
-	    timeout->tv_nsec <= 0)))
-		return (ETIMEDOUT);
 	return _umtx_op_err(__DEVOLATILE(void *, mtx),
-	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
-	    __DECONST(void*, timeout));
+	    shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, NULL);
 }
 
 int
@@ -217,8 +211,9 @@
 int
 _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
 {
-	return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
-	    nr_wakeup, 0, 0);
+	return _umtx_op_err(__DEVOLATILE(void *, mtx),
+	    shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
+	    nr_wakeup, 0, 0);
 }
 
 void
@@ -231,29 +226,19 @@
 _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags)
 {
-	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
-	    timeout->tv_nsec <= 0))) {
-		struct pthread *curthread = _get_curthread();
-		_thr_umutex_unlock(m, TID(curthread));
-		return (ETIMEDOUT);
-	}
 	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m, __DECONST(void*, timeout));
 }
 
 int
-_thr_ucond_signal(struct ucond *cv)
+__thr_ucond_signal(struct ucond *cv)
 {
-	if (!cv->c_has_waiters)
-		return (0);
 	return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
 }
 
 int
-_thr_ucond_broadcast(struct ucond *cv)
+__thr_ucond_broadcast(struct ucond *cv)
 {
-	if (!cv->c_has_waiters)
-		return (0);
 	return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
 }
--- src/lib/libthr/thread/thr_umtx.h	2012-04-05 02:36:35.000000000 0000
+++ src/lib/libthr/thread/thr_umtx.h	2012-05-14 05:48:42.000000000 0000
@@ -32,34 +32,33 @@
 #include 
 #include 
 
-#define DEFAULT_UMUTEX	{0,0,{0,0},{0,0,0,0}}
-#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}
+#define	DEFAULT_UMUTEX	{.m_owner = 0}
+#define	DEFAULT_URWLOCK	{.rw_state = 0}
 
 int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
-int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
+int __thr_umutex_lock(struct umutex *mtx) __hidden;
 int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
-int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
+int __thr_umutex_timedlock(struct umutex *mtx,
	const struct timespec *timeout) __hidden;
-int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
+int __thr_umutex_unlock(struct umutex *mtx, int keeprobst) __hidden;
 int __thr_umutex_trylock(struct umutex *mtx) __hidden;
 int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;
-
+int __thr_umutex_wake(struct umutex *, uint32_t flags) __hidden;
+int __thr_umutex_wait(struct umutex *, const struct timespec *)__hidden;
 void _thr_umutex_init(struct umutex *mtx) __hidden;
-void _thr_urwlock_init(struct urwlock *rwl) __hidden;
-
 int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
-int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
-	const struct timespec *timeout, int shared) __hidden;
+int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp, int shared) __hidden;
 int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
	const struct timespec *timeout, int shared) __hidden;
 int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
 int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
-	const struct timespec *timeout, int check_unpaking) __hidden;
+	const struct timespec *timeout, int wflags) __hidden;
 void _thr_ucond_init(struct ucond *cv) __hidden;
-int _thr_ucond_signal(struct ucond *cv) __hidden;
-int _thr_ucond_broadcast(struct ucond *cv) __hidden;
+int __thr_ucond_signal(struct ucond *cv) __hidden;
+int __thr_ucond_broadcast(struct ucond *cv) __hidden;
+void _thr_urwlock_init(struct urwlock *rwl) __hidden;
 
 int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp) __hidden;
@@ -72,51 +71,64 @@
 void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
 void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
 
+/*
+ * These functions are used by the library for internal locking;
+ * they are not used to implement POSIX mutexes, which are far
+ * more complex.
+ */
+
 static inline int
 _thr_umutex_trylock(struct umutex *mtx, uint32_t id)
 {
-	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
-		return (0);
-	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
-		return (EBUSY);
-	return (__thr_umutex_trylock(mtx));
+	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
+		return (0);
+	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED) {
+		if ((mtx->m_flags & UMUTEX_PRIO_INHERIT) == 0) {
+			if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
+				return (0);
+		} else {
+			return __thr_umutex_trylock(mtx);
+		}
+	}
+	return (EBUSY);
 }
 
 static inline int
-_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
+_thr_umutex_trylock_user(struct umutex *mtx, uint32_t id)
 {
-	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
-		return (0);
-	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
-	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
-		if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
+	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
 		return (0);
-	return (EBUSY);
+	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED) {
+		if ((mtx->m_flags & UMUTEX_PRIO_INHERIT) == 0) {
+			if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
+				return (0);
+		}
+	}
+	return (EBUSY);
 }
 
 static inline int
 _thr_umutex_lock(struct umutex *mtx, uint32_t id)
 {
-	if (_thr_umutex_trylock2(mtx, id) == 0)
-		return (0);
-	return (__thr_umutex_lock(mtx, id));
+	if (_thr_umutex_trylock_user(mtx, id) == 0)
+		return (0);
+	return (__thr_umutex_lock(mtx));
 }
 
 static inline int
 _thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
 {
-	if (_thr_umutex_trylock2(mtx, id) == 0)
-		return (0);
-	return (__thr_umutex_lock_spin(mtx, id));
+	if (_thr_umutex_trylock_user(mtx, id) == 0)
+		return (0);
+	return (__thr_umutex_lock_spin(mtx, id));
 }
 
 static inline int
 _thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout)
 {
-	if (_thr_umutex_trylock2(mtx, id) == 0)
-		return (0);
-	return (__thr_umutex_timedlock(mtx, id, timeout));
+	if (_thr_umutex_trylock_user(mtx, id) == 0)
+		return (0);
+	return (__thr_umutex_timedlock(mtx, timeout));
 }
 
 static inline int
@@ -124,7 +136,7 @@
 {
 	uint32_t flags = mtx->m_flags;
 
-	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
+	if ((flags & UMUTEX_PRIO_INHERIT) == 0) {
 		uint32_t owner;
 		do {
 			owner = mtx->m_owner;
@@ -133,12 +145,12 @@
 		} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
		    owner, UMUTEX_UNOWNED)));
 		if ((owner & UMUTEX_CONTESTED))
-			(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
+			__thr_umutex_wake(mtx, flags);
 		return (0);
 	}
 	if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
 		return (0);
-	return (__thr_umutex_unlock(mtx, id));
+	return (__thr_umutex_unlock(mtx, 0));
 }
 
 static inline int
@@ -221,4 +233,21 @@
 	}
 	return (__thr_rwlock_unlock(rwlock));
 }
+
+static inline int
+_thr_ucond_broadcast(struct ucond *cv)
+{
+	if (!cv->c_has_waiters)
+		return (0);
+	return __thr_ucond_broadcast(cv);
+}
+
+static inline int
+_thr_ucond_signal(struct ucond *cv)
+{
+	if (!cv->c_has_waiters)
+		return (0);
+	return __thr_ucond_signal(cv);
+}
+
 #endif
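[Sketch] The userland trylock above packs the owner TID and a CONTESTED bit into one 32-bit word: an uncontested lock is a single CAS, and a free-but-contested lock is taken while keeping the bit set so unlock still wakes sleepers. Restated with C11 atomics; the UNOWNED/CONTESTED values mirror the umtx conventions, everything else is illustrative:

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define	UNOWNED		0x0U
#define	CONTESTED	0x80000000U

static int
word_trylock(_Atomic uint32_t *owner, uint32_t id)
{
	uint32_t old = UNOWNED;

	if (atomic_compare_exchange_strong(owner, &old, id))
		return (0);
	/* Free but marked contested: take it and preserve the bit. */
	if (old == CONTESTED &&
	    atomic_compare_exchange_strong(owner, &old, id | CONTESTED))
		return (0);
	return (EBUSY);
}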
--- src/sys/kern/kern_thr.c	2012-05-26 20:05:16.000000000 0000
+++ src/sys/kern/kern_thr.c	2012-05-27 06:42:07.000000000 0000
@@ -313,6 +313,8 @@
 		kern_umtx_wake(td, uap->state, INT_MAX, 0);
 	}
 
+	umtx_thread_exit(td);
+
 	rw_wlock(&tidhash_lock);
 
 	PROC_LOCK(p);
@@ -330,9 +332,11 @@
 		thread_stopped(p);
 		thread_exit();
 		/* NOTREACHED */
+	} else {
+		PROC_UNLOCK(p);
+		rw_wunlock(&tidhash_lock);
+		exit1(td, 0);
 	}
-	PROC_UNLOCK(p);
-	rw_wunlock(&tidhash_lock);
 	return (0);
 }
--- src/sys/kern/kern_thread.c	2012-05-30 16:10:18.000000000 0000
+++ src/sys/kern/kern_thread.c	2012-06-07 09:00:40.000000000 0000
@@ -411,7 +411,6 @@
 #ifdef AUDIT
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
-	umtx_thread_exit(td);
 	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
@@ -799,6 +798,7 @@
 	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
 		PROC_UNLOCK(p);
 		tidhash_remove(td);
+		umtx_thread_exit(td);
 		PROC_LOCK(p);
 		tdsigcleanup(td);
 		PROC_SLOCK(p);
--- src/sys/kern/kern_umtx.c	2012-04-14 23:55:20.000000000 0000
+++ src/sys/kern/kern_umtx.c	2012-05-07 08:30:54.000000000 0000
@@ -85,6 +85,24 @@
 	struct umtx_key		pi_key;
 };
 
+/* Robust mutex owner info. */
+struct robust_info {
+	/* Mutex owner thread. */
+	struct thread		*ownertd;
+
+	/* Hash link entry. */
+	SLIST_ENTRY(robust_info) hash_qe;
+
+	/* Mutex in thread's linked list. */
+	LIST_ENTRY(robust_info)	td_qe;
+
+	/* User address */
+	struct umutex		*umtxp;
+};
+
+SLIST_HEAD(robust_hashlist, robust_info);
+LIST_HEAD(robust_list, robust_info);
+
 /* A userland synchronous object user. */
 struct umtx_q {
 	/* Linked list for the hash. */
@@ -121,6 +139,18 @@
 
 	/* The queue we on */
 	struct umtxq_queue	*uq_cur_queue;
+
+	/* Robust mutex list */
+	struct robust_list	uq_rob_list;
+
+	/* Thread is exiting. */
+	char			uq_exiting;
+
+	/* Total number of pshared PI mutexes. */
+	int			uq_pshared_pi_mutexes;
+
+	/* Total number of robust mutexes. */
+	int			uq_robust_mutexes;
 };
 
 TAILQ_HEAD(umtxq_head, umtx_q);
@@ -148,7 +178,7 @@
 	LIST_HEAD(, umtxq_queue) uc_spare_queue;
 
 	/* Busy flag */
-	char			uc_busy;
+	volatile char		uc_busy;
 
 	/* Chain lock waiters */
 	int			uc_waiters;
@@ -162,18 +192,20 @@
 #endif
 };
 
+struct robust_chain {
+	/* Lock for this chain. */
+	struct mtx	lock;
+
+	/* Robust mutex list. */
+	struct robust_hashlist	rob_list;
+};
+
 #define	UMTXQ_LOCKED_ASSERT(uc)	mtx_assert(&(uc)->uc_lock, MA_OWNED)
 #define	UMTXQ_BUSY_ASSERT(uc)	KASSERT(&(uc)->uc_busy, ("umtx chain is not busy"))
 
 /*
- * Don't propagate time-sharing priority, there is a security reason,
- * a user can simply introduce PI-mutex, let thread A lock the mutex,
- * and let another thread B block on the mutex, because B is
- * sleeping, its priority will be boosted, this causes A's priority to
- * be boosted via priority propagating too and will never be lowered even
- * if it is using 100%CPU, this is unfair to other processes.
+ * Treat time-sharing threads as if they all had a single priority.
 */
-
 #define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
			 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)
@@ -186,6 +218,8 @@
 	(((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
 
 #define	BUSY_SPINS		200
+#define	ROBUST_CHAINS		128
+#define	ROBUST_SHIFTS		(__WORD_BIT - 7)
 
 struct abs_timeout {
 	int clockid;
@@ -194,12 +228,20 @@
 };
 
 static uma_zone_t		umtx_pi_zone;
+static uma_zone_t		robust_zone;
 static struct umtxq_chain	umtxq_chains[2][UMTX_CHAINS];
 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
 static int			umtx_pi_allocated;
+static int			max_pi_mutexes = 3000;
+static int			max_robust_mutexes = 1000;
+static struct robust_chain	robust_chains[ROBUST_CHAINS];
 
-static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
-SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
+static SYSCTL_NODE(_kern, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx");
+SYSCTL_INT(_kern_umtx, OID_AUTO, max_shared_pi_mutexes, CTLFLAG_RW,
+    &max_pi_mutexes, 0, "Maximum shared PI mutexes per-proc");
+SYSCTL_INT(_kern_umtx, OID_AUTO, max_robust_mutexes, CTLFLAG_RW,
+    &max_robust_mutexes, 0, "Maximum registered robust mutexes per-proc");
+SYSCTL_INT(_kern_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
     &umtx_pi_allocated, 0, "Allocated umtx_pi");
 
 #ifdef UMTX_PROFILING
@@ -209,22 +251,29 @@
 #endif
 
 static void umtxq_sysinit(void *);
-static void umtxq_hash(struct umtx_key *key);
-static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
-static void umtxq_lock(struct umtx_key *key);
-static void umtxq_unlock(struct umtx_key *key);
-static void umtxq_busy(struct umtx_key *key);
-static void umtxq_unbusy(struct umtx_key *key);
-static void umtxq_insert_queue(struct umtx_q *uq, int q);
-static void umtxq_remove_queue(struct umtx_q *uq, int q);
-static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
-static int umtxq_count(struct umtx_key *key);
+static void umtxq_hash(struct umtx_key *);
+static struct umtxq_chain *umtxq_getchain(struct umtx_key *);
+static void umtxq_lock(struct umtx_key *);
+static void umtxq_unlock(struct umtx_key *);
+static void umtxq_busy(struct umtx_key *);
+static void umtxq_unbusy(struct umtx_key *);
+static void umtxq_insert_queue(struct umtx_q *, int);
+static void umtxq_remove_queue(struct umtx_q *, int);
+static int umtxq_sleep(struct umtx_q *, const char *, struct abs_timeout *);
+static int umtxq_count(struct umtx_key *);
 static struct umtx_pi *umtx_pi_alloc(int);
-static void umtx_pi_free(struct umtx_pi *pi);
-static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
-static void umtx_thread_cleanup(struct thread *td);
-static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
-	struct image_params *imgp __unused);
+static void umtx_pi_free(struct umtx_pi *);
+static int do_unlock_pp(struct thread *, struct umutex *, uint32_t, int);
+static void umtx_thread_cleanup(struct thread *);
+static void umtx_exec_hook(void *, struct proc *, struct image_params *);
+static void umtx_exit_hook(void *, struct proc *);
+static void umtx_fork_hook(void *, struct proc *, struct proc *, int);
+static int robust_alloc(struct robust_info **);
+static void robust_free(struct robust_info *);
+static void robust_insert(struct thread *, struct robust_info *);
+static void robust_remove(struct thread *, struct umutex *);
+static int do_unlock_umutex(struct thread *, struct umutex *, int);
+
 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
 
 #define umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
@@ -261,6 +310,9 @@
 	umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ robust_zone = uma_zcreate("robust umtx", sizeof(struct robust_info), + NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); + for (i = 0; i < 2; ++i) { for (j = 0; j < UMTX_CHAINS; ++j) { mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL, @@ -280,9 +332,20 @@ #ifdef UMTX_PROFILING umtx_init_profiling(); #endif + + for (i = 0; i < ROBUST_CHAINS; ++i) { + mtx_init(&robust_chains[i].lock, "robql", NULL, + MTX_DEF | MTX_DUPOK); + SLIST_INIT(&robust_chains[i].rob_list); + } + mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN); EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL, EVENTHANDLER_PRI_ANY); + EVENTHANDLER_REGISTER(process_exit, umtx_exit_hook, NULL, + EVENTHANDLER_PRI_ANY); + EVENTHANDLER_REGISTER(process_fork, umtx_fork_hook, NULL, + EVENTHANDLER_PRI_ANY); } struct umtx_q * @@ -291,9 +354,11 @@ struct umtx_q *uq; uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO); - uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO); + uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, + M_WAITOK | M_ZERO); TAILQ_INIT(&uq->uq_spare_queue->head); TAILQ_INIT(&uq->uq_pi_contested); + LIST_INIT(&uq->uq_rob_list); uq->uq_inherited_pri = PRI_MAX; return (uq); } @@ -474,41 +539,35 @@ } /* - * Check if there are multiple waiters + * Check if there are multiple waiters and returns first + * waiter. */ static int -umtxq_count(struct umtx_key *key) +umtxq_count_first(struct umtx_key *key, struct umtx_q **first) { struct umtxq_chain *uc; struct umtxq_queue *uh; + if (first != NULL) + *first = NULL; uc = umtxq_getchain(key); UMTXQ_LOCKED_ASSERT(uc); uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); - if (uh != NULL) + if (uh != NULL) { + if (first != NULL) + *first = TAILQ_FIRST(&uh->head); return (uh->length); + } return (0); } /* - * Check if there are multiple PI waiters and returns first - * waiter. + * Check if there are multiple waiters */ -static int -umtxq_count_pi(struct umtx_key *key, struct umtx_q **first) +static inline int +umtxq_count(struct umtx_key *key) { - struct umtxq_chain *uc; - struct umtxq_queue *uh; - - *first = NULL; - uc = umtxq_getchain(key); - UMTXQ_LOCKED_ASSERT(uc); - uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); - if (uh != NULL) { - *first = TAILQ_FIRST(&uh->head); - return (uh->length); - } - return (0); + return umtxq_count_first(key, NULL); } /* @@ -844,7 +903,7 @@ umtxq_unlock(&key); /* - * When unlocking the umtx, it must be marked as unowned if + * When unlocking the mutex, it must be marked as unowned if * there is zero or one thread only waiting for it. * Otherwise, it must be marked as contested. */ @@ -1049,6 +1108,13 @@ u_long tmp; int error = 0; + if (compat32 == 0) + tmp = fuword(addr); + else + tmp = (unsigned int)fuword32(addr); + if (tmp != id) + return (0); + uq = td->td_umtxq; if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT, is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0) @@ -1109,8 +1175,16 @@ struct umtx_q *uq; uint32_t owner, old, id; int error = 0; + int errchk; + int robst; - id = td->td_tid; + if ((flags & UMUTEX_SIMPLE) != 0) { + id = UMUTEX_SIMPLE_OWNER; + errchk = 0; + } else { + id = td->td_tid; + errchk = 1; + } uq = td->td_umtxq; if (timeout != NULL) @@ -1121,6 +1195,11 @@ * can fault on any access. 
*/ for (;;) { + if ((flags & UMUTEX_ROBUST) != 0) { + robst = fubyte(&m->m_robstate); + if (robst == ROBST_NOTRECOVERABLE) + return (ENOTRECOVERABLE); + } owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); if (mode == _UMUTEX_WAIT) { if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED) @@ -1156,7 +1235,7 @@ } } - if ((flags & UMUTEX_ERROR_CHECK) != 0 && + if ((flags & UMUTEX_ERROR_CHECK) && errchk && (owner & ~UMUTEX_CONTESTED) == id) return (EDEADLK); @@ -1219,14 +1298,25 @@ * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. */ static int -do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, + int keeprobst) { struct umtx_key key; uint32_t owner, old, id; int error; int count; + int robst; + int nwake; + int errchk; + + if ((flags & UMUTEX_SIMPLE) != 0) { + id = UMUTEX_SIMPLE_OWNER; + errchk = 0; + } else { + id = td->td_tid; + errchk = 1; + } - id = td->td_tid; /* * Make sure we own this mtx. */ @@ -1234,9 +1324,22 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((flags & UMUTEX_ERROR_CHECK) != 0 && errchk && + (owner & ~UMUTEX_CONTESTED) != id) return (EPERM); + nwake = 1; + if ((flags & UMUTEX_ROBUST) != 0) { + robst = fubyte(&m->m_robstate); + if (robst == ROBST_OWNER_DEAD) { + if (!keeprobst) { + nwake = INT_MAX; + (void)subyte(&m->m_robstate, ROBST_NOTRECOVERABLE); + } + } else if (robst == ROBST_NOTRECOVERABLE) + nwake = INT_MAX; + } + if ((owner & UMUTEX_CONTESTED) == 0) { old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); if (old == -1) @@ -1264,7 +1367,7 @@ old = casuword32(&m->m_owner, owner, count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); umtxq_lock(&key); - umtxq_signal(&key,1); + umtxq_signal(&key, nwake); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); @@ -1330,6 +1433,8 @@ int type; int error; int count; + int robst; + int broadcast; switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: @@ -1353,6 +1458,13 @@ umtxq_busy(&key); count = umtxq_count(&key); umtxq_unlock(&key); + broadcast = 0; + if ((flags & UMUTEX_ROBUST) != 0 && count != 0) { + /* Access memory only when there is any waiter */ + robst = fubyte(&m->m_robstate); + if (robst == ROBST_NOTRECOVERABLE) + broadcast = 1; + } /* * Only repair contention bit if there is a waiter, this means the mutex * is still being referenced by userland code, otherwise don't update @@ -1378,11 +1490,13 @@ owner = old; } } - umtxq_lock(&key); if (owner == -1) { + broadcast = 1; error = EFAULT; + } + umtxq_lock(&key); + if (broadcast) umtxq_signal(&key, INT_MAX); - } else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0) umtxq_signal(&key, 1); umtxq_unbusy(&key); @@ -1474,6 +1588,7 @@ for (;;) { td = pi->pi_owner; + /* Check against curthread to break circle loop. */ if (td == NULL || td == curthread) return; @@ -1502,8 +1617,8 @@ } /* - * Unpropagate priority for a PI mutex when a thread blocked on - * it is interrupted by signal or resumed by others. + * Repropagate priority for a PI mutex when a thread blocked on + * is removed. */ static void umtx_repropagate_priority(struct umtx_pi *pi) @@ -1575,6 +1690,10 @@ return (EPERM); } umtx_pi_setowner(pi, owner); + + if (pi->pi_key.shared) + uq_owner->uq_pshared_pi_mutexes++; + uq = TAILQ_FIRST(&pi->pi_blocked); if (uq != NULL) { int pri; @@ -1591,7 +1710,7 @@ /* * Adjust a thread's order position in its blocked PI mutex, - * this may result new priority propagating process. + * and repropagate. 
*/ void umtx_pi_adjust(struct thread *td, u_char oldpri) @@ -1632,14 +1751,29 @@ UMTXQ_BUSY_ASSERT(uc); umtxq_insert(uq); mtx_lock_spin(&umtx_lock); + /* Only look up thread in current process. */ if (pi->pi_owner == NULL) { mtx_unlock_spin(&umtx_lock); - /* XXX Only look up thread in current process. */ - td1 = tdfind(owner, curproc->p_pid); + + td1 = tdfind(owner, pi->pi_key.shared ? -1 : curproc->p_pid); + if (td1 != NULL && pi->pi_key.shared) { + error = p_cansched(curthread, td1->td_proc); + if (error != 0) { + PROC_UNLOCK(td1->td_proc); + umtxq_remove(uq); + umtxq_unbusy(&uq->uq_key); + umtxq_unlock(&uq->uq_key); + return (error); + } + } + mtx_lock_spin(&umtx_lock); if (td1 != NULL) { - if (pi->pi_owner == NULL) - umtx_pi_setowner(pi, td1); + if((uq1 = td1->td_umtxq) != NULL && + uq1->uq_exiting == 0) { + if (pi->pi_owner == NULL) + umtx_pi_setowner(pi, td1); + } PROC_UNLOCK(td1->td_proc); } } @@ -1664,7 +1798,11 @@ umtxq_unbusy(&uq->uq_key); error = umtxq_sleep(uq, wmesg, timo); - umtxq_remove(uq); + if ((uq->uq_flags & UQF_UMTXQ) != 0) { + umtxq_busy(&uq->uq_key); + umtxq_remove(uq); + umtxq_unbusy(&uq->uq_key); + } mtx_lock_spin(&umtx_lock); uq->uq_pi_blocked = NULL; @@ -1752,18 +1890,46 @@ TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink); } +static struct umtx_pi * +umtx_pi_get(struct umtx_key *key) +{ + struct umtx_pi *pi, *new_pi; + + pi = umtx_pi_lookup(key); + if (pi == NULL) { + new_pi = umtx_pi_alloc(M_NOWAIT); + if (new_pi == NULL) { + umtxq_unlock(key); + new_pi = umtx_pi_alloc(M_WAITOK); + umtxq_lock(key); + pi = umtx_pi_lookup(key); + if (pi != NULL) { + umtx_pi_free(new_pi); + new_pi = NULL; + } + } + if (new_pi != NULL) { + new_pi->pi_key = *key; + umtx_pi_insert(new_pi); + pi = new_pi; + } + } + return (pi); +} + /* * Lock a PI mutex. */ static int -do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, +do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, struct _umtx_time *timeout, int try) { struct abs_timeout timo; struct umtx_q *uq; - struct umtx_pi *pi, *new_pi; + struct umtx_pi *pi; uint32_t id, owner, old; int error; + int robst; id = td->td_tid; uq = td->td_umtxq; @@ -1776,25 +1942,7 @@ abs_timeout_init2(&timo, timeout); umtxq_lock(&uq->uq_key); - pi = umtx_pi_lookup(&uq->uq_key); - if (pi == NULL) { - new_pi = umtx_pi_alloc(M_NOWAIT); - if (new_pi == NULL) { - umtxq_unlock(&uq->uq_key); - new_pi = umtx_pi_alloc(M_WAITOK); - umtxq_lock(&uq->uq_key); - pi = umtx_pi_lookup(&uq->uq_key); - if (pi != NULL) { - umtx_pi_free(new_pi); - new_pi = NULL; - } - } - if (new_pi != NULL) { - new_pi->pi_key = uq->uq_key; - umtx_pi_insert(new_pi); - pi = new_pi; - } - } + pi = umtx_pi_get(&uq->uq_key); umtx_pi_ref(pi); umtxq_unlock(&uq->uq_key); @@ -1803,6 +1951,14 @@ * can fault on any access. */ for (;;) { + if ((flags & UMUTEX_ROBUST) != 0) { + robst = fubyte(&m->m_robstate); + if (robst == ROBST_NOTRECOVERABLE) { + error = ENOTRECOVERABLE; + break; + } + } + /* * Try the uncontested case. This should be done in userland. */ @@ -1906,11 +2062,9 @@ return (error); } -/* - * Unlock a PI mutex. 
- */ static int -do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, + int keeprobst) { struct umtx_key key; struct umtx_q *uq_first, *uq_first2, *uq_me; @@ -1919,6 +2073,8 @@ int error; int count; int pri; + int broadcast; + int robst; id = td->td_tid; /* @@ -1931,6 +2087,18 @@ if ((owner & ~UMUTEX_CONTESTED) != id) return (EPERM); + broadcast = FALSE; + if ((flags & UMUTEX_ROBUST) != 0) { + robst = fubyte(&m->m_robstate); + if (robst == ROBST_OWNER_DEAD) { + if (!keeprobst) { + subyte(&m->m_robstate, ROBST_NOTRECOVERABLE); + broadcast = TRUE; + } + } else if (robst == ROBST_NOTRECOVERABLE) + broadcast = TRUE; + } + /* This should be done in userland */ if ((owner & UMUTEX_CONTESTED) == 0) { old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); @@ -1948,7 +2116,7 @@ umtxq_lock(&key); umtxq_busy(&key); - count = umtxq_count_pi(&key, &uq_first); + count = umtxq_count_first(&key, &uq_first); if (uq_first != NULL) { mtx_lock_spin(&umtx_lock); pi = uq_first->uq_pi_blocked; @@ -1982,7 +2150,9 @@ sched_lend_user_prio(curthread, pri); thread_unlock(curthread); mtx_unlock_spin(&umtx_lock); - if (uq_first) + if (broadcast) + umtxq_signal(&key, INT_MAX); + else if (uq_first) umtxq_signal_thread(uq_first); } umtxq_unlock(&key); @@ -2019,8 +2189,15 @@ uint32_t ceiling; uint32_t owner, id; int error, pri, old_inherited_pri, su; + int robst, errchk; - id = td->td_tid; + if ((flags & UMUTEX_SIMPLE) != 0) { + id = UMUTEX_SIMPLE_OWNER; + errchk = 0; + } else { + id = td->td_tid; + errchk = 1; + } uq = td->td_umtxq; if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags), &uq->uq_key)) != 0) @@ -2032,6 +2209,15 @@ su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); for (;;) { old_inherited_pri = uq->uq_inherited_pri; + + if ((flags & UMUTEX_ROBUST) != 0) { + robst = fubyte(&m->m_robstate); + if (robst == ROBST_NOTRECOVERABLE) { + error = ENOTRECOVERABLE; + break; + } + } + umtxq_lock(&uq->uq_key); umtxq_busy(&uq->uq_key); umtxq_unlock(&uq->uq_key); @@ -2071,7 +2257,7 @@ break; } - if ((flags & UMUTEX_ERROR_CHECK) != 0 && + if ((flags & UMUTEX_ERROR_CHECK) != 0 && errchk && (owner & ~UMUTEX_CONTESTED) == id) { error = EDEADLK; break; @@ -2132,8 +2318,15 @@ sched_lend_user_prio(td, pri); thread_unlock(td); mtx_unlock_spin(&umtx_lock); + } else { + /* Save previous priority */ + if (old_inherited_pri == PRI_MAX) + ceiling = -1; + else + ceiling = RTP_PRIO_MAX - + (old_inherited_pri - PRI_MIN_REALTIME); + (void)suword32(&m->m_ceilings[1], ceiling); } - out: umtxq_lock(&uq->uq_key); umtxq_unbusy(&uq->uq_key); @@ -2146,7 +2339,8 @@ * Unlock a PP mutex. */ static int -do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, + int keeprobst) { struct umtx_key key; struct umtx_q *uq, *uq2; @@ -2154,19 +2348,30 @@ uint32_t owner, id; uint32_t rceiling; int error, pri, new_inherited_pri, su; + int broadcast; + int robst; + int errchk; - id = td->td_tid; + if ((flags & (UMUTEX_ROBUST | USYNC_PROCESS)) == (UMUTEX_ROBUST | USYNC_PROCESS)) { + id = td->td_proc->p_pid; + errchk = 0; + } else if ((flags & UMUTEX_SIMPLE) != 0) { + id = UMUTEX_SIMPLE_OWNER; + errchk = 0; + } else { + id = td->td_tid; + errchk = 1; + } + uq = td->td_umtxq; su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); - /* - * Make sure we own this mtx. 
- */
 	owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner));
 	if (owner == -1)
 		return (EFAULT);
 
-	if ((owner & ~UMUTEX_CONTESTED) != id)
+	if ((flags & UMUTEX_ERROR_CHECK) != 0 && errchk &&
+	    (owner & ~UMUTEX_CONTESTED) != id)
 		return (EPERM);
 
 	error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
@@ -2182,48 +2387,57 @@
 		new_inherited_pri = PRI_MIN_REALTIME + rceiling;
 	}
 
+	broadcast = FALSE;
+	if ((flags & UMUTEX_ROBUST) != 0) {
+		robst = fubyte(&m->m_robstate);
+		if (robst == ROBST_OWNER_DEAD) {
+			if (!keeprobst) {
+				broadcast = TRUE;
+				subyte(&m->m_robstate, ROBST_NOTRECOVERABLE);
+			}
+		} else if (robst == ROBST_NOTRECOVERABLE)
+			broadcast = TRUE;
+	}
+
 	if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
 	    &key)) != 0)
 		return (error);
+
 	umtxq_lock(&key);
 	umtxq_busy(&key);
 	umtxq_unlock(&key);
+
 	/*
 	 * For priority protected mutex, always set unlocked state
 	 * to UMUTEX_CONTESTED, so that userland always enters kernel
 	 * to lock the mutex, it is necessary because thread priority
 	 * has to be adjusted for such mutex.
 	 */
-	error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
+	(void)suword32(__DEVOLATILE(uint32_t *, &m->m_owner),
 	    UMUTEX_CONTESTED);
 
 	umtxq_lock(&key);
-	if (error == 0)
-		umtxq_signal(&key, 1);
+	umtxq_signal(&key, broadcast ? INT_MAX : 1);
 	umtxq_unbusy(&key);
 	umtxq_unlock(&key);
 
-	if (error == -1)
-		error = EFAULT;
-	else {
-		mtx_lock_spin(&umtx_lock);
-		if (su != 0)
-			uq->uq_inherited_pri = new_inherited_pri;
-		pri = PRI_MAX;
-		TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
-			uq2 = TAILQ_FIRST(&pi->pi_blocked);
-			if (uq2 != NULL) {
-				if (pri > UPRI(uq2->uq_thread))
-					pri = UPRI(uq2->uq_thread);
-			}
+	mtx_lock_spin(&umtx_lock);
+	if (su != 0)
+		uq->uq_inherited_pri = new_inherited_pri;
+	pri = PRI_MAX;
+	TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
+		uq2 = TAILQ_FIRST(&pi->pi_blocked);
+		if (uq2 != NULL) {
+			if (pri > UPRI(uq2->uq_thread))
+				pri = UPRI(uq2->uq_thread);
 		}
-		if (pri > uq->uq_inherited_pri)
-			pri = uq->uq_inherited_pri;
-		thread_lock(td);
-		sched_lend_user_prio(td, pri);
-		thread_unlock(td);
-		mtx_unlock_spin(&umtx_lock);
 	}
+	if (pri > uq->uq_inherited_pri)
+		pri = uq->uq_inherited_pri;
+	thread_lock(td);
+	sched_lend_user_prio(td, pri);
+	thread_unlock(td);
+	mtx_unlock_spin(&umtx_lock);
 	umtx_key_release(&key);
 	return (error);
 }
@@ -2237,13 +2451,25 @@
 	uint32_t owner, id;
 	uint32_t flags;
 	int error;
+	int errchk;
 
 	flags = fuword32(&m->m_flags);
 	if ((flags & UMUTEX_PRIO_PROTECT) == 0)
 		return (EINVAL);
 	if (ceiling > RTP_PRIO_MAX)
 		return (EINVAL);
-	id = td->td_tid;
+
+	if ((flags & (UMUTEX_ROBUST | USYNC_PROCESS)) == (UMUTEX_ROBUST |
+	    USYNC_PROCESS)) {
+		id = td->td_proc->p_pid;
+		errchk = 0;
+	} else if ((flags & UMUTEX_SIMPLE) != 0) {
+		id = UMUTEX_SIMPLE_OWNER;
+		errchk = 0;
+	} else {
+		id = td->td_tid;
+		errchk = 1;
+	}
+
 	uq = td->td_umtxq;
 	if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
 	    &uq->uq_key)) != 0)
@@ -2272,7 +2498,7 @@
 			break;
 		}
 
-		if ((owner & ~UMUTEX_CONTESTED) == id) {
+		if (errchk && (owner & ~UMUTEX_CONTESTED) == id) {
 			suword32(&m->m_ceilings[0], ceiling);
 			error = 0;
 			break;
@@ -2315,13 +2541,22 @@
 do_lock_umutex(struct thread *td, struct umutex *m,
     struct _umtx_time *timeout, int mode)
 {
+	struct robust_info *rob = NULL;
 	uint32_t flags;
 	int error;
+	int robst;
 
 	flags = fuword32(&m->m_flags);
 	if (flags == -1)
 		return (EFAULT);
 
+	if ((flags & (UMUTEX_ROBUST|USYNC_PROCESS)) == (UMUTEX_ROBUST|
+	    USYNC_PROCESS) && mode != _UMUTEX_WAIT) {
+		error = robust_alloc(&rob);
+		if (error != 0)
+			return (error);
+	}
+
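The robust-mutex handling that follows is what backs the standard POSIX protocol: a lock attempt on a mutex whose owner died succeeds but reports EOWNERDEAD, the new owner repairs the protected state and marks the mutex consistent, and an unlock without that repair step poisons the mutex to ENOTRECOVERABLE for everyone else. This is not part of the patch, just a sketch of the standard pthread API these kernel changes are meant to support:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t m;

    static void
    init_robust(void)
    {
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);
    }

    static int
    lock_robust(void)
    {
        int error = pthread_mutex_lock(&m);

        if (error == EOWNERDEAD) {
            /* ... repair the shared state here ... */
            pthread_mutex_consistent(&m);
            error = 0;
        }
        return (error); /* ENOTRECOVERABLE if never repaired */
    }

In the terms used by this patch, pthread_mutex_consistent() would correspond to resetting m_robstate from ROBST_OWNER_DEAD back to ROBST_NORMAL before the unlock that would otherwise drive it to ROBST_NOTRECOVERABLE.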
switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: error = do_lock_normal(td, m, flags, timeout, mode); @@ -2333,8 +2568,11 @@ error = do_lock_pp(td, m, flags, timeout, mode); break; default: + if (rob != NULL) + robust_free(rob); return (EINVAL); } + if (timeout == NULL) { if (error == EINTR && mode != _UMUTEX_WAIT) error = ERESTART; @@ -2343,6 +2581,27 @@ if (error == ERESTART) error = EINTR; } + + if (error == 0) { + if ((flags & UMUTEX_ROBUST) != 0 && mode != _UMUTEX_WAIT) { + robst = fubyte(&m->m_robstate); + if (robst == ROBST_OWNER_DEAD) + error = EOWNERDEAD; + else if (robst == ROBST_NOTRECOVERABLE) { + do_unlock_umutex(td, m, TRUE); + error = ENOTRECOVERABLE; + } + } + } + + if (rob != NULL) { + if (error == 0 || error == EOWNERDEAD) { + rob->ownertd = td; + rob->umtxp = m; + robust_insert(td, rob); + } else + robust_free(rob); + } return (error); } @@ -2350,24 +2609,42 @@ * Unlock a userland POSIX mutex. */ static int -do_unlock_umutex(struct thread *td, struct umutex *m) +do_unlock_umutex(struct thread *td, struct umutex *m, int keeprobst) { uint32_t flags; + int error; flags = fuword32(&m->m_flags); - if (flags == -1) - return (EFAULT); + if ((flags & (UMUTEX_ROBUST|USYNC_PROCESS)) == (UMUTEX_ROBUST| + USYNC_PROCESS)) + robust_remove(td, m); switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: - return (do_unlock_normal(td, m, flags)); + error = do_unlock_normal(td, m, flags, keeprobst); + break; case UMUTEX_PRIO_INHERIT: - return (do_unlock_pi(td, m, flags)); + error = do_unlock_pi(td, m, flags, keeprobst); + break; case UMUTEX_PRIO_PROTECT: - return (do_unlock_pp(td, m, flags)); + error = do_unlock_pp(td, m, flags, keeprobst); + break; + default: + error = EINVAL; } + return (error); +} - return (EINVAL); +/* Cleanup process-shared robust mutex */ +static void +do_cleanup_umutex(struct thread *td, struct umutex *m) +{ + int robst; + + robst = fubyte(&m->m_robstate); + if (robst == ROBST_NORMAL) + subyte(&m->m_robstate, ROBST_OWNER_DEAD); + do_unlock_umutex(td, m, TRUE); } static int @@ -2413,18 +2690,17 @@ umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - error = do_unlock_umutex(td, m); + (void)do_unlock_umutex(td, m, FALSE); if (timeout != NULL) abs_timeout_init(&timo, clockid, ((wflags & CVWAIT_ABSTIME) != 0), timeout); - + umtxq_lock(&uq->uq_key); if (error == 0) { error = umtxq_sleep(uq, "ucond", timeout == NULL ? 
NULL : &timo); } - if ((uq->uq_flags & UQF_UMTXQ) == 0) error = 0; else { @@ -2449,7 +2725,6 @@ if (error == ERESTART) error = EINTR; } - umtxq_unlock(&uq->uq_key); umtx_key_release(&uq->uq_key); return (error); @@ -2461,27 +2736,32 @@ static int do_cv_signal(struct thread *td, struct ucond *cv) { + struct umtxq_queue *uh; + struct umtx_q *uq; struct umtx_key key; - int error, cnt, nwake; + int error; uint32_t flags; flags = fuword32(&cv->c_flags); if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0) return (error); + umtxq_lock(&key); umtxq_busy(&key); - cnt = umtxq_count(&key); - nwake = umtxq_signal(&key, 1); - if (cnt <= nwake) { + uh = umtxq_queue_lookup(&key, UMTX_SHARED_QUEUE); + if (uh == NULL || uh->length == 1) { umtxq_unlock(&key); - error = suword32( - __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0); + suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0); umtxq_lock(&key); } + if (uh != NULL) { + uq = TAILQ_FIRST(&uh->head); + umtxq_signal_thread(uq); + } umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); - return (error); + return (0); } static int @@ -2497,17 +2777,14 @@ umtxq_lock(&key); umtxq_busy(&key); - umtxq_signal(&key, INT_MAX); umtxq_unlock(&key); - - error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0); - + suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0); umtxq_lock(&key); + umtxq_signal(&key, INT_MAX); umtxq_unbusy(&key); umtxq_unlock(&key); - umtx_key_release(&key); - return (error); + return (0); } static int @@ -2903,6 +3180,78 @@ return (error); } +static int +robust_alloc(struct robust_info **robpp) +{ + struct umtx_q *uq; + + uq = curthread->td_umtxq; + if (uq->uq_robust_mutexes >= max_robust_mutexes) + return (ENOMEM); + uq->uq_robust_mutexes++; + *robpp = uma_zalloc(robust_zone, M_ZERO|M_WAITOK); + return (0); +} + +static void +robust_free(struct robust_info *robp) +{ + struct umtx_q *uq; + + uq = curthread->td_umtxq; + uq->uq_robust_mutexes--; + uma_zfree(robust_zone, robp); +} + +static unsigned int +robust_hash(struct umutex *m) +{ + unsigned n = (uintptr_t)m; + return ((n * GOLDEN_RATIO_PRIME) >> ROBUST_SHIFTS) % ROBUST_CHAINS; +} + +static void +robust_insert(struct thread *td, struct robust_info *rob) +{ + struct umtx_q *uq = td->td_umtxq; + int hash = robust_hash(rob->umtxp); + struct robust_chain *robc = &robust_chains[hash]; + + mtx_lock(&robc->lock); + rob->ownertd = td; + SLIST_INSERT_HEAD(&robc->rob_list, rob, hash_qe); + mtx_unlock(&robc->lock); + LIST_INSERT_HEAD(&uq->uq_rob_list, rob, td_qe); +} + +static void +robust_remove(struct thread *td, struct umutex *umtxp) +{ + struct robust_info *rob, *rob2; + int hash = robust_hash(umtxp); + struct robust_chain *robc = &robust_chains[hash]; + + rob2 = NULL; + mtx_lock(&robc->lock); + SLIST_FOREACH(rob, &robc->rob_list, hash_qe) { + if (rob->ownertd == td && + rob->umtxp == umtxp) { + if (rob2 == NULL) { + SLIST_REMOVE_HEAD(&robc->rob_list, hash_qe); + } else { + SLIST_REMOVE_AFTER(rob2, hash_qe); + } + break; + } + rob2 = rob; + } + mtx_unlock(&robc->lock); + if (rob != NULL) { + LIST_REMOVE(rob, td_qe); + robust_free(rob); + } +} + int sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap) /* struct umtx *umtx */ @@ -3120,7 +3469,7 @@ static int __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap) { - return do_unlock_umutex(td, uap->obj); + return do_unlock_umutex(td, uap->obj, (uap->val & UMUTEX_KEEP_STATE) != 0); } static int @@ -3581,6 +3930,7 @@ { td->td_umtxq = umtxq_alloc(); td->td_umtxq->uq_thread = td; + 
td->td_umtxq->uq_exiting = 0;
 }
 
 void
@@ -3599,6 +3949,7 @@
 
 	uq = td->td_umtxq;
 	uq->uq_inherited_pri = PRI_MAX;
+	uq->uq_exiting = 0;
 
 	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
 	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
@@ -3607,7 +3958,7 @@
 }
 
 /*
- * exec() hook.
+ * exec() hook, clean up the last thread's umtx info.
  */
 static void
 umtx_exec_hook(void *arg __unused, struct proc *p __unused,
@@ -3617,6 +3968,35 @@
 }
 
 /*
+ * exit1() hook, clean up the last thread's umtx info.
+ */
+static void
+umtx_exit_hook(void *arg __unused, struct proc *p __unused)
+{
+	struct umtx_q *uq = curthread->td_umtxq;
+
+	if (uq != NULL) {
+		uq->uq_exiting = 1;
+		umtx_thread_cleanup(curthread);
+	}
+}
+
+/*
+ * fork() hook. The first thread of the new process never calls
+ * umtx_thread_alloc() again, so clear uq_exiting here.
+ */
+static void
+umtx_fork_hook(void *arg __unused, struct proc *p1 __unused,
+	struct proc *p2, int flags __unused)
+{
+	struct thread *td = FIRST_THREAD_IN_PROC(p2);
+	struct umtx_q *uq = td->td_umtxq;
+
+	if (uq != NULL)
+		uq->uq_exiting = 0;
+}
+
+/*
  * thread_exit() hook.
  */
 void
@@ -3631,12 +4011,16 @@
 static void
 umtx_thread_cleanup(struct thread *td)
 {
+	struct robust_info *rob;
 	struct umtx_q *uq;
 	struct umtx_pi *pi;
 
 	if ((uq = td->td_umtxq) == NULL)
 		return;
 
+	while ((rob = LIST_FIRST(&uq->uq_rob_list)) != NULL)
+		do_cleanup_umutex(td, rob->umtxp);
+
 	mtx_lock_spin(&umtx_lock);
 	uq->uq_inherited_pri = PRI_MAX;
 	while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
@@ -3644,6 +4028,7 @@
 	}
 	mtx_unlock_spin(&umtx_lock);
+
 	thread_lock(td);
 	sched_lend_user_prio(td, PRI_MAX);
 	thread_unlock(td);
--- src/sys/sys/_pthreadtypes.h	2009-03-14 20:15:18.000000000 0000
+++ src/sys/sys/_pthreadtypes.h	2012-05-08 08:38:36.000000000 0000
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2010 David Xu
  * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
  * Copyright (c) 1995-1998 by John Birrell
  * All rights reserved.
@@ -36,6 +37,10 @@
 #ifndef _SYS__PTHREADTYPES_H_
 #define _SYS__PTHREADTYPES_H_
 
+#include
+#include
+#include
+
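Since pthread_mutex_t now embeds the structure rather than pointing at a library-allocated object, its layout must stay in lock-step with the kernel's struct umutex (see the sys/_umtx.h diff below): the lock word, owner fields, and robust-state byte overlay each other. A compile-time check one might add, purely illustrative and assuming both definitions are in scope (C11 _Static_assert):

    #include <stddef.h>

    /* The userland mutex must be byte-for-byte compatible with
     * the kernel's view of it. */
    _Static_assert(sizeof(struct pthread_mutex) == sizeof(struct umutex),
        "pthread_mutex must mirror struct umutex");
    _Static_assert(offsetof(struct pthread_mutex, __lockword) ==
        offsetof(struct umutex, m_owner),
        "__lockword must overlay m_owner");

 /*
 * Forward structure definitions.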
* @@ -66,17 +71,69 @@ #define _PTHREAD_T_DECLARED #endif typedef struct pthread_attr *pthread_attr_t; -typedef struct pthread_mutex *pthread_mutex_t; +typedef struct pthread_mutex pthread_mutex_t; typedef struct pthread_mutex_attr *pthread_mutexattr_t; -typedef struct pthread_cond *pthread_cond_t; +typedef struct pthread_cond pthread_cond_t; typedef struct pthread_cond_attr *pthread_condattr_t; typedef int pthread_key_t; typedef struct pthread_once pthread_once_t; -typedef struct pthread_rwlock *pthread_rwlock_t; +typedef struct pthread_rwlock pthread_rwlock_t; typedef struct pthread_rwlockattr *pthread_rwlockattr_t; -typedef struct pthread_barrier *pthread_barrier_t; +typedef struct pthread_barrier pthread_barrier_t; typedef struct pthread_barrierattr *pthread_barrierattr_t; -typedef struct pthread_spinlock *pthread_spinlock_t; +typedef struct pthread_spinlock pthread_spinlock_t; + +struct pthread_mutex { + volatile __uint32_t __lockword; + __uint32_t __flags; + __uint32_t __ceiling; + __uint32_t __saved_ceiling; + union { + struct pthread *__pthread; + __uint64_t __ithread; + } __ownertd; + __uint32_t __ownerpid; + __uint8_t __robstate; + __uint8_t __userf; + __uint16_t __magic; + __uint32_t __recurse; +}; + +struct pthread_cond { + __uint16_t __magic; + __uint16_t __pad; + __uint32_t __has_user_waiters; + __uint32_t __has_kern_waiters; + __uint32_t __flags; + __uint32_t __clock_id; +}; + +struct pthread_rwlock { + __uint16_t __magic; + __uint16_t __pad; + __uint32_t __pad2; + union { + struct pthread *__ownertd; + __uint32_t __ownertid; + char __ownerpad[8]; + } __ownerdata; + __uint32_t __state; + __uint32_t __flags; + __uint32_t __blocked_readers; + __uint32_t __blocked_writers; +}; + +struct pthread_barrier { + pthread_mutex_t __lock; + pthread_cond_t __cond; + __uint32_t __count; + __uint32_t __enter; + __uint32_t __leave; +}; + +struct pthread_spinlock { + __uint32_t __lock; +}; /* * Additional type definitions: @@ -92,7 +149,6 @@ */ struct pthread_once { int state; - pthread_mutex_t mutex; }; #endif /* ! 
_SYS__PTHREADTYPES_H_ */
--- src/sys/sys/_umtx.h	2012-02-25 02:15:23.000000000 0000
+++ src/sys/sys/_umtx.h	2012-04-18 09:19:15.000000000 0000
@@ -38,17 +38,25 @@
 };
 
 struct umutex {
-	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
-	__uint32_t	m_flags;	/* Flags of the mutex */
-	__uint32_t	m_ceilings[2];	/* Priority protect ceiling */
-	__uint32_t	m_spare[4];
+	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
+	__uint32_t	m_flags;	/* Flags of the mutex */
+	__uint32_t	m_ceilings[2];	/* Priority protect ceiling */
+	union {
+		struct pthread	*m_pthread;	/* Owner thread pointer */
+		char	m_pad[8];
+	} m_ownertd;
+	__uint32_t	m_ownerpid;
+	__uint8_t	m_robstate;	/* State flags */
+	__uint8_t	m_userf;
+	__uint16_t	m_magic;
+	__uint32_t	m_rcount;	/* Recursive count */
 };
 
 struct ucond {
 	volatile __uint32_t	c_has_waiters;	/* Has waiters in kernel */
 	__uint32_t	c_flags;	/* Flags of the condition variable */
-	__uint32_t	c_clockid;	/* Clock id */
-	__uint32_t	c_spare[1];	/* Spare space */
+	__uint32_t	c_clockid;	/* Clock id */
+	__uint32_t	c_spare[1];	/* Spare space */
 };
 
 struct urwlock {
--- src/sys/sys/errno.h	2011-04-04 17:40:18.000000000 0000
+++ src/sys/sys/errno.h	2011-04-22 06:47:54.000000000 0000
@@ -178,8 +178,10 @@
 #define	ECAPMODE	94	/* Not permitted in capability mode */
 #endif /* _POSIX_SOURCE */
 
+#define	EOWNERDEAD	95
+#define	ENOTRECOVERABLE	96
 #ifndef _POSIX_SOURCE
-#define	ELAST		94	/* Must be equal largest errno */
+#define	ELAST		96	/* Must be equal largest errno */
 #endif /* _POSIX_SOURCE */
 
 #ifdef _KERNEL
--- src/sys/sys/proc.h	2012-05-30 16:45:21.000000000 0000
+++ src/sys/sys/proc.h	2012-06-07 09:00:40.000000000 0000
@@ -546,6 +546,7 @@
 	int	p_pendingcnt;	/* how many signals are pending */
 	struct itimers	*p_itimers;	/* (c) POSIX interval timers. */
 	struct procdesc	*p_procdesc;	/* (e) Process descriptor, if any. */
+	struct robust_list *p_robustlist;
 /* End area that is zeroed on creation. */
 #define	p_endzero	p_magic
--- src/sys/sys/umtx.h	2012-04-05 02:30:18.000000000 0000
+++ src/sys/sys/umtx.h	2012-04-28 08:21:24.000000000 0000
@@ -36,23 +36,41 @@
 
 #define	UMTX_UNOWNED	0x0
 #define	UMTX_CONTESTED	LONG_MIN
 
-#define	USYNC_PROCESS_SHARED	0x0001	/* Process shared sync objs */
+#define	USYNC_PROCESS_SHARED	0x0001	/* Process shared sync objs */
+#define	USYNC_PROCESS	USYNC_PROCESS_SHARED
 
 #define	UMUTEX_UNOWNED	0x0
 #define	UMUTEX_CONTESTED	0x80000000U
 
-#define	UMUTEX_ERROR_CHECK	0x0002	/* Error-checking mutex */
-#define	UMUTEX_PRIO_INHERIT	0x0004	/* Priority inherited mutex */
-#define	UMUTEX_PRIO_PROTECT	0x0008	/* Priority protect mutex */
+#define	UMUTEX_ERROR_CHECK	0x00000002	/* Error-checking mutex */
+#define	UMUTEX_PRIO_INHERIT	0x00000004	/* Priority inherited mutex */
+#define	UMUTEX_PRIO_PROTECT	0x00000008	/* Deprecated */
+#define	UMUTEX_SIMPLE	0x00000010	/* Use simple lock id. */
+#define	UMUTEX_ROBUST	0x00000020
+#define	UMUTEX_RECURSIVE	0x00000040
+#define	UMUTEX_PRIO_PROTECT2	0x00000080
+#define	UMUTEX_ADAPTIVE_SPIN	0x00000100
+#define	UMUTEX_USER_FLAG_1	0x00000200
+
+/* Mutex state flags. */
+#define	ROBST_NORMAL	0
+#define	ROBST_OWNER_DEAD	1
+#define	ROBST_NOTRECOVERABLE	2
+
+/* Special owner ids */
+#define	UMUTEX_SIMPLE_OWNER	1	/* The simple mutex's lock ID. */
+
+/* Unlock flags */
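The new m_robstate byte is a small one-way state machine: ROBST_NORMAL until an owner exits while holding the mutex, ROBST_OWNER_DEAD while the next locker is expected to repair the protected state (the UMUTEX_KEEP_STATE unlock flag just below preserves this state across a cleanup unlock), and ROBST_NOTRECOVERABLE once a dead-owner holder unlocks without repairing. A sketch of how the states map onto the errno values added above:

    #include <errno.h>

    static int
    robstate_to_error(int robstate)
    {
        switch (robstate) {
        case ROBST_NORMAL:          /* healthy, lock as usual */
            return (0);
        case ROBST_OWNER_DEAD:      /* lock granted, data suspect */
            return (EOWNERDEAD);
        case ROBST_NOTRECOVERABLE:  /* permanently poisoned */
            return (ENOTRECOVERABLE);
        default:
            return (EINVAL);
        }
    }

+#define	UMUTEX_KEEP_STATE	0x01	/* Keep robust state.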
*/ /* urwlock flags */ -#define URWLOCK_PREFER_READER 0x0002 +#define URWLOCK_PREFER_READER 0x0002 -#define URWLOCK_WRITE_OWNER 0x80000000U -#define URWLOCK_WRITE_WAITERS 0x40000000U -#define URWLOCK_READ_WAITERS 0x20000000U -#define URWLOCK_MAX_READERS 0x1fffffffU -#define URWLOCK_READER_COUNT(c) ((c) & URWLOCK_MAX_READERS) +#define URWLOCK_WRITE_OWNER 0x80000000U +#define URWLOCK_WRITE_WAITERS 0x40000000U +#define URWLOCK_READ_WAITERS 0x20000000U +#define URWLOCK_MAX_READERS 0x1fffffffU +#define URWLOCK_READER_COUNT(c) ((c) & URWLOCK_MAX_READERS) /* _usem flags */ #define SEM_NAMED 0x0002 @@ -79,7 +97,7 @@ #define UMTX_OP_MUTEX_WAKE 18 /* deprecated */ #define UMTX_OP_SEM_WAIT 19 #define UMTX_OP_SEM_WAKE 20 -#define UMTX_OP_NWAKE_PRIVATE 21 +#define UMTX_OP_NWAKE_PRIVATE 21 #define UMTX_OP_MUTEX_WAKE2 22 #define UMTX_OP_MAX 23 --- /dev/null 2012-06-07 17:11:00.000000000 +0800 +++ src/lib/libthr/thread/thr_malloc.c 2012-05-10 10:17:21.000000000 +0800 @@ -0,0 +1,542 @@ +/*- + * Copyright (c) 1983 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/ +static char *rcsid = "$FreeBSD: head/libexec/rtld-elf/malloc.c 233306 2012-03-22 14:11:10Z kib $"; +#endif /* LIBC_SCCS and not lint */ + +/* + * malloc.c (Caltech) 2/21/82 + * Chris Kingsley, kingsley@cit-20. + * + * This is a very fast storage allocator. It allocates blocks of a small + * number of different sizes, and keeps free lists of each size. Blocks that + * don't exactly fit are passed up to the next larger size. In this + * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. + * This is designed for use in a virtual memory environment. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "thr_private.h" + +/* + * Pre-allocate mmap'ed pages + */ +#define NPOOLPAGES (32*1024/pagesz) +static caddr_t pagepool_start, pagepool_end; + +/* + * The overhead on a block is at least 4 bytes. When free, this space + * contains a pointer to the next free block, and the bottom two bits must + * be zero. When in use, the first byte is set to MAGIC, and the second + * byte is the size index. The remaining bytes are for alignment. + * If range checking is enabled then a second word holds the size of the + * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). + * The order of elements is critical: ov_magic must overlay the low order + * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. + */ +union overhead { + union overhead *ov_next; /* when free */ + struct { + u_char ovu_magic; /* magic number */ + u_char ovu_index; /* bucket # */ +#ifdef RCHECK + u_short ovu_rmagic; /* range magic number */ + u_int ovu_size; /* actual block size */ +#endif + } ovu; +#define ov_magic ovu.ovu_magic +#define ov_index ovu.ovu_index +#define ov_rmagic ovu.ovu_rmagic +#define ov_size ovu.ovu_size +}; + +#define MAGIC 0xef /* magic # on accounting info */ +#define RMAGIC 0x5555 /* magic # on range info */ + +#ifdef RCHECK +#define RSLOP sizeof (u_short) +#else +#define RSLOP 0 +#endif + +/* + * nextf[i] is the pointer to the next free block of size 2^(i+3). The + * smallest allocatable block is 8 bytes. The overhead information + * precedes the data area returned to the user. + */ +#define NBUCKETS 30 +static union overhead *nextf[NBUCKETS]; + +static int pagesz; /* page size */ +static int pagebucket; /* page size bucket */ + +static void morecore(int); +static int morepages(int); +static int findbucket(union overhead *, int); + +#ifdef MSTATS +/* + * nmalloc[i] is the difference between the number of mallocs and frees + * for a given block size. + */ +static u_int nmalloc[NBUCKETS]; +#include +#endif + +#if defined(MALLOC_DEBUG) || defined(RCHECK) +#define ASSERT(p) if (!(p)) botch("p") +#include +static void +botch(s) + char *s; +{ + fprintf(stderr, "\r\nassertion botched: %s\r\n", s); + (void) fflush(stderr); /* just in case user buffered it */ + abort(); +} +#else +#define ASSERT(p) +#endif + +/* Debugging stuff */ +#define TRACE() rtld_printf("TRACE %s:%d\n", __FILE__, __LINE__) + +static int pagesize; + +static int +lgetpagesize(void) +{ + int mib[2]; + size_t size; + + if (pagesize != 0) + return (pagesize); + + mib[0] = CTL_HW; + mib[1] = HW_PAGESIZE; + size = sizeof(pagesize); + if (sysctl(mib, 2, &pagesize, &size, NULL, 0) == -1) + return (-1); + return (pagesize); + +} + +static void * +lmalloc_impl(size_t nbytes) +{ + register union overhead *op; + register int bucket; + register long n; + register unsigned amt; + + /* + * First time malloc is called, setup page size and + * align break pointer so all data will be page aligned. + */ + if (pagesz == 0) { + pagesz = n = lgetpagesize(); + if (morepages(NPOOLPAGES) == 0) + return NULL; + op = (union overhead *)(pagepool_start); + n = n - sizeof (*op) - ((long)op & (n - 1)); + if (n < 0) + n += pagesz; + if (n) { + pagepool_start += n; + } + bucket = 0; + amt = 8; + while ((unsigned)pagesz > amt) { + amt <<= 1; + bucket++; + } + pagebucket = bucket; + } + /* + * Convert amount of memory requested into closest block size + * stored in hash buckets which satisfies request. 
+ * Account for space used per block for accounting. + */ + if (nbytes <= (unsigned long)(n = pagesz - sizeof (*op) - RSLOP)) { +#ifndef RCHECK + amt = 8; /* size of first bucket */ + bucket = 0; +#else + amt = 16; /* size of first bucket */ + bucket = 1; +#endif + n = -(sizeof (*op) + RSLOP); + } else { + amt = pagesz; + bucket = pagebucket; + } + while (nbytes > (size_t)amt + n) { + amt <<= 1; + if (amt == 0) + return (NULL); + bucket++; + } + /* + * If nothing in hash bucket right now, + * request more memory from the system. + */ + if ((op = nextf[bucket]) == NULL) { + morecore(bucket); + if ((op = nextf[bucket]) == NULL) + return (NULL); + } + /* remove from linked list */ + nextf[bucket] = op->ov_next; + op->ov_magic = MAGIC; + op->ov_index = bucket; +#ifdef MSTATS + nmalloc[bucket]++; +#endif +#ifdef RCHECK + /* + * Record allocated size of block and + * bound space with magic numbers. + */ + op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); + op->ov_rmagic = RMAGIC; + *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; +#endif + return ((char *)(op + 1)); +} + +void * +lcalloc(size_t num, size_t size) +{ + void *ret; + + if (size != 0 && (num * size) / size != num) { + /* size_t overflow. */ + return (NULL); + } + + if ((ret = lmalloc(num * size)) != NULL) + memset(ret, 0, num * size); + + return (ret); +} + +/* + * Allocate more memory to the indicated bucket. + */ +static void +morecore(int bucket) +{ + register union overhead *op; + register int sz; /* size of desired block */ + int amt; /* amount to allocate */ + int nblks; /* how many blocks we get */ + + /* + * sbrk_size <= 0 only for big, FLUFFY, requests (about + * 2^30 bytes on a VAX, I think) or for a negative arg. + */ + sz = 1 << (bucket + 3); +#ifdef MALLOC_DEBUG + ASSERT(sz > 0); +#else + if (sz <= 0) + return; +#endif + if (sz < pagesz) { + amt = pagesz; + nblks = amt / sz; + } else { + amt = sz + pagesz; + nblks = 1; + } + if (amt > pagepool_end - pagepool_start) + if (morepages(amt/pagesz + NPOOLPAGES) == 0) + return; + op = (union overhead *)pagepool_start; + pagepool_start += amt; + + /* + * Add new memory allocated to that on + * free list for this hash bucket. + */ + nextf[bucket] = op; + while (--nblks > 0) { + op->ov_next = (union overhead *)((caddr_t)op + sz); + op = (union overhead *)((caddr_t)op + sz); + } +} + +static void +lfree_impl(void *cp) +{ + register int size; + register union overhead *op; + + if (cp == NULL) + return; + op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); +#ifdef MALLOC_DEBUG + ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ +#else + if (op->ov_magic != MAGIC) + return; /* sanity */ +#endif +#ifdef RCHECK + ASSERT(op->ov_rmagic == RMAGIC); + ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); +#endif + size = op->ov_index; + ASSERT(size < NBUCKETS); + op->ov_next = nextf[size]; /* also clobbers ov_magic */ + nextf[size] = op; +#ifdef MSTATS + nmalloc[size]--; +#endif +} + +/* + * When a program attempts "storage compaction" as mentioned in the + * old malloc man page, it realloc's an already freed block. Usually + * this is the last block it freed; occasionally it might be farther + * back. We have to search all the free lists for the block in order + * to determine its bucket: 1st we make one pass thru the lists + * checking only the first block in each; if that fails we search + * ``realloc_srchlen'' blocks in each list for a match (the variable + * is extern so the caller can modify it). 
If that fails we just copy + * however many bytes was given to realloc() and hope it's not huge. + */ +static int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ + +static void * +lrealloc_impl(void *cp, size_t nbytes) +{ + register u_int onb; + register int i; + union overhead *op; + char *res; + int was_alloced = 0; + + if (cp == NULL) + return (lmalloc_impl(nbytes)); + op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); + if (op->ov_magic == MAGIC) { + was_alloced++; + i = op->ov_index; + } else { + /* + * Already free, doing "compaction". + * + * Search for the old block of memory on the + * free list. First, check the most common + * case (last element free'd), then (this failing) + * the last ``realloc_srchlen'' items free'd. + * If all lookups fail, then assume the size of + * the memory block being realloc'd is the + * largest possible (so that all "nbytes" of new + * memory are copied into). Note that this could cause + * a memory fault if the old area was tiny, and the moon + * is gibbous. However, that is very unlikely. + */ + if ((i = findbucket(op, 1)) < 0 && + (i = findbucket(op, realloc_srchlen)) < 0) + i = NBUCKETS; + } + onb = 1 << (i + 3); + if (onb < (u_int)pagesz) + onb -= sizeof (*op) + RSLOP; + else + onb += pagesz - sizeof (*op) - RSLOP; + /* avoid the copy if same size block */ + if (was_alloced) { + if (i) { + i = 1 << (i + 2); + if (i < pagesz) + i -= sizeof (*op) + RSLOP; + else + i += pagesz - sizeof (*op) - RSLOP; + } + if (nbytes <= onb && nbytes > (size_t)i) { +#ifdef RCHECK + op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); + *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; +#endif + return(cp); + } else + lfree_impl(cp); + } + if ((res = lmalloc_impl(nbytes)) == NULL) + return (NULL); + if (cp != res) /* common optimization if "compacting" */ + bcopy(cp, res, (nbytes < onb) ? nbytes : onb); + return (res); +} + +/* + * Search ``srchlen'' elements of each free list for a block whose + * header starts at ``freep''. If srchlen is -1 search the whole list. + * Return bucket number, or -1 if not found. + */ +static int +findbucket(union overhead *freep, int srchlen) +{ + register union overhead *p; + register int i, j; + + for (i = 0; i < NBUCKETS; i++) { + j = 0; + for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { + if (p == freep) + return (i); + j++; + } + } + return (-1); +} + +#ifdef MSTATS +/* + * mstats - print out statistics about malloc + * + * Prints two lines of numbers, one showing the length of the free list + * for each size category, the second showing the number of mallocs - + * frees for each size category. 
+ */ +mstats(s) + char *s; +{ + register int i, j; + register union overhead *p; + int totfree = 0, + totused = 0; + + fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); + for (i = 0; i < NBUCKETS; i++) { + for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) + ; + fprintf(stderr, " %d", j); + totfree += j * (1 << (i + 3)); + } + fprintf(stderr, "\nused:\t"); + for (i = 0; i < NBUCKETS; i++) { + fprintf(stderr, " %d", nmalloc[i]); + totused += nmalloc[i] * (1 << (i + 3)); + } + fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", + totused, totfree); +} +#endif + + +static int +morepages(int n) +{ + int fd = -1; + int offset; + + if (pagepool_end - pagepool_start > pagesz) { + caddr_t addr = (caddr_t) + (((long)pagepool_start + pagesz - 1) & ~(pagesz - 1)); + if (munmap(addr, pagepool_end - addr) != 0) + _thread_printf(STDERR_FILENO, "morepages: munmap %p", + addr); + } + + offset = (long)pagepool_start - ((long)pagepool_start & ~(pagesz - 1)); + + if ((pagepool_start = mmap(0, n * pagesz, + PROT_READ|PROT_WRITE, + MAP_ANON|MAP_COPY, fd, 0)) == (caddr_t)-1) { + _thread_printf(STDERR_FILENO, "Cannot map anonymous memory\n"); + return 0; + } + pagepool_end = pagepool_start + n * pagesz; + pagepool_start += offset; + + return n; +} + +void * +lmalloc(size_t nbytes) +{ + struct pthread *curthread; + void *p; + + curthread = _get_curthread(); + if (curthread != NULL) + THR_UMUTEX_LOCK(curthread, &_lmalloc_lock); + p = lmalloc_impl(nbytes); + if (curthread != NULL) + THR_UMUTEX_UNLOCK(curthread, &_lmalloc_lock); + return (p); +} + +void * +lrealloc(void *cp, size_t nbytes) +{ + struct pthread *curthread; + void *p; + + curthread = _get_curthread(); + if (curthread != NULL) + THR_UMUTEX_LOCK(curthread, &_lmalloc_lock); + p = lrealloc_impl(cp, nbytes); + if (curthread != NULL) + THR_UMUTEX_UNLOCK(curthread, &_lmalloc_lock); + return (p); +} + +void +lfree(void *cp) +{ + struct pthread *curthread; + + curthread = _get_curthread(); + if (curthread != NULL) + THR_UMUTEX_LOCK(curthread, &_lmalloc_lock); + lfree_impl(cp); + if (curthread != NULL) + THR_UMUTEX_UNLOCK(curthread, &_lmalloc_lock); +}
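The lmalloc()/lrealloc()/lfree() wrappers above give libthr a private heap, serialized by _lmalloc_lock, so the library's own bookkeeping never recurses into the public malloc during fork() or mutex initialization. The underlying Kingsley allocator keeps blocks of size 2^(i+3) on free list nextf[i]; a request lands in the first bucket whose block covers the payload plus the per-block overhead. A simplified sketch of the bucket selection performed by lmalloc_impl() for small requests:

    #include <stddef.h>

    static int
    bucket_for(size_t nbytes, size_t overhead)
    {
        size_t amt = 8;     /* block size of bucket 0: 2^3 */
        int bucket = 0;

        /* Double the block size until payload plus overhead fits. */
        while (nbytes + overhead > amt) {
            amt <<= 1;
            bucket++;
        }
        return (bucket);    /* serviced from nextf[bucket] */
    }

For example, a 100-byte request with 4 bytes of overhead needs 104 bytes and lands in bucket 4, whose blocks are 128 bytes.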