--- src/gnu/lib/libgcc/Makefile	2010-11-14 02:35:32.000000000 +0000
+++ src/gnu/lib/libgcc/Makefile	2010-12-01 00:14:37.000000000 +0000
@@ -3,7 +3,7 @@
 GCCDIR=	${.CURDIR}/../../../contrib/gcc
 GCCLIB=	${.CURDIR}/../../../contrib/gcclibs
 
-SHLIB_NAME=	libgcc_s.so.1
+SHLIB_NAME=	libgcc_s.so.2
 SHLIBDIR?=	/lib
 
 .include
--- src/gnu/lib/libgomp/Makefile	2010-08-28 15:36:01.000000000 +0000
+++ src/gnu/lib/libgomp/Makefile	2010-11-25 14:32:12.000000000 +0000
@@ -7,7 +7,7 @@
 .PATH: ${SRCDIR} ${SRCDIR}/config/posix
 
 LIB=	gomp
-SHLIB_MAJOR=	1
+SHLIB_MAJOR=	2
 
 SRCS=	alloc.c barrier.c critical.c env.c \
 	error.c iter.c loop.c ordered.c parallel.c sections.c \
@@ -28,18 +28,18 @@
     (${MACHINE_CPUARCH} == mips && \
     (!defined(TARGET_ABI) || ${TARGET_ABI} != "n64"))
 OMP_LOCK_ALIGN = 4
-OMP_LOCK_KIND=	4
-OMP_LOCK_SIZE=	4
+OMP_LOCK_KIND=	8
+OMP_LOCK_SIZE=	32
 OMP_NEST_LOCK_ALIGN= 4
 OMP_NEST_LOCK_KIND= 8
-OMP_NEST_LOCK_SIZE= 8
+OMP_NEST_LOCK_SIZE= 36
 .else
 OMP_LOCK_ALIGN = 8
 OMP_LOCK_KIND=	8
-OMP_LOCK_SIZE=	8
+OMP_LOCK_SIZE=	32
 OMP_NEST_LOCK_ALIGN= 8
 OMP_NEST_LOCK_KIND= 8
-OMP_NEST_LOCK_SIZE= 16
+OMP_NEST_LOCK_SIZE= 40
 .endif
 
 gstdint.h:
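
[Reviewer note] The OMP_LOCK_SIZE/OMP_NEST_LOCK_SIZE bumps above exist because omp_lock_t must reserve room for the new flat pthread_mutex_t layout (32 bytes, 36/40 for the nested variant). A minimal compile-time guard for this invariant, assuming C11 _Static_assert; the literal 32 is a stand-in for the Makefile value:

#include <pthread.h>

/* 32 mirrors OMP_LOCK_SIZE above; revisit whenever the mutex ABI changes. */
_Static_assert(sizeof(pthread_mutex_t) <= 32,
    "omp_lock_t reserves 32 bytes; pthread_mutex_t must fit");
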
--- src/gnu/lib/libstdc++/Makefile	2010-09-07 08:35:28.000000000 +0000
+++ src/gnu/lib/libstdc++/Makefile	2010-11-23 01:58:38.000000000 +0000
@@ -11,7 +11,7 @@
 	${SRCDIR}/include ${SUPDIR} ${GCCDIR} ${GCCLIB}/libiberty
 
 LIB=	stdc++
-SHLIB_MAJOR=	6
+SHLIB_MAJOR=	7
 
 CFLAGS+= -DIN_GLIBCPP_V3 -DHAVE_CONFIG_H
 .if ${MACHINE_CPUARCH} == "arm"
--- src/include/pthread.h	2010-10-18 05:35:39.000000000 +0000
+++ src/include/pthread.h	2010-11-30 05:10:43.000000000 +0000
@@ -41,7 +41,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -92,15 +92,25 @@
 /*
  * Static once initialization values.
  */
-#define PTHREAD_ONCE_INIT	{ PTHREAD_NEEDS_INIT, NULL }
+#define PTHREAD_ONCE_INIT	{ PTHREAD_NEEDS_INIT }
+
+#define _PTHREAD_MUTEX_MAGIC	0x3643
+#define _PTHREAD_COND_MAGIC	0x4723
+#define _PTHREAD_RWLOCK_MAGIC	0x5821
 
 /*
  * Static initialization values.
  */
-#define PTHREAD_MUTEX_INITIALIZER	NULL
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP	((pthread_mutex_t)1)
-#define PTHREAD_COND_INITIALIZER	NULL
-#define PTHREAD_RWLOCK_INITIALIZER	NULL
+#define PTHREAD_MUTEX_INITIALIZER \
+	{_PTHREAD_MUTEX_MAGIC, PTHREAD_MUTEX_DEFAULT, 0, {NULL}, 0, 0, 0, \
+	 0x0020, {0, 0}, 0}
+#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
+	{_PTHREAD_MUTEX_MAGIC, PTHREAD_MUTEX_ADAPTIVE_NP, {NULL}, 0, 2000, 0,\
+	 0x0020, {0, 0}}
+#define PTHREAD_COND_INITIALIZER \
+	{_PTHREAD_COND_MAGIC, 0, 0, 0, 0, CLOCK_REALTIME}
+#define PTHREAD_RWLOCK_INITIALIZER \
+	{_PTHREAD_RWLOCK_MAGIC, 0, 0, {NULL}, 0, 0, 0, 0}
 
 /*
  * Default attribute arguments (draft 4, deprecated).
  */
@@ -135,16 +145,10 @@
 #define PTHREAD_MUTEX_DEFAULT	PTHREAD_MUTEX_ERRORCHECK
 
-enum pthread_rwlocktype_np
-{
-	PTHREAD_RWLOCK_PREFER_READER_NP,
-	PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,
-	PTHREAD_RWLOCK_PREFER_WRITER_NP,
-	PTHREAD_RWLOCK_DEFAULT_NP =
-	    PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
-};
+#define PTHREAD_MUTEX_STALLED	0
+#define PTHREAD_MUTEX_ROBUST	1
 
-struct _pthread_cleanup_info {
+struct pthread_cleanup_info {
 	__uintptr_t	pthread_cleanup_pad[8];
 };
@@ -178,7 +182,7 @@
 #define pthread_cleanup_push(cleanup_routine, cleanup_arg)		\
 	{								\
-		struct _pthread_cleanup_info __cleanup_info__;		\
+		struct pthread_cleanup_info __cleanup_info__;		\
 		__pthread_cleanup_push_imp(cleanup_routine, cleanup_arg,\
 		    &__cleanup_info__);					\
 		{
@@ -214,13 +218,18 @@
 int		pthread_key_create(pthread_key_t *, void (*) (void *));
 int		pthread_key_delete(pthread_key_t);
-int		pthread_mutexattr_init(pthread_mutexattr_t *);
 int		pthread_mutexattr_destroy(pthread_mutexattr_t *);
+int		pthread_mutexattr_getrobust(const pthread_mutexattr_t *__restrict,
+			int *__restrict);
 int		pthread_mutexattr_getpshared(const pthread_mutexattr_t *, int *);
 int		pthread_mutexattr_gettype(pthread_mutexattr_t *, int *);
+int		pthread_mutexattr_init(pthread_mutexattr_t *);
 int		pthread_mutexattr_settype(pthread_mutexattr_t *, int);
 int		pthread_mutexattr_setpshared(pthread_mutexattr_t *, int);
+int		pthread_mutexattr_setrobust(pthread_mutexattr_t *,
+			int);
+int		pthread_mutex_consistent(pthread_mutex_t *);
 int		pthread_mutex_destroy(pthread_mutex_t *);
 int		pthread_mutex_init(pthread_mutex_t *,
			const pthread_mutexattr_t *);
@@ -299,7 +308,7 @@
 #endif
 
 void		__pthread_cleanup_push_imp(void (*)(void *), void *,
-			struct _pthread_cleanup_info *);
+			struct pthread_cleanup_info *);
 void		__pthread_cleanup_pop_imp(int);
 
 __END_DECLS
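
[Reviewer note] With PTHREAD_MUTEX_STALLED/PTHREAD_MUTEX_ROBUST and pthread_mutex_consistent() now declared, the header exposes the POSIX robust-mutex protocol. A minimal usage sketch with standard POSIX semantics (error handling mostly trimmed):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

static void
setup(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
}

static int
lock_shared_state(void)
{
	int error = pthread_mutex_lock(&m);

	if (error == EOWNERDEAD) {
		/* Previous owner died; repair the protected data ... */
		/* ... then mark the mutex usable again. */
		pthread_mutex_consistent(&m);
		error = 0;
	}
	/* ENOTRECOVERABLE: a recoverer unlocked without consistent(). */
	return (error);
}

int
main(void)
{
	setup();
	return (lock_shared_state());
}
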
--- src/include/unistd.h	2010-10-29 13:35:47.000000000 +0000
+++ src/include/unistd.h	2010-11-30 11:44:40.000000000 +0000
@@ -111,7 +111,7 @@
 #define _POSIX_THREAD_PRIO_INHERIT	200112L
 #define _POSIX_THREAD_PRIO_PROTECT	200112L
 #define _POSIX_THREAD_PRIORITY_SCHEDULING 200112L
-#define _POSIX_THREAD_PROCESS_SHARED	-1
+#define _POSIX_THREAD_PROCESS_SHARED	200112L
 #define _POSIX_THREAD_SAFE_FUNCTIONS	-1
 #define _POSIX_THREAD_SPORADIC_SERVER	-1
 #define _POSIX_THREADS			200112L
--- src/lib/libc/gen/Symbol.map	2010-11-02 17:35:35.000000000 +0000
+++ src/lib/libc/gen/Symbol.map	2010-11-26 07:33:52.000000000 +0000
@@ -383,6 +383,7 @@
 FBSDprivate_1.0 {
 	/* needed by thread libraries */
 	__thr_jtable;
+	__thr_jtable12;
 
 	_pthread_atfork;
 	_pthread_attr_destroy;
--- src/lib/libc/gen/_pthread_stubs.c	2010-09-25 02:35:28.000000000 +0000
+++ src/lib/libc/gen/_pthread_stubs.c	2010-11-30 05:12:00.000000000 +0000
@@ -274,6 +274,94 @@
 STUB_FUNC1(_pthread_cancel_enter, PJT_CANCEL_ENTER, int, int)
 STUB_FUNC1(_pthread_cancel_leave, PJT_CANCEL_LEAVE, int, int)
 
+#define FUNC12_TYPE(name)	__CONCAT(name, _func12_t)
+#define PJT_ENTRY(entry)	(pthread_func_t)entry
+
+pthread_func_t __thr_jtable12[PJT12_MAX] = {
+	PJT_ENTRY(stub_zero),	/* PJT12_COND_BROADCAST */
+	PJT_ENTRY(stub_zero),	/* PJT12_COND_DESTROY */
+	PJT_ENTRY(stub_zero),	/* PJT12_COND_INIT */
+	PJT_ENTRY(stub_zero),	/* PJT12_COND_SIGNAL */
+	PJT_ENTRY(stub_zero),	/* PJT12_COND_TIMEDWAIT */
+	PJT_ENTRY(stub_zero),	/* PJT12_COND_WAIT */
+	PJT_ENTRY(stub_zero),	/* PJT12_MUTEX_DESTROY */
+	PJT_ENTRY(stub_zero),	/* PJT12_MUTEX_INIT */
+	PJT_ENTRY(stub_zero),	/* PJT12_MUTEX_LOCK */
+	PJT_ENTRY(stub_zero),	/* PJT12_MUTEX_TRYLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT12_MUTEX_UNLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT12_RWLOCK_DESTROY */
+	PJT_ENTRY(stub_zero),	/* PJT12_RWLOCK_INIT */
+	PJT_ENTRY(stub_zero),	/* PJT12_RWLOCK_RDLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT12_RWLOCK_TRYRDLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT12_RWLOCK_TRYWRLOCK */
+	PJT_ENTRY(stub_zero),	/* PJT12_RWLOCK_UNLOCK */
+	PJT_ENTRY(stub_zero)	/* PJT12_RWLOCK_WRLOCK */
+};
+
+#define SYM_FB12(sym)			__CONCAT(sym, _fb12)
+#define SYM_COMPAT(sym, impl, ver)	__sym_compat(sym, impl, ver)
+#define SYM_DEFAULT(sym, impl, ver)	__sym_default(sym, impl, ver)
+
+#define FB12_COMPAT(func, sym)				\
+	WEAK_REF(func, SYM_FB12(sym));			\
+	SYM_COMPAT(sym, SYM_FB12(sym), FBSD_1.2)
+
+#define FUNC12_EXP(name)	__CONCAT(name, _1_2_exp)
+
+#define STUB12_FUNC1(name, idx, ret, p0_type)			\
+	static ret FUNC12_EXP(name)(p0_type) __used;		\
+	FB12_COMPAT(FUNC12_EXP(name), name);			\
+	typedef ret (*FUNC12_TYPE(name))(p0_type);		\
+	static ret FUNC12_EXP(name)(p0_type p0)			\
+	{							\
+		FUNC12_TYPE(name) func;				\
+		func = (FUNC12_TYPE(name))__thr_jtable12[idx];	\
+		return (func(p0));				\
+	}							\
+
+#define STUB12_FUNC2(name, idx, ret, p0_type, p1_type)		\
+	static ret FUNC12_EXP(name)(p0_type, p1_type) __used;	\
+	FB12_COMPAT(FUNC12_EXP(name), name);			\
+	typedef ret (*FUNC12_TYPE(name))(p0_type, p1_type);	\
+	static ret FUNC12_EXP(name)(p0_type p0, p1_type p1)	\
+	{							\
+		FUNC12_TYPE(name) func;				\
+		func = (FUNC12_TYPE(name))__thr_jtable12[idx];	\
+		return (func(p0, p1));				\
+	}							\
+
+#define STUB12_FUNC3(name, idx, ret, p0_type, p1_type, p2_type)	\
+	static ret FUNC12_EXP(name)(p0_type, p1_type, p2_type) __used;	\
+	FB12_COMPAT(FUNC12_EXP(name), name);			\
+	typedef ret (*FUNC12_TYPE(name))(p0_type, p1_type, p2_type); \
+	static ret FUNC12_EXP(name)(p0_type p0, p1_type p1, p2_type p2) \
+	{							\
+		FUNC12_TYPE(name) func;				\
+		func = (FUNC12_TYPE(name))__thr_jtable12[idx];	\
+		return (func(p0, p1, p2));			\
+	}							\
+
+STUB12_FUNC1(pthread_cond_broadcast, PJT12_COND_BROADCAST, int, void *)
+STUB12_FUNC1(pthread_cond_destroy, PJT12_COND_DESTROY, int, void *)
+STUB12_FUNC2(pthread_cond_init, PJT12_COND_INIT, int, void *, void *)
+STUB12_FUNC1(pthread_cond_signal, PJT12_COND_SIGNAL, int, void *)
+STUB12_FUNC3(pthread_cond_timedwait, PJT12_COND_TIMEDWAIT, int, void *, void *, void *)
+STUB12_FUNC2(pthread_cond_wait, PJT12_COND_WAIT, int, void *, void *)
+
+STUB12_FUNC1(pthread_mutex_destroy, PJT12_MUTEX_DESTROY, int, void *)
+STUB12_FUNC2(pthread_mutex_init, PJT12_MUTEX_INIT, int, void *, void *)
+STUB12_FUNC1(pthread_mutex_lock, PJT12_MUTEX_LOCK, int, void *)
+STUB12_FUNC1(pthread_mutex_trylock, PJT12_MUTEX_TRYLOCK, int, void *)
+STUB12_FUNC1(pthread_mutex_unlock, PJT12_MUTEX_UNLOCK, int, void *)
+
+STUB12_FUNC1(pthread_rwlock_destroy, PJT12_RWLOCK_DESTROY, int, void *)
+STUB12_FUNC2(pthread_rwlock_init, PJT12_RWLOCK_INIT, int, void *, void *)
+STUB12_FUNC1(pthread_rwlock_rdlock, PJT12_RWLOCK_RDLOCK, int, void *)
+STUB12_FUNC1(pthread_rwlock_tryrdlock, PJT12_RWLOCK_TRYRDLOCK, int, void *)
+STUB12_FUNC1(pthread_rwlock_trywrlock, PJT12_RWLOCK_TRYWRLOCK, int, void *)
+STUB12_FUNC1(pthread_rwlock_unlock, PJT12_RWLOCK_UNLOCK, int, void *)
+STUB12_FUNC1(pthread_rwlock_wrlock, PJT12_RWLOCK_WRLOCK, int, void *)
+
 static int
 stub_zero(void)
 {
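
[Reviewer note] The *_fb12 stubs pair with the pthread.map changes further down: binaries linked against the old FBSD_1.0 entry points keep resolving to these libc stubs (dispatching through __thr_jtable12), while freshly built code binds to the FBSD_1.2 default. Stripped to one function, the mechanism looks like the sketch below; the names are illustrative, FreeBSD spells the asm via the __sym_compat()/__sym_default() macros from <sys/cdefs.h>, and the object must be linked into a shared library whose version script defines FBSD_1.0 and FBSD_1.2:

struct oldmtx;			/* old pointer-based object layout */
struct newmtx;			/* new flat object layout */

int example_lock_1_0(struct oldmtx **);
int example_lock_1_2(struct newmtx *);

int
example_lock_1_0(struct oldmtx **mpp)
{
	/* shim: translate the old calling convention and forward */
	return (example_lock_1_2((struct newmtx *)*mpp));
}

int
example_lock_1_2(struct newmtx *mp)
{
	/* ... real locking ... */
	(void)mp;
	return (0);
}

/* old binaries resolve example_lock@FBSD_1.0 to the shim ... */
__asm__(".symver example_lock_1_0, example_lock@FBSD_1.0");
/* ... new links get the FBSD_1.2 default */
__asm__(".symver example_lock_1_2, example_lock@@FBSD_1.2");
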
--- src/lib/libc/gen/closedir.c	2007-12-03 14:43:52.000000000 +0000
+++ src/lib/libc/gen/closedir.c	2010-11-23 01:58:38.000000000 +0000
@@ -54,7 +54,7 @@
 	int fd;
 
 	if (__isthreaded)
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 	_seekdir(dirp, dirp->dd_rewind);	/* free seekdir storage */
 	fd = dirp->dd_fd;
 	dirp->dd_fd = -1;
@@ -62,8 +62,9 @@
 	free((void *)dirp->dd_buf);
 	_reclaim_telldir(dirp);
 	if (__isthreaded) {
-		_pthread_mutex_unlock(&dirp->dd_lock);
-		_pthread_mutex_destroy(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
+		_pthread_mutex_destroy(dirp->dd_lock);
+		free(dirp->dd_lock);
 	}
 	free((void *)dirp);
 	return(_close(fd));
--- src/lib/libc/gen/errlst.c	2009-10-07 20:35:31.000000000 +0000
+++ src/lib/libc/gen/errlst.c	2010-11-23 01:58:38.000000000 +0000
@@ -151,5 +151,7 @@
 	"Link has been severed",		/* 91 - ENOLINK */
 	"Protocol error",			/* 92 - EPROTO */
 	"Capabilities insufficient",		/* 93 - ENOTCAPABLE */
+	"Previous owning thread terminated",	/* 94 - EOWNERDEAD */
+	"State is not recoverable"		/* 95 - ENOTRECOVERABLE */
 };
 const int sys_nerr = sizeof(sys_errlist) / sizeof(sys_errlist[0]);
--- src/lib/libc/gen/opendir.c	2010-03-21 21:38:33.000000000 +0000
+++ src/lib/libc/gen/opendir.c	2010-11-23 01:58:38.000000000 +0000
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -297,7 +298,8 @@
 	dirp->dd_loc = 0;
 	dirp->dd_fd = fd;
 	dirp->dd_flags = flags;
-	dirp->dd_lock = NULL;
+	dirp->dd_lock = malloc(sizeof(struct pthread_mutex));
+	_pthread_mutex_init(dirp->dd_lock, NULL);
 
 	/*
 	 * Set up seek point for rewinddir.
--- src/lib/libc/gen/readdir.c	2008-05-05 14:41:28.000000000 +0000
+++ src/lib/libc/gen/readdir.c	2010-11-23 01:58:38.000000000 +0000
@@ -88,9 +88,9 @@
 	struct dirent *dp;
 
 	if (__isthreaded) {
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 		dp = _readdir_unlocked(dirp, 1);
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 	}
 	else
 		dp = _readdir_unlocked(dirp, 1);
@@ -109,10 +109,10 @@
 	saved_errno = errno;
 	errno = 0;
 	if (__isthreaded) {
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 		if ((dp = _readdir_unlocked(dirp, 1)) != NULL)
 			memcpy(entry, dp, _GENERIC_DIRSIZ(dp));
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 	}
 	else if ((dp = _readdir_unlocked(dirp, 1)) != NULL)
 		memcpy(entry, dp, _GENERIC_DIRSIZ(dp));
--- src/lib/libc/gen/seekdir.c	2007-12-03 14:43:52.000000000 +0000
+++ src/lib/libc/gen/seekdir.c	2010-11-23 01:58:38.000000000 +0000
@@ -52,8 +52,8 @@
 	long loc;
 {
 	if (__isthreaded)
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 	_seekdir(dirp, loc);
 	if (__isthreaded)
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 }
--- src/lib/libc/gen/telldir.c	2008-05-05 14:41:28.000000000 +0000
+++ src/lib/libc/gen/telldir.c	2010-11-23 01:58:38.000000000 +0000
@@ -64,13 +64,13 @@
 	if ((lp = (struct ddloc *)malloc(sizeof(struct ddloc))) == NULL)
 		return (-1);
 	if (__isthreaded)
-		_pthread_mutex_lock(&dirp->dd_lock);
+		_pthread_mutex_lock(dirp->dd_lock);
 	lp->loc_index = dirp->dd_td->td_loccnt++;
 	lp->loc_seek = dirp->dd_seek;
 	lp->loc_loc = dirp->dd_loc;
 	LIST_INSERT_HEAD(&dirp->dd_td->td_locq, lp, loc_lqe);
 	if (__isthreaded)
-		_pthread_mutex_unlock(&dirp->dd_lock);
+		_pthread_mutex_unlock(dirp->dd_lock);
 	return (lp->loc_index);
 }
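
[Reviewer note] Since pthread_mutex_t is no longer a lazily-initialized pointer, DIR cannot get its lock "for free" anymore: opendir() above now allocates and initializes dd_lock explicitly, and closedir() must destroy and free it. The lifecycle in miniature, using a hypothetical stand-in type (the real code is the opendir.c/closedir.c hunks above):

#include <pthread.h>
#include <stdlib.h>

struct stream {
	int		 fd;
	pthread_mutex_t	*lock;		/* was: an inline self-initializing pointer */
};

static struct stream *
stream_open(int fd)
{
	struct stream *sp = malloc(sizeof(*sp));

	if (sp == NULL)
		return (NULL);
	sp->fd = fd;
	sp->lock = malloc(sizeof(*sp->lock));
	if (sp->lock == NULL) {
		free(sp);
		return (NULL);
	}
	pthread_mutex_init(sp->lock, NULL);	/* explicit init at creation */
	return (sp);
}

static void
stream_close(struct stream *sp)
{
	pthread_mutex_destroy(sp->lock);
	free(sp->lock);				/* lock storage is now separate */
	free(sp);
}

int
main(void)
{
	struct stream *sp = stream_open(0);

	if (sp != NULL)
		stream_close(sp);
	return (0);
}
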
--- src/lib/libc/include/libc_private.h	2010-09-25 02:35:28.000000000 +0000
+++ src/lib/libc/include/libc_private.h	2010-11-30 05:13:51.000000000 +0000
@@ -134,10 +134,33 @@
 	PJT_MAX
 } pjt_index_t;
 
+typedef enum {
+	PJT12_COND_BROADCAST,
+	PJT12_COND_DESTROY,
+	PJT12_COND_INIT,
+	PJT12_COND_SIGNAL,
+	PJT12_COND_TIMEDWAIT,
+	PJT12_COND_WAIT,
+	PJT12_MUTEX_DESTROY,
+	PJT12_MUTEX_INIT,
+	PJT12_MUTEX_LOCK,
+	PJT12_MUTEX_TRYLOCK,
+	PJT12_MUTEX_UNLOCK,
+	PJT12_RWLOCK_DESTROY,
+	PJT12_RWLOCK_INIT,
+	PJT12_RWLOCK_RDLOCK,
+	PJT12_RWLOCK_TRYRDLOCK,
+	PJT12_RWLOCK_TRYWRLOCK,
+	PJT12_RWLOCK_UNLOCK,
+	PJT12_RWLOCK_WRLOCK,
+	PJT12_MAX
+} pjt12_index_t;
+
 typedef int (*pthread_func_t)(void);
 typedef pthread_func_t pthread_func_entry_t[2];
 
 extern pthread_func_entry_t __thr_jtable[];
+extern pthread_func_t __thr_jtable12[];
 
 /*
  * yplib internal interfaces
--- src/lib/libc/include/namespace.h	2010-10-20 02:35:31.000000000 +0000
+++ src/lib/libc/include/namespace.h	2010-11-23 01:58:38.000000000 +0000
@@ -143,6 +143,7 @@
 #define		pthread_kill			_pthread_kill
 #define		pthread_main_np			_pthread_main_np
 #define		pthread_multi_np		_pthread_multi_np
+#define		pthread_mutex_consistent	_pthread_mutex_consistent
 #define		pthread_mutex_destroy		_pthread_mutex_destroy
 #define		pthread_mutex_getprioceiling	_pthread_mutex_getprioceiling
 #define		pthread_mutex_init		_pthread_mutex_init
@@ -157,12 +158,14 @@
 #define		pthread_mutexattr_getprioceiling _pthread_mutexattr_getprioceiling
 #define		pthread_mutexattr_getprotocol	_pthread_mutexattr_getprotocol
 #define		pthread_mutexattr_getpshared	_pthread_mutexattr_getpshared
+#define		pthread_mutexattr_getrobust	_pthread_mutexattr_getrobust
 #define		pthread_mutexattr_gettype	_pthread_mutexattr_gettype
 #define		pthread_mutexattr_init		_pthread_mutexattr_init
 #define		pthread_mutexattr_setkind_np	_pthread_mutexattr_setkind_np
 #define		pthread_mutexattr_setprioceiling _pthread_mutexattr_setprioceiling
 #define		pthread_mutexattr_setprotocol	_pthread_mutexattr_setprotocol
 #define		pthread_mutexattr_setpshared	_pthread_mutexattr_setpshared
+#define		pthread_mutexattr_setrobust	_pthread_mutexattr_setrobust
 #define		pthread_mutexattr_settype	_pthread_mutexattr_settype
 #define		pthread_once			_pthread_once
 #define		pthread_resume_all_np		_pthread_resume_all_np
--- src/lib/libc/include/un-namespace.h	2010-10-20 02:35:31.000000000 +0000
+++ src/lib/libc/include/un-namespace.h	2010-11-23 01:58:38.000000000 +0000
@@ -124,6 +124,7 @@
 #undef		pthread_kill
 #undef		pthread_main_np
 #undef		pthread_multi_np
+#undef		pthread_mutex_consistent
 #undef		pthread_mutex_destroy
 #undef		pthread_mutex_getprioceiling
 #undef		pthread_mutex_init
@@ -138,12 +139,14 @@
 #undef		pthread_mutexattr_getprioceiling
 #undef		pthread_mutexattr_getprotocol
 #undef		pthread_mutexattr_getpshared
+#undef		pthread_mutexattr_getrobust
 #undef		pthread_mutexattr_gettype
 #undef		pthread_mutexattr_init
 #undef		pthread_mutexattr_setkind_np
 #undef		pthread_mutexattr_setprioceiling
 #undef		pthread_mutexattr_setprotocol
 #undef		pthread_mutexattr_setpshared
+#undef		pthread_mutexattr_setrobust
 #undef		pthread_mutexattr_settype
 #undef		pthread_once
 #undef		pthread_resume_all_np
--- src/lib/libc/stdio/_flock_stub.c	2008-04-17 22:41:57.000000000 +0000
+++ src/lib/libc/stdio/_flock_stub.c	2010-11-23 01:58:38.000000000 +0000
@@ -67,7 +67,7 @@
 		 * Make sure this mutex is treated as a private
 		 * internal mutex:
 		 */
-		_pthread_mutex_lock(&fp->_fl_mutex);
+		_pthread_mutex_lock(fp->_fl_mutex);
 		fp->_fl_owner = curthread;
 		fp->_fl_count = 1;
 	}
@@ -94,7 +94,7 @@
 	 * Make sure this mutex is treated as a private
 	 * internal mutex:
 	 */
-	else if (_pthread_mutex_trylock(&fp->_fl_mutex) == 0) {
+	else if (_pthread_mutex_trylock(fp->_fl_mutex) == 0) {
 		fp->_fl_owner = curthread;
 		fp->_fl_count = 1;
 	}
@@ -130,7 +130,7 @@
 		 */
 		fp->_fl_count = 0;
 		fp->_fl_owner = NULL;
-		_pthread_mutex_unlock(&fp->_fl_mutex);
+		_pthread_mutex_unlock(fp->_fl_mutex);
 	}
 }
--- src/lib/libc/stdio/findfp.c	2010-03-11 17:37:06.000000000 +0000
+++ src/lib/libc/stdio/findfp.c	2010-11-23 01:58:38.000000000 +0000
@@ -36,6 +36,7 @@
 #include
 __FBSDID("$FreeBSD: src/lib/libc/stdio/findfp.c,v 1.35 2010/03/11 17:03:32 jhb Exp $");
 
+#include "namespace.h"
 #include
 #include
 #include
@@ -48,12 +49,14 @@
 #include "libc_private.h"
 #include "local.h"
 #include "glue.h"
+#include "un-namespace.h"
 
 int	__sdidinit;
 
 #define	NDYNAMIC 10		/* add ten more whenever necessary */
 
-#define	std(flags, file) { \
+
+#define	std(flags, file, lock) { \
 	._flags = (flags), \
 	._file = (file), \
 	._cookie = __sF + (file), \
@@ -61,16 +64,21 @@
 	._read = __sread, \
 	._seek = __sseek, \
 	._write = __swrite, \
-	._fl_mutex = PTHREAD_MUTEX_INITIALIZER, \
+	._fl_mutex = &lock, \
 }
 
 /* the usual - (stdin + stdout + stderr) */
 static FILE usual[FOPEN_MAX - 3];
 static struct glue uglue = { NULL, FOPEN_MAX - 3, usual };
+static pthread_mutex_t sfLOCK[3] = {
+	PTHREAD_MUTEX_INITIALIZER,
+	PTHREAD_MUTEX_INITIALIZER,
+	PTHREAD_MUTEX_INITIALIZER
+};
 
 static FILE __sF[3] = {
-	std(__SRD, STDIN_FILENO),
-	std(__SWR, STDOUT_FILENO),
-	std(__SWR|__SNBF, STDERR_FILENO)
+	std(__SRD, STDIN_FILENO, sfLOCK[0]),
+	std(__SWR, STDOUT_FILENO, sfLOCK[1]),
+	std(__SWR|__SNBF, STDERR_FILENO, sfLOCK[2])
 };
 
 FILE *__stdinp = &__sF[0];
@@ -97,7 +105,7 @@
 	int n;
 {
 	struct glue *g;
-	static FILE empty = { ._fl_mutex = PTHREAD_MUTEX_INITIALIZER };
+	static FILE empty = { ._fl_mutex = NULL };
 	FILE *p;
 
 	g = (struct glue *)malloc(sizeof(*g) + ALIGNBYTES + n * sizeof(FILE));
@@ -155,7 +163,10 @@
 	fp->_ub._size = 0;
 	fp->_lb._base = NULL;	/* no line buffer */
 	fp->_lb._size = 0;
-/*	fp->_fl_mutex = NULL; */ /* once set always set (reused) */
+	if (fp->_fl_mutex == NULL) {	/* once set always set (reused) */
+		fp->_fl_mutex = malloc(sizeof(struct pthread_mutex));
+		_pthread_mutex_init(fp->_fl_mutex, NULL);
+	}
 	fp->_orientation = 0;
 	memset(&fp->_mbstate, 0, sizeof(mbstate_t));
 	return (fp);
--- src/lib/libc/stdio/local.h	2010-03-11 17:37:06.000000000 +0000
+++ src/lib/libc/stdio/local.h	2010-11-23 01:58:38.000000000 +0000
@@ -114,7 +114,7 @@
  */
 #define	FAKE_FILE {				\
 	._file = -1,				\
-	._fl_mutex = PTHREAD_MUTEX_INITIALIZER,	\
+	._fl_mutex = NULL,			\
 }
 
 /*
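
[Reviewer note] findfp.c now splits FILE locking into two cases: stdin/stdout/stderr point at the statically initialized sfLOCK array, because the standard streams must be lockable before malloc() is usable, while every other stream gets its lock allocated lazily in __sfp(). The two paths in miniature, with hypothetical names and error handling trimmed:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t std_lock = PTHREAD_MUTEX_INITIALIZER;

struct file {
	pthread_mutex_t *lock;
};

/* static path: lock storage exists before any allocator runs */
static struct file stdstream = { .lock = &std_lock };

static void
file_init_lock(struct file *fp)
{
	if (fp->lock == NULL) {		/* lazy path, as in __sfp() */
		fp->lock = malloc(sizeof(*fp->lock));
		pthread_mutex_init(fp->lock, NULL);
	}
}

int
main(void)
{
	struct file f = { .lock = NULL };

	file_init_lock(&f);
	pthread_mutex_lock(stdstream.lock);
	pthread_mutex_unlock(stdstream.lock);
	return (0);
}
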
--- src/lib/libthr/Makefile	2010-10-08 02:35:36.000000000 +0000
+++ src/lib/libthr/Makefile	2010-11-26 07:33:52.000000000 +0000
@@ -40,7 +40,7 @@
 
 # enable extra internal consistancy checks
 CFLAGS+=-D_PTHREADS_INVARIANTS
-#CFLAGS+=-g
+#CFLAGS+=-g -O0
 
 PRECIOUSLIB=
--- src/lib/libthr/pthread.map	2010-10-20 02:35:31.000000000 +0000
+++ src/lib/libthr/pthread.map	2010-11-23 01:58:38.000000000 +0000
@@ -23,13 +23,9 @@
 	poll;
 	pselect;
 	pthread_atfork;
-	pthread_barrier_destroy;
-	pthread_barrier_init;
-	pthread_barrier_wait;
 	pthread_barrierattr_destroy;
 	pthread_barrierattr_getpshared;
 	pthread_barrierattr_init;
-	pthread_barrierattr_setpshared;
 	pthread_attr_destroy;
 	pthread_attr_get_np;
 	pthread_attr_getdetachstate;
@@ -55,18 +51,11 @@
 	pthread_cancel;
 	pthread_cleanup_pop;
 	pthread_cleanup_push;
-	pthread_cond_broadcast;
-	pthread_cond_destroy;
-	pthread_cond_init;
-	pthread_cond_signal;
-	pthread_cond_timedwait;
-	pthread_cond_wait;
 	pthread_condattr_destroy;
 	pthread_condattr_getclock;
 	pthread_condattr_getpshared;
 	pthread_condattr_init;
 	pthread_condattr_setclock;
-	pthread_condattr_setpshared;
 	pthread_create;
 	pthread_detach;
 	pthread_equal;
@@ -81,14 +70,6 @@
 	pthread_kill;
 	pthread_main_np;
 	pthread_multi_np;
-	pthread_mutex_destroy;
-	pthread_mutex_getprioceiling;
-	pthread_mutex_init;
-	pthread_mutex_lock;
-	pthread_mutex_setprioceiling;
-	pthread_mutex_timedlock;
-	pthread_mutex_trylock;
-	pthread_mutex_unlock;
 	pthread_mutexattr_destroy;
 	pthread_mutexattr_getkind_np;
 	pthread_mutexattr_getprioceiling;
@@ -99,24 +80,13 @@
 	pthread_mutexattr_setkind_np;
 	pthread_mutexattr_setprioceiling;
 	pthread_mutexattr_setprotocol;
-	pthread_mutexattr_setpshared;
 	pthread_mutexattr_settype;
 	pthread_once;
 	pthread_resume_all_np;
 	pthread_resume_np;
-	pthread_rwlock_destroy;
-	pthread_rwlock_init;
-	pthread_rwlock_rdlock;
-	pthread_rwlock_timedrdlock;
-	pthread_rwlock_timedwrlock;
-	pthread_rwlock_tryrdlock;
-	pthread_rwlock_trywrlock;
-	pthread_rwlock_unlock;
-	pthread_rwlock_wrlock;
 	pthread_rwlockattr_destroy;
 	pthread_rwlockattr_getpshared;
 	pthread_rwlockattr_init;
-	pthread_rwlockattr_setpshared;
 	pthread_set_name_np;
 	pthread_self;
 	pthread_setcancelstate;
@@ -127,11 +97,6 @@
 	pthread_setspecific;
 	pthread_sigmask;
 	pthread_single_np;
-	pthread_spin_destroy;
-	pthread_spin_init;
-	pthread_spin_lock;
-	pthread_spin_trylock;
-	pthread_spin_unlock;
 	pthread_suspend_all_np;
 	pthread_suspend_np;
 	pthread_switch_add_np;
@@ -278,6 +243,7 @@
 	_pthread_kill;
 	_pthread_main_np;
 	_pthread_multi_np;
+	_pthread_mutex_consistent;
 	_pthread_mutex_destroy;
 	_pthread_mutex_getprioceiling;
 	_pthread_mutex_getspinloops_np;
@@ -297,12 +263,14 @@
 	_pthread_mutexattr_getprioceiling;
 	_pthread_mutexattr_getprotocol;
 	_pthread_mutexattr_getpshared;
+	_pthread_mutexattr_getrobust;
 	_pthread_mutexattr_gettype;
 	_pthread_mutexattr_init;
 	_pthread_mutexattr_setkind_np;
 	_pthread_mutexattr_setprioceiling;
 	_pthread_mutexattr_setprotocol;
 	_pthread_mutexattr_setpshared;
+	_pthread_mutexattr_setrobust;
 	_pthread_mutexattr_settype;
 	_pthread_once;
 	_pthread_resume_all_np;
@@ -392,15 +360,53 @@
 	pthread_getaffinity_np;
 	pthread_getcpuclockid;
 	pthread_setaffinity_np;
+};
+
+FBSD_1.2 {
+	openat;
+	pthread_barrier_destroy;
+	pthread_barrier_init;
+	pthread_barrier_wait;
+	pthread_barrierattr_setpshared;
+	pthread_cond_broadcast;
+	pthread_cond_destroy;
+	pthread_cond_init;
+	pthread_cond_signal;
+	pthread_cond_timedwait;
+	pthread_cond_wait;
+	pthread_condattr_setpshared;
+	pthread_mutex_destroy;
+	pthread_mutexattr_getrobust;
+	pthread_mutexattr_setpshared;
+	pthread_mutexattr_setrobust;
+	pthread_mutex_consistent;
+	pthread_mutex_getprioceiling;
 	pthread_mutex_getspinloops_np;
 	pthread_mutex_getyieldloops_np;
+	pthread_mutex_init;
 	pthread_mutex_isowned_np;
+	pthread_mutex_lock;
+	pthread_mutex_setprioceiling;
 	pthread_mutex_setspinloops_np;
 	pthread_mutex_setyieldloops_np;
-};
-
-FBSD_1.2 {
-	openat;
+	pthread_mutex_timedlock;
+	pthread_mutex_trylock;
+	pthread_mutex_unlock;
+	pthread_spin_destroy;
+	pthread_spin_init;
+	pthread_spin_lock;
+	pthread_spin_trylock;
+	pthread_spin_unlock;
+	pthread_rwlock_destroy;
+	pthread_rwlock_init;
+	pthread_rwlock_rdlock;
+	pthread_rwlock_timedrdlock;
+	pthread_rwlock_timedwrlock;
+	pthread_rwlock_tryrdlock;
+	pthread_rwlock_trywrlock;
+	pthread_rwlock_unlock;
+	pthread_rwlock_wrlock;
+	pthread_rwlockattr_setpshared;
 	setcontext;
 	swapcontext;
 };
--- src/lib/libthr/thread/Makefile.inc	2010-01-05 08:35:28.000000000 +0000
+++ src/lib/libthr/thread/Makefile.inc	2010-11-27 04:11:24.000000000 +0000
@@ -45,6 +45,7 @@
 	thr_setschedparam.c \
 	thr_sig.c \
 	thr_single_np.c \
+	thr_sleepq.c \
 	thr_spec.c \
 	thr_spinlock.c \
 	thr_stack.c \
--- src/lib/libthr/thread/thr_barrier.c	2006-12-06 01:40:06.000000000 +0000
+++ src/lib/libthr/thread/thr_barrier.c	2010-11-23 01:58:38.000000000 +0000
@@ -29,6 +29,7 @@
 #include "namespace.h"
 #include
 #include
+#include
 #include
 #include "un-namespace.h"
 
@@ -38,76 +39,111 @@
 __weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
 __weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
 
+typedef struct pthread_barrier *pthread_barrier_old_t;
+int _pthread_barrier_destroy_1_0(pthread_barrier_old_t *);
+int _pthread_barrier_wait_1_0(pthread_barrier_old_t *);
+int _pthread_barrier_init_1_0(pthread_barrier_old_t *,
+	const pthread_barrierattr_t *, unsigned);
+
 int
-_pthread_barrier_destroy(pthread_barrier_t *barrier)
+_pthread_barrier_destroy(pthread_barrier_t *barp)
 {
-	pthread_barrier_t	bar;
+	(void)_pthread_cond_destroy(&barp->__cond);
+	(void)_pthread_mutex_destroy(&barp->__lock);
+	memset(barp, -1, sizeof(*barp));
+	return (0);
+}
 
-	if (barrier == NULL || *barrier == NULL)
+int
+_pthread_barrier_init(pthread_barrier_t *barp,
+	const pthread_barrierattr_t *attr, unsigned count)
+{
+	if (count == 0)
 		return (EINVAL);
 
-	bar = *barrier;
-	if (bar->b_waiters > 0)
-		return (EBUSY);
-	*barrier = NULL;
-	free(bar);
+	_pthread_mutex_init(&barp->__lock, NULL);
+	_pthread_cond_init(&barp->__cond, NULL);
+	if (attr != NULL && *attr != NULL) {
+		if ((*attr)->pshared == PTHREAD_PROCESS_SHARED) {
+			barp->__lock.__lockflags |= USYNC_PROCESS_SHARED;
+			barp->__cond.__flags |= USYNC_PROCESS_SHARED;
+		} else if ((*attr)->pshared != PTHREAD_PROCESS_PRIVATE) {
+			return (EINVAL);
+		}
+	}
+	barp->__cycle = 0;
+	barp->__waiters = 0;
+	barp->__count = count;
 	return (0);
 }
 
 int
-_pthread_barrier_init(pthread_barrier_t *barrier,
-	const pthread_barrierattr_t *attr, unsigned count)
+_pthread_barrier_wait(pthread_barrier_t *barp)
 {
-	pthread_barrier_t	bar;
+	uint64_t cycle;
+	int error;
+
+	_pthread_mutex_lock(&barp->__lock);
+	if (++barp->__waiters == barp->__count) {
+		/* Current thread is the last thread. */
+		barp->__waiters = 0;
+		barp->__cycle++;
+		_pthread_cond_broadcast(&barp->__cond);
+		_pthread_mutex_unlock(&barp->__lock);
+		error = PTHREAD_BARRIER_SERIAL_THREAD;
+	} else {
+		cycle = barp->__cycle;
+		do {
+			_pthread_cond_wait(&barp->__cond, &barp->__lock);
+			/* test cycle to avoid bogus wakeup */
+		} while (cycle == barp->__cycle);
+		_pthread_mutex_unlock(&barp->__lock);
+		error = 0;
+	}
+	return (error);
+}
 
-	(void)attr;
+int
+_pthread_barrier_destroy_1_0(pthread_barrier_old_t *barpp)
+{
+	struct pthread_barrier *barp;
 
-	if (barrier == NULL || count <= 0)
+	if ((barp = *barpp) == NULL)
 		return (EINVAL);
+	_pthread_barrier_destroy(barp);
+	free(barp);
+	return (0);
+}
 
-	bar = malloc(sizeof(struct pthread_barrier));
-	if (bar == NULL)
+int
+_pthread_barrier_init_1_0(pthread_barrier_old_t *barpp,
+	const pthread_barrierattr_t *attr, unsigned count)
+{
+	struct pthread_barrier *barp;
+	int error;
+
+	barp = malloc(sizeof(struct pthread_barrier));
+	if (barp == NULL)
 		return (ENOMEM);
-
-	_thr_umutex_init(&bar->b_lock);
-	_thr_ucond_init(&bar->b_cv);
-	bar->b_cycle	= 0;
-	bar->b_waiters	= 0;
-	bar->b_count	= count;
-	*barrier	= bar;
-
+	error = _pthread_barrier_init(barp, attr, count);
+	if (error) {
+		free(barp);
+		return (error);
+	}
+	*barpp = barp;
 	return (0);
 }
 
 int
-_pthread_barrier_wait(pthread_barrier_t *barrier)
+_pthread_barrier_wait_1_0(pthread_barrier_old_t *barpp)
 {
-	struct pthread *curthread = _get_curthread();
-	pthread_barrier_t bar;
-	int64_t cycle;
-	int ret;
+	struct pthread_barrier *barp;
 
-	if (barrier == NULL || *barrier == NULL)
+	if ((barp = *barpp) == NULL)
 		return (EINVAL);
+	return _pthread_barrier_wait(barp);
+}
 
-	bar = *barrier;
-	THR_UMUTEX_LOCK(curthread, &bar->b_lock);
-	if (++bar->b_waiters == bar->b_count) {
-		/* Current thread is lastest thread */
-		bar->b_waiters = 0;
-		bar->b_cycle++;
-		_thr_ucond_broadcast(&bar->b_cv);
-		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-		ret = PTHREAD_BARRIER_SERIAL_THREAD;
-	} else {
-		cycle = bar->b_cycle;
-		do {
-			_thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
-			THR_UMUTEX_LOCK(curthread, &bar->b_lock);
-			/* test cycle to avoid bogus wakeup */
-		} while (cycle == bar->b_cycle);
-		THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
-		ret = 0;
-	}
-	return (ret);
-}
+FB10_COMPAT(_pthread_barrier_destroy_1_0, pthread_barrier_destroy);
+FB10_COMPAT(_pthread_barrier_init_1_0, pthread_barrier_init);
+FB10_COMPAT(_pthread_barrier_wait_1_0, pthread_barrier_wait);
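
[Reviewer note] The rewritten barrier keeps the POSIX contract: per cycle exactly one waiter receives PTHREAD_BARRIER_SERIAL_THREAD, and the __cycle generation counter filters spurious wakeups out of the cond-wait loop. A small self-contained usage example (error checks omitted):

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
	(void)arg;
	/* ... per-thread phase 1 work ... */
	if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD) {
		/* exactly one thread per cycle runs the serial section */
		puts("phase 1 complete");
	}
	/* ... phase 2 ... */
	return (NULL);
}

int
main(void)
{
	pthread_t td[NTHREADS];
	int i;

	pthread_barrier_init(&barrier, NULL, NTHREADS);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&td[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(td[i], NULL);
	pthread_barrier_destroy(&barrier);
	return (0);
}
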
--- src/lib/libthr/thread/thr_barrierattr.c	2006-04-04 03:38:46.000000000 +0000
+++ src/lib/libthr/thread/thr_barrierattr.c	2010-11-23 01:58:38.000000000 +0000
@@ -43,6 +43,10 @@
 __weak_reference(_pthread_barrierattr_getpshared,
 	pthread_barrierattr_getpshared);
 
+int _pthread_barrierattr_setpshared_1_0(pthread_barrierattr_t *, int);
+
+FB10_COMPAT(_pthread_barrierattr_setpshared_1_0, pthread_barrierattr_setpshared);
+
 int
 _pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
 {
@@ -88,6 +92,22 @@
 		return (EINVAL);
 
 	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return (EINVAL);
+
+	(*attr)->pshared = pshared;
+	return (0);
+}
+
+int
+_pthread_barrierattr_setpshared_1_0(pthread_barrierattr_t *attr, int pshared)
+{
+
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
+	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
 	if (pshared != PTHREAD_PROCESS_PRIVATE)
 		return (EINVAL);
--- src/lib/libthr/thread/thr_clean.c	2010-09-25 10:35:36.000000000 +0000
+++ src/lib/libthr/thread/thr_clean.c	2010-11-30 05:09:26.000000000 +0000
@@ -47,7 +47,7 @@
 
 void
 __pthread_cleanup_push_imp(void (*routine)(void *), void *arg,
-	struct _pthread_cleanup_info *info)
+	struct pthread_cleanup_info *info)
 {
 	struct pthread	*curthread = _get_curthread();
 	struct pthread_cleanup *newbuf;
@@ -84,7 +84,7 @@
 	curthread->unwind_disabled = 1;
 #endif
 	if ((newbuf = (struct pthread_cleanup *)
-	    malloc(sizeof(struct _pthread_cleanup_info))) != NULL) {
+	    malloc(sizeof(struct pthread_cleanup_info))) != NULL) {
 		newbuf->routine = routine;
 		newbuf->routine_arg = arg;
 		newbuf->onheap = 1;
--- src/lib/libthr/thread/thr_cond.c	2010-09-28 05:35:27.000000000 +0000
+++ src/lib/libthr/thread/thr_cond.c	2010-12-01 01:13:39.000000000 +0000
@@ -45,7 +45,8 @@
 static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
 static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
 		const struct timespec *abstime, int cancel);
-static int cond_signal_common(pthread_cond_t *cond, int broadcast);
+static int cond_signal_common(struct pthread_cond *cond);
+static int cond_broadcast_common(struct pthread_cond *cond);
 
 /*
  * Double underscore versions are cancellation points.  Single underscore
@@ -60,116 +61,46 @@
 __weak_reference(_pthread_cond_signal, pthread_cond_signal);
 __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
 
+#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
+
 static int
-cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+cond_init(struct pthread_cond *cvp, const pthread_condattr_t *cond_attr)
 {
-	pthread_cond_t	pcond;
-	int	rval = 0;
+	int	error = 0;
 
-	if ((pcond = (pthread_cond_t)
-	    calloc(1, sizeof(struct pthread_cond))) == NULL) {
-		rval = ENOMEM;
+	/*
+	 * Initialise the condition variable structure:
+	 */
+	memset(cvp, 0, sizeof(*cvp));
+	cvp->__magic = _PTHREAD_COND_MAGIC;
+	if (cond_attr == NULL || *cond_attr == NULL) {
+		cvp->__clock_id = CLOCK_REALTIME;
 	} else {
-		/*
-		 * Initialise the condition variable structure:
-		 */
-		if (cond_attr == NULL || *cond_attr == NULL) {
-			pcond->c_pshared = 0;
-			pcond->c_clockid = CLOCK_REALTIME;
-		} else {
-			pcond->c_pshared = (*cond_attr)->c_pshared;
-			pcond->c_clockid = (*cond_attr)->c_clockid;
-		}
-		_thr_umutex_init(&pcond->c_lock);
-		*cond = pcond;
+		if ((*cond_attr)->c_pshared)
+			cvp->__flags |= USYNC_PROCESS_SHARED;
+		cvp->__clock_id = (*cond_attr)->c_clockid;
 	}
-	/* Return the completion status: */
-	return (rval);
+	return (error);
 }
 
-static int
-init_static(struct pthread *thread, pthread_cond_t *cond)
-{
-	int ret;
-
-	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
-
-	if (*cond == NULL)
-		ret = cond_init(cond, NULL);
-	else
-		ret = 0;
-
-	THR_LOCK_RELEASE(thread, &_cond_static_lock);
-
-	return (ret);
-}
-
-#define CHECK_AND_INIT_COND						\
-	if (__predict_false((cv = (*cond)) <= THR_COND_DESTROYED)) {	\
-		if (cv == THR_COND_INITIALIZER) {			\
-			int ret;					\
-			ret = init_static(_get_curthread(), cond);	\
-			if (ret)					\
-				return (ret);				\
-		} else if (cv == THR_COND_DESTROYED) {			\
-			return (EINVAL);				\
-		}							\
-		cv = *cond;						\
-	}
-
 int
 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
 {
-
-	*cond = NULL;
 	return (cond_init(cond, cond_attr));
 }
 
-int
-_pthread_cond_destroy(pthread_cond_t *cond)
+static int
+cond_destroy_common(pthread_cond_t *cvp)
 {
-	struct pthread		*curthread = _get_curthread();
-	struct pthread_cond	*cv;
-	int			rval = 0;
-
-	if ((cv = *cond) == THR_COND_INITIALIZER)
-		rval = 0;
-	else if (cv == THR_COND_DESTROYED)
-		rval = EINVAL;
-	else {
-		cv = *cond;
-		THR_UMUTEX_LOCK(curthread, &cv->c_lock);
-		*cond = THR_COND_DESTROYED;
-		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
-
-		/*
-		 * Free the memory allocated for the condition
-		 * variable structure:
-		 */
-		free(cv);
-	}
-	return (rval);
+	memset(cvp, 0, sizeof(*cvp));
+	return (0);
 }
 
-struct cond_cancel_info
+int
+_pthread_cond_destroy(pthread_cond_t *cvp)
 {
-	pthread_mutex_t	*mutex;
-	pthread_cond_t	*cond;
-	int		count;
-};
-
-static void
-cond_cancel_handler(void *arg)
-{
-	struct pthread *curthread = _get_curthread();
-	struct cond_cancel_info *info = (struct cond_cancel_info *)arg;
-	pthread_cond_t cv;
-
-	if (info->cond != NULL) {
-		cv = *(info->cond);
-		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
-	}
-	_mutex_cv_lock(info->mutex, info->count);
+	return cond_destroy_common(cvp);
 }
 
 /*
@@ -181,71 +112,165 @@
  * to be lost.
  */
 static int
-cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
+	const struct timespec *abstime, int cancel)
+{
+	struct pthread	*curthread = _get_curthread();
+	int	recurse;
+	int	error, error2 = 0;
+
+	error = _mutex_cv_detach(mp, &recurse);
+	if (__predict_false(error != 0))
+		return (error);
+
+	if (cancel) {
+		_thr_cancel_enter2(curthread, 0);
+		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
+			(struct umutex *)&mp->__lockword, abstime,
+			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
+		_thr_cancel_leave(curthread, 0);
+	} else {
+		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
+			(struct umutex *)&mp->__lockword, abstime,
+			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
+	}
+
+	/*
+	 * Note that PP and robust mutexes may return meaningful
+	 * error codes here.
+	 */
+	if (error == 0) {
+		error2 = _mutex_cv_lock(mp, recurse);
+	} else if (error == EINTR || error == ETIMEDOUT) {
+		error2 = _mutex_cv_lock(mp, recurse);
+		if (error2 == 0 && cancel)
+			_thr_testcancel(curthread);
+		if (error == EINTR)
+			error = 0;
+	} else {
+		/* We know that it didn't unlock the mutex. */
+		error2 = _mutex_cv_attach(mp, recurse);
+		if (error2 == 0 && cancel)
+			_thr_testcancel(curthread);
+	}
+	return (error2 != 0 ? error2 : error);
+}
+
+/*
+ * A thread waits in the userland queue whenever possible.  When the
+ * thread is signaled or broadcast, it is removed from the queue and
+ * saved in curthread's defer_waiters[] buffer, but it is not woken
+ * up until the mutex is unlocked.
+ */
+
+static int
+cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
 	const struct timespec *abstime, int cancel)
 {
 	struct pthread	*curthread = _get_curthread();
-	struct timespec ts, ts2, *tsp;
-	struct cond_cancel_info info;
-	pthread_cond_t  cv;
-	int	ret;
+	struct sleepqueue *sq;
+	int	recurse;
+	int	error;
+
+	if (curthread->wchan != NULL)
+		PANIC("thread was already on queue.");
+	if ((error = _mutex_owned(curthread, mp)) != 0)
+		return (error);
+
+	if (cancel)
+		_thr_testcancel(curthread);
 
+	_sleepq_lock(cvp);
 	/*
-	 * If the condition variable is statically initialized,
-	 * perform the dynamic initialization:
+	 * Set __has_user_waiters before unlocking the mutex; this allows
+	 * us to check it in pthread_cond_signal() without taking the lock.
 	 */
-	CHECK_AND_INIT_COND
+	cvp->__has_user_waiters = 1;
+	curthread->will_sleep = 1;
+	(void)_mutex_cv_unlock(mp, &recurse);
+	curthread->mutex_obj = mp;
+	_sleepq_add(cvp, curthread);
+	for (;;) {
+		_thr_clear_wake(curthread);
+		_sleepq_unlock(cvp);
+
+		if (cancel) {
+			_thr_cancel_enter2(curthread, 0);
+			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
+			_thr_cancel_leave(curthread, 0);
+		} else {
+			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
+		}
+
+		if (curthread->wchan == NULL) {
+			error = 0;
+			goto out;
+		}
 
-	cv = *cond;
-	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
-	ret = _mutex_cv_unlock(mutex, &info.count);
-	if (__predict_false(ret != 0)) {
-		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
-		return (ret);
+		_sleepq_lock(cvp);
+		if (curthread->wchan == NULL) {
+			error = 0;
+			break;
+		} else if (cancel && SHOULD_CANCEL(curthread)) {
+			sq = _sleepq_lookup(cvp);
+			cvp->__has_user_waiters =
+				_sleepq_remove(sq, curthread);
+			_sleepq_unlock(cvp);
+			curthread->mutex_obj = NULL;
+			_mutex_cv_lock(mp, recurse);
+			if (!THR_IN_CRITICAL(curthread))
+				_pthread_exit(PTHREAD_CANCELED);
+			else /* this should not happen */
+				return (0);
+		} else if (error == ETIMEDOUT) {
+			sq = _sleepq_lookup(cvp);
+			cvp->__has_user_waiters =
+				_sleepq_remove(sq, curthread);
+			break;
+		}
 	}
+	_sleepq_unlock(cvp);
+out:
+	curthread->mutex_obj = NULL;
+	_mutex_cv_lock(mp, recurse);
+	return (error);
+}
 
-	info.mutex = mutex;
-	info.cond  = cond;
+static int
+cond_wait_common(struct pthread_cond *cvp, struct pthread_mutex *mp,
+	const struct timespec *abstime, int cancel)
+{
+	struct pthread	*curthread = _get_curthread();
+	int error;
 
-	if (abstime != NULL) {
-		clock_gettime(cv->c_clockid, &ts);
-		TIMESPEC_SUB(&ts2, abstime, &ts);
-		tsp = &ts2;
-	} else
-		tsp = NULL;
+	if ((error = _mutex_owned(curthread, mp)) != 0)
+		return (error);
 
-	if (cancel) {
-		THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
-		_thr_cancel_enter2(curthread, 0);
-		ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 1);
-		info.cond = NULL;
-		_thr_cancel_leave(curthread, (ret != 0));
-		THR_CLEANUP_POP(curthread, 0);
-	} else {
-		ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0);
-	}
-	if (ret == EINTR)
-		ret = 0;
-	_mutex_cv_lock(mutex, info.count);
-	return (ret);
+	if (curthread->attr.sched_policy != SCHED_OTHER ||
+	    (mp->__lockflags & (UMUTEX_PRIO_PROTECT2|UMUTEX_PRIO_INHERIT|
+		UMUTEX_ROBUST|USYNC_PROCESS_SHARED)) != 0 ||
+	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
+		return cond_wait_kernel(cvp, mp, abstime, cancel);
+	else
+		return cond_wait_user(cvp, mp, abstime, cancel);
 }
 
 int
-_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+_pthread_cond_wait(pthread_cond_t *cvp, pthread_mutex_t *mp)
 {
-	return (cond_wait_common(cond, mutex, NULL, 0));
+	return (cond_wait_common(cvp, mp, NULL, 0));
 }
 
 int
-__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+__pthread_cond_wait(pthread_cond_t *cvp, pthread_mutex_t *mp)
 {
-	return (cond_wait_common(cond, mutex, NULL, 1));
+	return (cond_wait_common(cvp, mp, NULL, 1));
 }
 
 int
-_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
+_pthread_cond_timedwait(pthread_cond_t *cvp, pthread_mutex_t *mp,
 	const struct timespec * abstime)
 {
 
@@ -253,27 +278,235 @@
 	    abstime->tv_nsec >= 1000000000)
 		return (EINVAL);
 
-	return (cond_wait_common(cond, mutex, abstime, 0));
+	return (cond_wait_common(cvp, mp, abstime, 0));
 }
 
 int
-__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
-    const struct timespec *abstime)
+__pthread_cond_timedwait(pthread_cond_t *cvp, pthread_mutex_t *mp,
+	const struct timespec * abstime)
 {
 
 	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
 	    abstime->tv_nsec >= 1000000000)
 		return (EINVAL);
 
-	return (cond_wait_common(cond, mutex, abstime, 1));
+	return (cond_wait_common(cvp, mp, abstime, 1));
+}
+
+static int
+cond_signal_common(struct pthread_cond *cvp)
+{
+	struct pthread	*curthread;
+	struct pthread	*td;
+	struct pthread_mutex *mp;
+	struct sleepqueue *sq;
+	int	*waddr;
+	int	pshared = CV_PSHARED(cvp);
+
+	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);
+
+	if (pshared || cvp->__has_user_waiters == 0)
+		return (0);
+
+	curthread = _get_curthread();
+	waddr = NULL;
+	_sleepq_lock(cvp);
+	sq = _sleepq_lookup(cvp);
+	if (sq == NULL) {
+		_sleepq_unlock(cvp);
+		return (0);
+	}
+
+	td = _sleepq_first(sq);
+	mp = td->mutex_obj;
+	cvp->__has_user_waiters = _sleepq_remove(sq, td);
+	if (mp->__ownerdata.__ownertd == curthread) {
+		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
+			_thr_wake_all(curthread->defer_waiters,
+				curthread->nwaiter_defer);
+			curthread->nwaiter_defer = 0;
+		}
+		curthread->defer_waiters[curthread->nwaiter_defer++] =
+			&td->wake_addr->value;
+		mp->__flags |= PMUTEX_FLAG_DEFERED;
+	} else {
+		waddr = &td->wake_addr->value;
+	}
+	_sleepq_unlock(cvp);
+	if (waddr != NULL)
+		_thr_set_wake(waddr);
+	return (0);
+}
+
+struct broadcast_arg {
+	struct pthread *curthread;
+	unsigned int *waddrs[MAX_DEFER_WAITERS];
+	int count;
+};
+
+static void
+drop_cb(struct pthread *td, void *arg)
+{
+	struct broadcast_arg *ba = arg;
+	struct pthread_mutex *mp;
+	struct pthread *curthread = ba->curthread;
+
+	mp = td->mutex_obj;
+	if (mp->__ownerdata.__ownertd == curthread) {
+		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
+			_thr_wake_all(curthread->defer_waiters,
+				curthread->nwaiter_defer);
+			curthread->nwaiter_defer = 0;
+		}
+		curthread->defer_waiters[curthread->nwaiter_defer++] =
+			&td->wake_addr->value;
+		mp->__flags |= PMUTEX_FLAG_DEFERED;
+	} else {
+		if (ba->count >= MAX_DEFER_WAITERS) {
+			_thr_wake_all(ba->waddrs, ba->count);
+			ba->count = 0;
+		}
+		ba->waddrs[ba->count++] = &td->wake_addr->value;
+	}
+}
+
+static int
+cond_broadcast_common(struct pthread_cond *cvp)
+{
+	int pshared = CV_PSHARED(cvp);
+	struct sleepqueue *sq;
+	struct broadcast_arg ba;
+
+	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);
+
+	if (pshared || cvp->__has_user_waiters == 0)
+		return (0);
+
+	ba.curthread = _get_curthread();
+	ba.count = 0;
+
+	_sleepq_lock(cvp);
+	sq = _sleepq_lookup(cvp);
+	if (sq == NULL) {
+		_sleepq_unlock(cvp);
+		return (0);
+	}
+	_sleepq_drop(sq, drop_cb, &ba);
+	cvp->__has_user_waiters = 0;
+	_sleepq_unlock(cvp);
+	if (ba.count > 0)
+		_thr_wake_all(ba.waddrs, ba.count);
+	return (0);
+}
+
+int
+_pthread_cond_signal(pthread_cond_t *cvp)
+{
+	return (cond_signal_common(cvp));
+}
+
+int
+_pthread_cond_broadcast(pthread_cond_t *cvp)
+{
+	return (cond_broadcast_common(cvp));
+}
+
+#define CHECK_AND_INIT_COND						\
+	if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
+		if (cvp == THR_COND_INITIALIZER) {			\
+			int error;					\
+			error = init_static(_get_curthread(), cond);	\
+			if (error != 0)					\
+				return (error);				\
+		} else if (cvp == THR_COND_DESTROYED) {			\
+			return (EINVAL);				\
+		}							\
+		cvp = *cond;						\
+	}
+
+static int
+cond_init_old(pthread_cond_old_t *cond, const pthread_condattr_t *cond_attr)
+{
+	struct pthread_cond *cvp = NULL;
+	int error;
+
+	if ((cvp = (struct pthread_cond *)
+	    malloc(sizeof(struct pthread_cond))) == NULL) {
+		error = ENOMEM;
+	} else {
+		error = cond_init(cvp, cond_attr);
+		if (error != 0)
+			free(cvp);
+		else
+			*cond = cvp;
+	}
+	return (error);
 }
 
 static int
-cond_signal_common(pthread_cond_t *cond, int broadcast)
+init_static(struct pthread *thread, pthread_cond_old_t *cond)
+{
+	int error;
+
+	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
+
+	if (*cond == NULL)
+		error = cond_init_old(cond, NULL);
+	else
+		error = 0;
+
+	THR_LOCK_RELEASE(thread, &_cond_static_lock);
+
+	return (error);
+}
+
+int
+_pthread_cond_init_1_0(pthread_cond_old_t *cond, const pthread_condattr_t *cond_attr)
+{
+
+	*cond = NULL;
+	return (cond_init_old(cond, cond_attr));
+}
+
+int
+_pthread_cond_destroy_1_0(pthread_cond_old_t *cond)
+{
+	struct pthread_cond *cvp;
+	int error = 0;
+
+	if ((cvp = *cond) == THR_COND_INITIALIZER)
+		error = 0;
+	else if (cvp == THR_COND_DESTROYED)
+		error = EINVAL;
+	else {
+		cvp = *cond;
+		error = cond_destroy_common(cvp);
+		if (error != 0)
+			return (error);
+		*cond = THR_COND_DESTROYED;
+		free(cvp);
+	}
+	return (error);
+}
+
+int
+_pthread_cond_signal_1_0(pthread_cond_old_t *cond)
+{
+	pthread_cond_t *cvp;
+
+	/*
+	 * If the condition variable is statically initialized, perform dynamic
+	 * initialization.
+	 */
+	CHECK_AND_INIT_COND
+
+	return (cond_signal_common(cvp));
+}
+
+int
+_pthread_cond_broadcast_1_0(pthread_cond_old_t *cond)
 {
-	struct pthread	*curthread = _get_curthread();
-	pthread_cond_t	cv;
-	int	ret = 0;
+	pthread_cond_t *cvp;
 
 	/*
 	 * If the condition variable is statically initialized, perform dynamic
@@ -281,25 +514,49 @@
 	 */
 	CHECK_AND_INIT_COND
 
-	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
-	if (!broadcast)
-		ret = _thr_ucond_signal(&cv->c_kerncv);
-	else
-		ret = _thr_ucond_broadcast(&cv->c_kerncv);
-	THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
-	return (ret);
+	return (cond_broadcast_common(cvp));
 }
 
 int
-_pthread_cond_signal(pthread_cond_t * cond)
+_pthread_cond_wait_1_0(pthread_cond_old_t *cond, pthread_mutex_old_t *mutex)
 {
+	pthread_cond_t *cvp;
+	int error;
+
+	/*
+	 * If the condition variable is statically initialized, perform dynamic
+	 * initialization.
+	 */
+	CHECK_AND_INIT_COND
+
+	if ((error = _mutex_owned_old(_get_curthread(), mutex)) != 0)
+		return (error);
 
-	return (cond_signal_common(cond, 0));
+	return (cond_wait_common(cvp, *mutex, NULL, 1));
 }
 
 int
-_pthread_cond_broadcast(pthread_cond_t * cond)
+_pthread_cond_timedwait_1_0(pthread_cond_old_t *cond, pthread_mutex_old_t *mutex,
+	const struct timespec * abstime)
 {
+	pthread_cond_t *cvp;
+	int error;
 
-	return (cond_signal_common(cond, 1));
+	/*
+	 * If the condition variable is statically initialized, perform dynamic
+	 * initialization.
+	 */
+	CHECK_AND_INIT_COND
+
+	if ((error = _mutex_owned_old(_get_curthread(), mutex)) != 0)
+		return (error);
+
+	return (cond_wait_common(cvp, *mutex, abstime, 1));
 }
+
+FB10_COMPAT(_pthread_cond_destroy_1_0, pthread_cond_destroy);
+FB10_COMPAT(_pthread_cond_init_1_0, pthread_cond_init);
+FB10_COMPAT(_pthread_cond_wait_1_0, pthread_cond_wait);
+FB10_COMPAT(_pthread_cond_timedwait_1_0, pthread_cond_timedwait);
+FB10_COMPAT(_pthread_cond_signal_1_0, pthread_cond_signal);
+FB10_COMPAT(_pthread_cond_broadcast_1_0, pthread_cond_broadcast);
--- src/lib/libthr/thread/thr_condattr.c	2010-05-24 14:43:34.000000000 +0000
+++ src/lib/libthr/thread/thr_condattr.c	2010-11-23 01:58:38.000000000 +0000
@@ -45,6 +45,9 @@
 __weak_reference(_pthread_condattr_getpshared, pthread_condattr_getpshared);
 __weak_reference(_pthread_condattr_setpshared, pthread_condattr_setpshared);
 
+int _pthread_condattr_setpshared_1_0(pthread_condattr_t *, int);
+FB10_COMPAT(_pthread_condattr_setpshared_1_0, pthread_condattr_setpshared);
+
 int
 _pthread_condattr_init(pthread_condattr_t *attr)
 {
@@ -108,7 +111,7 @@
 	if (attr == NULL || *attr == NULL)
 		return (EINVAL);
 
-	*pshared = PTHREAD_PROCESS_PRIVATE;
+	*pshared = (*attr)->c_pshared;
 	return (0);
 }
 
@@ -118,7 +121,23 @@
 	if (attr == NULL || *attr == NULL)
 		return (EINVAL);
 
+	if (pshared != PTHREAD_PROCESS_PRIVATE &&
+	    pshared != PTHREAD_PROCESS_SHARED)
+		return (EINVAL);
+
+	(*attr)->c_pshared = pshared;
+	return (0);
+}
+
+int
+_pthread_condattr_setpshared_1_0(pthread_condattr_t *attr, int pshared)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
 	if (pshared != PTHREAD_PROCESS_PRIVATE)
 		return (EINVAL);
+
+	(*attr)->c_pshared = pshared;
 	return (0);
 }
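
[Reviewer note] With _POSIX_THREAD_PROCESS_SHARED flipped to 200112L and the attribute setters now accepting PTHREAD_PROCESS_SHARED, mutexes and condition variables can live in memory shared across processes. A minimal fork()-based sketch (error handling omitted):

#include <sys/mman.h>
#include <sys/wait.h>
#include <pthread.h>
#include <unistd.h>

struct shared {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	int		ready;
};

int
main(void)
{
	pthread_mutexattr_t ma;
	pthread_condattr_t ca;
	struct shared *sh;

	sh = mmap(NULL, sizeof(*sh), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&sh->mtx, &ma);

	pthread_condattr_init(&ca);
	pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
	pthread_cond_init(&sh->cv, &ca);

	if (fork() == 0) {			/* child: announce readiness */
		pthread_mutex_lock(&sh->mtx);
		sh->ready = 1;
		pthread_cond_signal(&sh->cv);
		pthread_mutex_unlock(&sh->mtx);
		_exit(0);
	}
	pthread_mutex_lock(&sh->mtx);		/* parent: wait for the child */
	while (!sh->ready)
		pthread_cond_wait(&sh->cv, &sh->mtx);
	pthread_mutex_unlock(&sh->mtx);
	wait(NULL);
	return (0);
}
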
--- src/lib/libthr/thread/thr_create.c	2010-09-15 03:35:59.000000000 +0000
+++ src/lib/libthr/thread/thr_create.c	2010-11-23 01:58:38.000000000 +0000
@@ -111,8 +111,7 @@
 	new_thread->cancel_enable = 1;
 	new_thread->cancel_async = 0;
 	/* Initialize the mutex queue: */
-	TAILQ_INIT(&new_thread->mutexq);
-	TAILQ_INIT(&new_thread->pp_mutexq);
+	_thr_mutex_link_init(new_thread);
 
 	/* Initialise hooks in the thread structure: */
 	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
--- src/lib/libthr/thread/thr_exit.c	2010-09-30 13:35:27.000000000 +0000
+++ src/lib/libthr/thread/thr_exit.c	2010-11-23 01:58:38.000000000 +0000
@@ -279,6 +279,7 @@
 
 	/* Tell malloc that the thread is exiting. */
 	_malloc_thread_cleanup();
+	_thr_mutex_link_exit(curthread);
 
 	THR_LOCK(curthread);
 	curthread->state = PS_DEAD;
--- src/lib/libthr/thread/thr_init.c	2010-09-28 05:35:27.000000000 +0000
+++ src/lib/libthr/thread/thr_init.c	2010-11-30 06:50:05.000000000 +0000
@@ -89,13 +89,16 @@
 struct pthread_mutex_attr _pthread_mutexattr_default = {
 	.m_type = PTHREAD_MUTEX_DEFAULT,
 	.m_protocol = PTHREAD_PRIO_NONE,
-	.m_ceiling = 0
+	.m_ceiling = 0,
+	.m_pshared = 0,
+	.m_robust = PTHREAD_MUTEX_STALLED
 };
 
 struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
 	.m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
 	.m_protocol = PTHREAD_PRIO_NONE,
-	.m_ceiling = 0
+	.m_ceiling = 0,
+	.m_pshared = 0
 };
 
 /* Default condition variable attributes: */
@@ -213,13 +216,25 @@
 	{DUAL_ENTRY(_pthread_cancel)},		/* PJT_CANCEL */
 	{DUAL_ENTRY(_pthread_cleanup_pop)},	/* PJT_CLEANUP_POP */
 	{DUAL_ENTRY(_pthread_cleanup_push)},	/* PJT_CLEANUP_PUSH */
-	{DUAL_ENTRY(_pthread_cond_broadcast)},	/* PJT_COND_BROADCAST */
-	{DUAL_ENTRY(_pthread_cond_destroy)},	/* PJT_COND_DESTROY */
-	{DUAL_ENTRY(_pthread_cond_init)},	/* PJT_COND_INIT */
-	{DUAL_ENTRY(_pthread_cond_signal)},	/* PJT_COND_SIGNAL */
-	{DUAL_ENTRY(_pthread_cond_timedwait)},	/* PJT_COND_TIMEDWAIT */
-	{(pthread_func_t)__pthread_cond_wait,
+
+	{(pthread_func_t)_pthread_cond_broadcast_1_0,
+	 (pthread_func_t)_pthread_cond_broadcast},	/* PJT_COND_BROADCAST */
+
+	{(pthread_func_t)_pthread_cond_destroy_1_0,
+	 (pthread_func_t)_pthread_cond_destroy},	/* PJT_COND_DESTROY */
+
+	{(pthread_func_t)_pthread_cond_init_1_0,
+	 (pthread_func_t)_pthread_cond_init},		/* PJT_COND_INIT */
+
+	{(pthread_func_t)_pthread_cond_signal_1_0,
+	 (pthread_func_t)_pthread_cond_signal},		/* PJT_COND_SIGNAL */
+
+	{(pthread_func_t)_pthread_cond_timedwait_1_0,
+	 (pthread_func_t)_pthread_cond_timedwait},	/* PJT_COND_TIMEDWAIT */
+
+	{(pthread_func_t)_pthread_cond_wait_1_0,
 	 (pthread_func_t)_pthread_cond_wait},	/* PJT_COND_WAIT */
+
 	{DUAL_ENTRY(_pthread_detach)},		/* PJT_DETACH */
 	{DUAL_ENTRY(_pthread_equal)},		/* PJT_EQUAL */
 	{DUAL_ENTRY(_pthread_exit)},		/* PJT_EXIT */
@@ -232,21 +247,45 @@
 	{DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
 	{DUAL_ENTRY(_pthread_mutexattr_init)},	/* PJT_MUTEXATTR_INIT */
 	{DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
-	{DUAL_ENTRY(_pthread_mutex_destroy)},	/* PJT_MUTEX_DESTROY */
-	{DUAL_ENTRY(_pthread_mutex_init)},	/* PJT_MUTEX_INIT */
-	{(pthread_func_t)__pthread_mutex_lock,
+
+	{(pthread_func_t)_pthread_mutex_destroy_1_0,
+	 (pthread_func_t)_pthread_mutex_destroy},	/* PJT_MUTEX_DESTROY */
+
+	{(pthread_func_t)_pthread_mutex_init_1_0,
+	 (pthread_func_t)_pthread_mutex_init},		/* PJT_MUTEX_INIT */
+
+	{(pthread_func_t)_pthread_mutex_lock_1_0,
 	 (pthread_func_t)_pthread_mutex_lock},	/* PJT_MUTEX_LOCK */
-	{(pthread_func_t)__pthread_mutex_trylock,
-	 (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
-	{DUAL_ENTRY(_pthread_mutex_unlock)},	/* PJT_MUTEX_UNLOCK */
+
+	{(pthread_func_t)_pthread_mutex_trylock_1_0,
+	 (pthread_func_t)_pthread_mutex_trylock},	/* PJT_MUTEX_TRYLOCK */
+
+	{(pthread_func_t)_pthread_mutex_unlock_1_0,
+	 (pthread_func_t)_pthread_mutex_unlock},	/* PJT_MUTEX_UNLOCK */
+
 	{DUAL_ENTRY(_pthread_once)},		/* PJT_ONCE */
-	{DUAL_ENTRY(_pthread_rwlock_destroy)},	/* PJT_RWLOCK_DESTROY */
-	{DUAL_ENTRY(_pthread_rwlock_init)},	/* PJT_RWLOCK_INIT */
-	{DUAL_ENTRY(_pthread_rwlock_rdlock)},	/* PJT_RWLOCK_RDLOCK */
-	{DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
-	{DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
-	{DUAL_ENTRY(_pthread_rwlock_unlock)},	/* PJT_RWLOCK_UNLOCK */
-	{DUAL_ENTRY(_pthread_rwlock_wrlock)},	/* PJT_RWLOCK_WRLOCK */
+
+	{(pthread_func_t)_pthread_rwlock_destroy_1_0,
+	 (pthread_func_t)_pthread_rwlock_destroy},	/* PJT_RWLOCK_DESTROY */
+
+	{(pthread_func_t)_pthread_rwlock_init_1_0,
+	 (pthread_func_t)_pthread_rwlock_init},		/* PJT_RWLOCK_INIT */
+
+	{(pthread_func_t)_pthread_rwlock_rdlock_1_0,
+	 (pthread_func_t)_pthread_rwlock_rdlock},	/* PJT_RWLOCK_RDLOCK */
+
+	{(pthread_func_t)_pthread_rwlock_tryrdlock_1_0,
+	 (pthread_func_t)_pthread_rwlock_tryrdlock},	/* PJT_RWLOCK_TRYRDLOCK */
+
+	{(pthread_func_t)_pthread_rwlock_trywrlock_1_0,
+	 (pthread_func_t)_pthread_rwlock_trywrlock},	/* PJT_RWLOCK_TRYWRLOCK */
+
+	{(pthread_func_t)_pthread_rwlock_unlock_1_0,
+	 (pthread_func_t)_pthread_rwlock_unlock},	/* PJT_RWLOCK_UNLOCK */
+
+	{(pthread_func_t)_pthread_rwlock_wrlock_1_0,
+	 (pthread_func_t)_pthread_rwlock_wrlock},	/* PJT_RWLOCK_WRLOCK */
+
 	{DUAL_ENTRY(_pthread_self)},		/* PJT_SELF */
 	{DUAL_ENTRY(_pthread_setcancelstate)},	/* PJT_SETCANCELSTATE */
 	{DUAL_ENTRY(_pthread_setcanceltype)},	/* PJT_SETCANCELTYPE */
@@ -259,6 +298,29 @@
 	{DUAL_ENTRY(_pthread_cancel_leave)}	/* PJT_CANCEL_LEAVE */
 };
 
+#define SINGLE_ENTRY(entry)	(pthread_func_t)entry
+
+pthread_func_t jmp_table12[PJT12_MAX] = {
+	SINGLE_ENTRY(_pthread_cond_broadcast),	/* PJT12_COND_BROADCAST */
+	SINGLE_ENTRY(_pthread_cond_destroy),	/* PJT12_COND_DESTROY */
+	SINGLE_ENTRY(_pthread_cond_init),	/* PJT12_COND_INIT */
+	SINGLE_ENTRY(_pthread_cond_signal),	/* PJT12_COND_SIGNAL */
+	SINGLE_ENTRY(_pthread_cond_timedwait),	/* PJT12_COND_TIMEDWAIT */
+	SINGLE_ENTRY(__pthread_cond_wait),	/* PJT12_COND_WAIT */
+	SINGLE_ENTRY(_pthread_mutex_destroy),	/* PJT12_MUTEX_DESTROY */
+	SINGLE_ENTRY(_pthread_mutex_init),	/* PJT12_MUTEX_INIT */
+	SINGLE_ENTRY(_pthread_mutex_lock),	/* PJT12_MUTEX_LOCK */
+	SINGLE_ENTRY(_pthread_mutex_trylock),	/* PJT12_MUTEX_TRYLOCK */
+	SINGLE_ENTRY(_pthread_mutex_unlock),	/* PJT12_MUTEX_UNLOCK */
+	SINGLE_ENTRY(_pthread_rwlock_destroy),	/* PJT12_RWLOCK_DESTROY */
+	SINGLE_ENTRY(_pthread_rwlock_init),	/* PJT12_RWLOCK_INIT */
+	SINGLE_ENTRY(_pthread_rwlock_rdlock),	/* PJT12_RWLOCK_RDLOCK */
+	SINGLE_ENTRY(_pthread_rwlock_tryrdlock),/* PJT12_RWLOCK_TRYRDLOCK */
+	SINGLE_ENTRY(_pthread_rwlock_trywrlock),/* PJT12_RWLOCK_TRYWRLOCK */
+	SINGLE_ENTRY(_pthread_rwlock_unlock),	/* PJT12_RWLOCK_UNLOCK */
+	SINGLE_ENTRY(_pthread_rwlock_wrlock)	/* PJT12_RWLOCK_WRLOCK */
+};
+
 static int	init_once = 0;
 
 /*
@@ -311,6 +373,10 @@
 		PANIC("Thread jump table not properly initialized");
 	memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
 
+	if (sizeof(jmp_table12) != (sizeof(pthread_func_t) * PJT12_MAX))
+		PANIC("Thread jump table not properly initialized");
+	memcpy(__thr_jtable12, jmp_table12, sizeof(jmp_table12));
+
 	/*
 	 * Check for the special case of this process running as
 	 * or in place of init as pid = 1:
@@ -412,8 +478,7 @@
 	thr_set_name(thread->tid, "initial thread");
 
 	/* Initialize the mutex queue: */
-	TAILQ_INIT(&thread->mutexq);
-	TAILQ_INIT(&thread->pp_mutexq);
+	_thr_mutex_link_init(thread);
 
 	thread->state = PS_RUNNING;
 
@@ -444,6 +509,8 @@
 	_thr_once_init();
 	_thr_spinlock_init();
 	_thr_list_init();
+	_thr_wake_addr_init();
+	_sleepq_init();
 
 	/*
 	 * Avoid reinitializing some things if they don't need to be,
#include "thr_private.h" @@ -41,6 +42,10 @@ #define DBG_MSG(x...) #endif +static struct umutex addr_lock; +static struct wake_addr *wake_addr_head; +static struct wake_addr default_wake_addr; + /* * This is called when the first thread (other than the initial * thread) is created. @@ -130,3 +135,90 @@ _schedparam_to_rtp(policy, param, &rtp); return (rtprio_thread(RTP_SET, lwpid, &rtp)); } + +void +_thr_wake_addr_init(void) +{ + _thr_umutex_init(&addr_lock); + wake_addr_head = NULL; +} + +/* + * Allocate wake-address, the memory area is never freed after + * allocated, this becauses threads may be referencing it. + */ +struct wake_addr * +_thr_alloc_wake_addr(void) +{ + struct pthread *curthread; + struct wake_addr *p; + + if (_thr_initial == NULL) { + return &default_wake_addr; + } + + curthread = _get_curthread(); + + THR_LOCK_ACQUIRE(curthread, &addr_lock); + if (wake_addr_head == NULL) { + unsigned i; + unsigned pagesize = getpagesize(); + struct wake_addr *pp = (struct wake_addr *) + mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, + MAP_ANON|MAP_PRIVATE, -1, 0); + for (i = 1; i < pagesize/sizeof(struct wake_addr); ++i) + pp[i].link = &pp[i+1]; + pp[i-1].link = NULL; + wake_addr_head = &pp[1]; + p = &pp[0]; + } else { + p = wake_addr_head; + wake_addr_head = p->link; + } + THR_LOCK_RELEASE(curthread, &addr_lock); + p->value = 0; + return (p); +} + +void +_thr_release_wake_addr(struct wake_addr *wa) +{ + struct pthread *curthread = _get_curthread(); + + if (wa == &default_wake_addr) + return; + THR_LOCK_ACQUIRE(curthread, &addr_lock); + wa->link = wake_addr_head; + wake_addr_head = wa; + THR_LOCK_RELEASE(curthread, &addr_lock); +} + +/* Sleep on thread wakeup address */ +int +_thr_sleep(struct pthread *curthread, int clockid, + const struct timespec *abstime) +{ + + curthread->will_sleep = 0; + if (curthread->nwaiter_defer > 0) { + _thr_wake_all(curthread->defer_waiters, + curthread->nwaiter_defer); + curthread->nwaiter_defer = 0; + } + + if (curthread->wake_addr->value != 0) + return (0); + + return _thr_umtx_timedwait_uint(&curthread->wake_addr->value, 0, + clockid, abstime, 0); +} + +void +_thr_wake_all(unsigned int *waddrs[], int count) +{ + int i; + + for (i = 0; i < count; ++i) + *waddrs[i] = 1; + _umtx_op(waddrs, UMTX_OP_NWAKE_PRIVATE, count, NULL, NULL); +} --- src/lib/libthr/thread/thr_list.c 2010-09-13 07:35:35.000000000 0000 +++ src/lib/libthr/thread/thr_list.c 2010-11-29 07:33:25.000000000 0000 @@ -165,6 +165,8 @@ if (tcb != NULL) { memset(thread, 0, sizeof(*thread)); thread->tcb = tcb; + thread->sleepqueue = _sleepq_alloc(); + thread->wake_addr = _thr_alloc_wake_addr(); } else { thr_destroy(curthread, thread); atomic_fetchadd_int(&total_threads, -1); @@ -192,6 +194,8 @@ } thread->tcb = NULL; if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) { + _sleepq_free(thread->sleepqueue); + _thr_release_wake_addr(thread->wake_addr); thr_destroy(curthread, thread); atomic_fetchadd_int(&total_threads, -1); } else { --- src/lib/libthr/thread/thr_mutex.c 2010-10-27 04:35:31.000000000 0000 +++ src/lib/libthr/thread/thr_mutex.c 2010-11-30 03:02:41.000000000 0000 @@ -41,30 +41,11 @@ #include #include #include +#include #include "un-namespace.h" #include "thr_private.h" -#if defined(_PTHREADS_INVARIANTS) -#define MUTEX_INIT_LINK(m) do { \ - (m)->m_qe.tqe_prev = NULL; \ - (m)->m_qe.tqe_next = NULL; \ -} while (0) -#define MUTEX_ASSERT_IS_OWNED(m) do { \ - if (__predict_false((m)->m_qe.tqe_prev == NULL))\ - PANIC("mutex is not on list"); \ -} while (0) -#define 
--- src/lib/libthr/thread/thr_list.c 2010-09-13 07:35:35.000000000 0000 +++ src/lib/libthr/thread/thr_list.c 2010-11-29 07:33:25.000000000 0000 @@ -165,6 +165,8 @@ if (tcb != NULL) { memset(thread, 0, sizeof(*thread)); thread->tcb = tcb; + thread->sleepqueue = _sleepq_alloc(); + thread->wake_addr = _thr_alloc_wake_addr(); } else { thr_destroy(curthread, thread); atomic_fetchadd_int(&total_threads, -1); @@ -192,6 +194,8 @@ } thread->tcb = NULL; if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) { + _sleepq_free(thread->sleepqueue); + _thr_release_wake_addr(thread->wake_addr); thr_destroy(curthread, thread); atomic_fetchadd_int(&total_threads, -1); } else { --- src/lib/libthr/thread/thr_mutex.c 2010-10-27 04:35:31.000000000 0000 +++ src/lib/libthr/thread/thr_mutex.c 2010-11-30 03:02:41.000000000 0000 @@ -41,30 +41,11 @@ #include #include #include +#include #include "un-namespace.h" #include "thr_private.h" -#if defined(_PTHREADS_INVARIANTS) -#define MUTEX_INIT_LINK(m) do { \ - (m)->m_qe.tqe_prev = NULL; \ - (m)->m_qe.tqe_next = NULL; \ -} while (0) -#define MUTEX_ASSERT_IS_OWNED(m) do { \ - if (__predict_false((m)->m_qe.tqe_prev == NULL))\ - PANIC("mutex is not on list"); \ -} while (0) -#define MUTEX_ASSERT_NOT_OWNED(m) do { \ - if (__predict_false((m)->m_qe.tqe_prev != NULL || \ - (m)->m_qe.tqe_next != NULL)) \ - PANIC("mutex is on list"); \ -} while (0) -#else -#define MUTEX_INIT_LINK(m) -#define MUTEX_ASSERT_IS_OWNED(m) -#define MUTEX_ASSERT_NOT_OWNED(m) -#endif - /* * For adaptive mutexes, how many times to spin doing trylock2 * before entering the kernel to block @@ -74,61 +55,50 @@ /* * Prototypes */ -int __pthread_mutex_init(pthread_mutex_t *mutex, - const pthread_mutexattr_t *mutex_attr); -int __pthread_mutex_trylock(pthread_mutex_t *mutex); -int __pthread_mutex_lock(pthread_mutex_t *mutex); -int __pthread_mutex_timedlock(pthread_mutex_t *mutex, - const struct timespec *abstime); -int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); -int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); -int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); -static int mutex_self_trylock(pthread_mutex_t); -static int mutex_self_lock(pthread_mutex_t, +static int mutex_self_trylock(struct pthread_mutex *); +static int mutex_self_lock(struct pthread_mutex *, const struct timespec *abstime); -static int mutex_unlock_common(pthread_mutex_t *); -static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, +static int mutex_unlock_common(struct pthread_mutex *); +static int mutex_lock_sleep(struct pthread_mutex *, const struct timespec *); +static void enqueue_mutex(struct pthread *, struct pthread_mutex *); +static void dequeue_mutex(struct pthread *, struct pthread_mutex *); -__weak_reference(__pthread_mutex_init, pthread_mutex_init); -__strong_reference(__pthread_mutex_init, _pthread_mutex_init); -__weak_reference(__pthread_mutex_lock, pthread_mutex_lock); -__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock); -__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); -__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); -__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); -__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); - /* Single underscore versions provided for libc internal usage: */ /* No difference between libc and application usage of these: */ +__weak_reference(_pthread_mutex_init, pthread_mutex_init); +__weak_reference(_pthread_mutex_lock, pthread_mutex_lock); +__weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock); +__weak_reference(_pthread_mutex_trylock, pthread_mutex_trylock); __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); - __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); - -__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); -__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); +__weak_reference(_pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); - -__weak_reference(__pthread_mutex_setyieldloops_np, 
pthread_mutex_setyieldloops_np); -__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); +__weak_reference(_pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); +__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent); +int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mp, + void *(calloc_cb)(size_t, size_t)); + static int -mutex_init(pthread_mutex_t *mutex, - const struct pthread_mutex_attr *mutex_attr, - void *(calloc_cb)(size_t, size_t)) +mutex_init(struct pthread_mutex *mp, + const struct pthread_mutex_attr *mutex_attr) { const struct pthread_mutex_attr *attr; - struct pthread_mutex *pmutex; + + /* The mutex must be aligned on a 32-bit boundary. */ + if (((uintptr_t)mp) & 0x03) + return (EINVAL); if (mutex_attr == NULL) { attr = &_pthread_mutexattr_default; @@ -141,384 +111,343 @@ attr->m_protocol > PTHREAD_PRIO_PROTECT) return (EINVAL); } - if ((pmutex = (pthread_mutex_t) - calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) - return (ENOMEM); - - pmutex->m_type = attr->m_type; - pmutex->m_owner = NULL; - pmutex->m_count = 0; - pmutex->m_refcount = 0; - pmutex->m_spinloops = 0; - pmutex->m_yieldloops = 0; - MUTEX_INIT_LINK(pmutex); + memset(mp, 0, sizeof(*mp)); + mp->__magic = _PTHREAD_MUTEX_MAGIC; + mp->__flags = attr->m_type; + mp->__ownerdata.__ownertd = NULL; + mp->__recurse = 0; + mp->__spinloops = 0; switch(attr->m_protocol) { case PTHREAD_PRIO_NONE: - pmutex->m_lock.m_owner = UMUTEX_UNOWNED; - pmutex->m_lock.m_flags = 0; + mp->__lockword = UMUTEX_UNOWNED; + if (attr->m_pshared == 0) + mp->__lockflags |= UMUTEX_SIMPLE; break; case PTHREAD_PRIO_INHERIT: - pmutex->m_lock.m_owner = UMUTEX_UNOWNED; - pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; + mp->__lockword = UMUTEX_UNOWNED; + mp->__lockflags = UMUTEX_PRIO_INHERIT; break; case PTHREAD_PRIO_PROTECT: - pmutex->m_lock.m_owner = UMUTEX_CONTESTED; - pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; - pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; + mp->__lockword = UMUTEX_UNOWNED; + mp->__lockflags = UMUTEX_PRIO_PROTECT2; + if (attr->m_pshared == 0) + mp->__lockflags |= UMUTEX_SIMPLE; + mp->__ceilings[0] = attr->m_ceiling; break; } - - if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) { - pmutex->m_spinloops = + if (attr->m_pshared != 0) + mp->__lockflags |= USYNC_PROCESS_SHARED; + if (attr->m_robust == PTHREAD_MUTEX_ROBUST) + mp->__lockflags |= UMUTEX_ROBUST; + if (PMUTEX_TYPE(mp->__flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { + mp->__spinloops = _thr_spinloops ? 
_thr_spinloops: MUTEX_ADAPTIVE_SPINS; - pmutex->m_yieldloops = _thr_yieldloops; } - - *mutex = pmutex; return (0); } -static int -init_static(struct pthread *thread, pthread_mutex_t *mutex) -{ - int ret; - - THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); - - if (*mutex == THR_MUTEX_INITIALIZER) - ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc); - else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) - ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); - else - ret = 0; - THR_LOCK_RELEASE(thread, &_mutex_static_lock); - - return (ret); -} - -static void -set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) -{ - struct pthread_mutex *m2; - - m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue); - if (m2 != NULL) - m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; - else - m->m_lock.m_ceilings[1] = -1; -} - int -__pthread_mutex_init(pthread_mutex_t *mutex, +_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr) { - return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc); -} - -/* This function is used internally by malloc. */ -int -_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)) -{ - static const struct pthread_mutex_attr attr = { - .m_type = PTHREAD_MUTEX_NORMAL, - .m_protocol = PTHREAD_PRIO_NONE, - .m_ceiling = 0 - }; - int ret; - - ret = mutex_init(mutex, &attr, calloc_cb); - if (ret == 0) - (*mutex)->m_private = 1; - return (ret); + return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL); } void _mutex_fork(struct pthread *curthread) { - struct pthread_mutex *m; + struct mutex_link *ml; /* - * Fix mutex ownership for child process. - * note that process shared mutex should not - * be inherited because owner is forking thread - * which is in parent process, they should be - * removed from the owned mutex list, current, - * process shared mutex is not supported, so I - * am not worried. + * Fix mutex ownership for the child process. Only PI mutexes need + * to be fixed here, because we still use the TID as the lock word. 
*/ - - TAILQ_FOREACH(m, &curthread->mutexq, m_qe) - m->m_lock.m_owner = TID(curthread); - TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe) - m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED; + TAILQ_FOREACH(ml, &curthread->pi_mutexq, qe) + ml->mutexp->__lockword = TID(curthread); } int -_pthread_mutex_destroy(pthread_mutex_t *mutex) +_pthread_mutex_destroy(pthread_mutex_t *mp) { - pthread_mutex_t m; - int ret; - - m = *mutex; - if (m < THR_MUTEX_DESTROYED) { - ret = 0; - } else if (m == THR_MUTEX_DESTROYED) { - ret = EINVAL; - } else { - if (m->m_owner != NULL || m->m_refcount != 0) { - ret = EBUSY; - } else { - *mutex = THR_MUTEX_DESTROYED; - MUTEX_ASSERT_NOT_OWNED(m); - free(m); - ret = 0; - } - } - - return (ret); + memset(mp, 0, sizeof(*mp)); + return (0); } -#define ENQUEUE_MUTEX(curthread, m) \ - do { \ - (m)->m_owner = curthread; \ - /* Add to the list of owned mutexes: */ \ - MUTEX_ASSERT_NOT_OWNED((m)); \ - if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \ - TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\ - else \ - TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\ - } while (0) - -#define CHECK_AND_INIT_MUTEX \ - if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \ - if (m == THR_MUTEX_DESTROYED) \ - return (EINVAL); \ - int ret; \ - ret = init_static(_get_curthread(), mutex); \ - if (ret) \ - return (ret); \ - m = *mutex; \ - } - -static int -mutex_trylock_common(pthread_mutex_t *mutex) +static inline int +mutex_trylock_common(struct pthread_mutex *mp) { struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m = *mutex; uint32_t id; - int ret; + int error; - id = TID(curthread); - if (m->m_private) - THR_CRITICAL_ENTER(curthread); - ret = _thr_umutex_trylock(&m->m_lock, id); - if (__predict_true(ret == 0)) { - ENQUEUE_MUTEX(curthread, m); - } else if (m->m_owner == curthread) { - ret = mutex_self_trylock(m); - } /* else {} */ - if (ret && m->m_private) - THR_CRITICAL_LEAVE(curthread); - return (ret); + if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2 | + UMUTEX_PRIO_INHERIT)) == 0) { + if (mp->__lockflags & UMUTEX_SIMPLE) + id = UMUTEX_SIMPLE_OWNER; + else + id = TID(curthread); + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, + id)) { + if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) + mp->__ownerdata.__ownertd = curthread; + return (0); + } + if (mp->__lockword == UMUTEX_CONTESTED) { + if (atomic_cmpset_acq_32(&mp->__lockword, + UMUTEX_CONTESTED, id|UMUTEX_CONTESTED)) { + if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) + mp->__ownerdata.__ownertd = curthread; + return (0); + } + } + if (_mutex_owned(curthread, mp) == 0) + return mutex_self_trylock(mp); + return (EBUSY); + } else if (mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2)) { + if (_mutex_owned(curthread, mp) == 0) + return mutex_self_trylock(mp); + error = __thr_umutex_trylock((struct umutex *)&mp->__lockword); + if (error == 0) + enqueue_mutex(curthread, mp); + else if (error == EOWNERDEAD) { + /* + * Fix inconsistent recursive count for robust mutex. 
+ */ + mp->__recurse = 0; + enqueue_mutex(curthread, mp); + } + return (error); + } else if (mp->__lockflags & UMUTEX_PRIO_INHERIT) { + id = TID(curthread); + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, id)){ + enqueue_mutex(curthread, mp); + return (0); + } + if (_mutex_owned(curthread, mp) == 0) + return mutex_self_trylock(mp); + return (EBUSY); + } + return (EINVAL); } int -__pthread_mutex_trylock(pthread_mutex_t *mutex) +_pthread_mutex_trylock(pthread_mutex_t *mp) { - struct pthread_mutex *m; + struct pthread *curthread = _get_curthread(); + int error; - CHECK_AND_INIT_MUTEX - - return (mutex_trylock_common(mutex)); + if (!(mp->__flags & PMUTEX_FLAG_PRIVATE)) + return mutex_trylock_common(mp); + THR_CRITICAL_ENTER(curthread); + error = mutex_trylock_common(mp); + if (error != 0 && error != EOWNERDEAD) + THR_CRITICAL_LEAVE(curthread); + return (error); } static int -mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, +mutex_lock_sleep(struct pthread_mutex *mp, const struct timespec *abstime) { + struct pthread *curthread = _get_curthread(); uint32_t id, owner; int count; - int ret; - - if (m->m_owner == curthread) - return mutex_self_lock(m, abstime); + int error; - id = TID(curthread); /* * For adaptive mutexes, spin for a bit in the expectation * that if the application requests this mutex type then * the lock is likely to be released quickly and it is - * faster than entering the kernel + * faster than entering the kernel. */ if (__predict_false( - (m->m_lock.m_flags & - (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) + (mp->__lockflags & + (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | + UMUTEX_ROBUST)) != 0)) goto sleep_in_kernel; + if ((mp->__lockflags & UMUTEX_SIMPLE) != 0) + id = UMUTEX_SIMPLE_OWNER; + else + id = TID(curthread); + if (!_thr_is_smp) - goto yield_loop; + goto sleep_in_kernel; - count = m->m_spinloops; + count = mp->__spinloops; while (count--) { - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { - ret = 0; + owner = mp->__lockword; + if ((owner & UMUTEX_OWNER_MASK) == 0) { + if (atomic_cmpset_acq_32(&mp->__lockword, owner, + id|owner)) { + error = 0; goto done; } } CPU_SPINWAIT; } -yield_loop: - count = m->m_yieldloops; - while (count--) { - _sched_yield(); - owner = m->m_lock.m_owner; - if ((owner & ~UMUTEX_CONTESTED) == 0) { - if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { - ret = 0; - goto done; - } - } - } - sleep_in_kernel: if (abstime == NULL) { - ret = __thr_umutex_lock(&m->m_lock, id); - } else if (__predict_false( - abstime->tv_nsec < 0 || - abstime->tv_nsec >= 1000000000)) { - ret = EINVAL; + error = __thr_umutex_lock((struct umutex *)&mp->__lockword, id); } else { - ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); + error = __thr_umutex_timedlock((struct umutex *)&mp->__lockword, id, abstime); } done: - if (ret == 0) - ENQUEUE_MUTEX(curthread, m); + if (error == 0) + enqueue_mutex(curthread, mp); + else if (error == EOWNERDEAD) { + /* + * Fix inconsistent recursive count for robust mutex. 
+ */ + mp->__recurse = 0; + enqueue_mutex(curthread, mp); + } - return (ret); + return (error); } static inline int -mutex_lock_common(struct pthread_mutex *m, +_mutex_lock_common(struct pthread_mutex *mp, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); - int ret; + uint32_t id; + + if ((mp->__lockflags & UMUTEX_SIMPLE) != 0) + id = UMUTEX_SIMPLE_OWNER; + else + id = TID(curthread); + if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2 | + UMUTEX_PRIO_INHERIT)) == 0) { + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, + id)) { + if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) + mp->__ownerdata.__ownertd = curthread; + return (0); + } + if (mp->__lockword == UMUTEX_CONTESTED) { + if (atomic_cmpset_acq_32(&mp->__lockword, + UMUTEX_CONTESTED, id|UMUTEX_CONTESTED)) { + if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) + mp->__ownerdata.__ownertd = curthread; + return (0); + } + } + } else if ((mp->__lockflags & (UMUTEX_PRIO_INHERIT|UMUTEX_ROBUST)) == + UMUTEX_PRIO_INHERIT) { + id = TID(curthread); + if (atomic_cmpset_acq_32(&mp->__lockword, UMUTEX_UNOWNED, + id)) { + enqueue_mutex(curthread, mp); + return (0); + } + } + + if (_mutex_owned(curthread, mp) == 0) + return mutex_self_lock(mp, abstime); + + if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || + abstime->tv_nsec >= 1000000000)) + return (EINVAL); + + return mutex_lock_sleep(mp, abstime); +} + +static inline int +mutex_lock_common(struct pthread_mutex *mp, + const struct timespec *abstime, int cvattach) +{ + struct pthread *curthread = _get_curthread(); + int error; - if (m->m_private) + if (cvattach || (mp->__flags & PMUTEX_FLAG_PRIVATE) == 0) + return _mutex_lock_common(mp, abstime); + if (mp->__flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_ENTER(curthread); - if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { - ENQUEUE_MUTEX(curthread, m); - ret = 0; - } else { - ret = mutex_lock_sleep(curthread, m, abstime); - } - if (ret && m->m_private) + error = _mutex_lock_common(mp, abstime); + if (error && error != EOWNERDEAD) THR_CRITICAL_LEAVE(curthread); - return (ret); + return (error); } int -__pthread_mutex_lock(pthread_mutex_t *mutex) +_pthread_mutex_lock(pthread_mutex_t *mp) { - struct pthread_mutex *m; _thr_check_init(); - CHECK_AND_INIT_MUTEX - - return (mutex_lock_common(m, NULL)); + return (mutex_lock_common(mp, NULL, 0)); } int -__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) +_pthread_mutex_timedlock(pthread_mutex_t *mp, const struct timespec *abstime) { - struct pthread_mutex *m; _thr_check_init(); - CHECK_AND_INIT_MUTEX - - return (mutex_lock_common(m, abstime)); + return (mutex_lock_common(mp, abstime, 0)); } int -_pthread_mutex_unlock(pthread_mutex_t *m) +_pthread_mutex_unlock(pthread_mutex_t *mp) { - return (mutex_unlock_common(m)); -} - -int -_mutex_cv_lock(pthread_mutex_t *mutex, int count) -{ - struct pthread_mutex *m; - int ret; - - m = *mutex; - ret = mutex_lock_common(m, NULL); - if (ret == 0) { - m->m_refcount--; - m->m_count += count; - } - return (ret); + return (mutex_unlock_common(mp)); } static int -mutex_self_trylock(struct pthread_mutex *m) +mutex_self_trylock(struct pthread_mutex *mp) { - int ret; + int error; - switch (m->m_type) { + switch (PMUTEX_TYPE(mp->__flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_NORMAL: - ret = EBUSY; + error = EBUSY; break; case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ - if (m->m_count + 1 > 0) { - m->m_count++; - ret = 0; + if 
(mp->__recurse + 1 > 0) { + mp->__recurse++; + error = 0; } else - ret = EAGAIN; + error = EAGAIN; break; default: /* Trap invalid mutex types; */ - ret = EINVAL; + error = EINVAL; } - return (ret); + return (error); } static int -mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) +mutex_self_lock(struct pthread_mutex *mp, const struct timespec *abstime) { struct timespec ts1, ts2; - int ret; + int error; - switch (m->m_type) { + switch (PMUTEX_TYPE(mp->__flags)) { case PTHREAD_MUTEX_ERRORCHECK: case PTHREAD_MUTEX_ADAPTIVE_NP: if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { - ret = EINVAL; + error = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); - ret = ETIMEDOUT; + error = ETIMEDOUT; } } else { /* * POSIX specifies that mutexes should return * EDEADLK if a recursive lock is detected. */ - ret = EDEADLK; + error = EDEADLK; } break; @@ -527,16 +456,16 @@ * What SS2 define as a 'normal' mutex. Intentionally * deadlock on attempts to get a lock you already own. */ - ret = 0; + error = 0; if (abstime) { if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) { - ret = EINVAL; + error = EINVAL; } else { clock_gettime(CLOCK_REALTIME, &ts1); TIMESPEC_SUB(&ts2, abstime, &ts1); __sys_nanosleep(&ts2, NULL); - ret = ETIMEDOUT; + error = ETIMEDOUT; } } else { ts1.tv_sec = 30; @@ -548,212 +477,669 @@ case PTHREAD_MUTEX_RECURSIVE: /* Increment the lock count: */ - if (m->m_count + 1 > 0) { - m->m_count++; - ret = 0; + if (mp->__recurse + 1 > 0) { + mp->__recurse++; + error = 0; } else - ret = EAGAIN; + error = EAGAIN; break; default: /* Trap invalid mutex types; */ - ret = EINVAL; + error = EINVAL; } - return (ret); + return (error); +} + +int +_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp) +{ + /* + * Check if the running thread is not the owner of the mutex. + */ + if ((mp->__lockflags & UMUTEX_SIMPLE) != 0) { + if (__predict_false(mp->__ownerdata.__ownertd != curthread)) + return (EPERM); + } else { + if ((mp->__lockword & UMUTEX_OWNER_MASK) != TID(curthread)) + return (EPERM); + } + return (0); }
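_mutex_owned() makes the two ownership encodings explicit: a private (UMUTEX_SIMPLE) mutex remembers the owning thread pointer, while a process-shared mutex can only compare the TID stored in the lock word. A minimal sketch of the process-shared usage this enables, written against the standard pthread API only (error handling trimmed for brevity):

	#include <sys/mman.h>
	#include <pthread.h>

	/*
	 * Put a mutex in anonymous shared memory so that a forked child
	 * can lock it too; requires _POSIX_THREAD_PROCESS_SHARED support.
	 */
	static pthread_mutex_t *
	make_shared_mutex(void)
	{
		pthread_mutexattr_t attr;
		pthread_mutex_t *mp;

		mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_SHARED, -1, 0);
		if (mp == MAP_FAILED)
			return (NULL);
		pthread_mutexattr_init(&attr);
		pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
		pthread_mutex_init(mp, &attr);
		pthread_mutexattr_destroy(&attr);
		return (mp);
	}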
-static int -mutex_unlock_common(pthread_mutex_t *mutex) +static inline int +_mutex_unlock_common(struct pthread_mutex *mp) { - struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m; + struct pthread *curthread = _get_curthread(); uint32_t id; + int defered; - m = *mutex; - if (__predict_false(m <= THR_MUTEX_DESTROYED)) { - if (m == THR_MUTEX_DESTROYED) - return (EINVAL); - return (EPERM); + if (__predict_false( + PMUTEX_TYPE(mp->__flags) == PTHREAD_MUTEX_RECURSIVE && + mp->__recurse > 0)) { + mp->__recurse--; + if (mp->__flags & PMUTEX_FLAG_PRIVATE) + THR_CRITICAL_LEAVE(curthread); + return (0); } - /* - * Check if the running thread is not the owner of the mutex. - */ - if (__predict_false(m->m_owner != curthread)) - return (EPERM); + dequeue_mutex(curthread, mp); + + if ((mp->__lockflags & UMUTEX_SIMPLE) != 0) + id = UMUTEX_SIMPLE_OWNER; + else + id = TID(curthread); - id = TID(curthread); - if (__predict_false( - m->m_type == PTHREAD_MUTEX_RECURSIVE && - m->m_count > 0)) { - m->m_count--; - } else { - m->m_owner = NULL; - /* Remove the mutex from the threads queue. */ - MUTEX_ASSERT_IS_OWNED(m); - if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) - TAILQ_REMOVE(&curthread->mutexq, m, m_qe); - else { - TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); - set_inherited_priority(curthread, m); + if (curthread->will_sleep == 0 && (mp->__flags & PMUTEX_FLAG_DEFERED) != 0) { + defered = 1; + mp->__flags &= ~PMUTEX_FLAG_DEFERED; + } else + defered = 0; + if ((mp->__lockflags & (UMUTEX_ROBUST | UMUTEX_PRIO_PROTECT2 | + UMUTEX_PRIO_INHERIT)) == 0) { + if (atomic_cmpset_rel_32(&mp->__lockword, id, + UMUTEX_UNOWNED)) { + goto out; + } + } else if ((mp->__lockflags & (UMUTEX_PRIO_INHERIT|UMUTEX_ROBUST)) == + UMUTEX_PRIO_INHERIT) { + id = TID(curthread); + if (atomic_cmpset_rel_32(&mp->__lockword, id, + UMUTEX_UNOWNED)) { + goto out; } - MUTEX_INIT_LINK(m); - _thr_umutex_unlock(&m->m_lock, id); + } + __thr_umutex_unlock((struct umutex *)&mp->__lockword, id); +out: + if (defered) { + _thr_wake_all(curthread->defer_waiters, + curthread->nwaiter_defer); + curthread->nwaiter_defer = 0; } - if (m->m_private) + if (mp->__flags & PMUTEX_FLAG_PRIVATE) THR_CRITICAL_LEAVE(curthread); return (0); } +static int +mutex_unlock_common(pthread_mutex_t *mp) +{ + int error; + + if ((error = _mutex_owned(_get_curthread(), mp)) != 0) + return (error); + return _mutex_unlock_common(mp); +} + +int +_mutex_cv_lock(pthread_mutex_t *mp, int count) +{ + int error; + + error = mutex_lock_common(mp, NULL, 1); + if (error == 0 || error == EOWNERDEAD) + mp->__recurse += count; + return (error); +} + int -_mutex_cv_unlock(pthread_mutex_t *mutex, int *count) +_mutex_cv_unlock(pthread_mutex_t *mp, int *count) { +#if 0 struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m; + int error; - m = *mutex; - if (__predict_false(m <= THR_MUTEX_DESTROYED)) { - if (m == THR_MUTEX_DESTROYED) - return (EINVAL); - return (EPERM); - } + if ((error = _mutex_owned(curthread, mp)) != 0) + return (error); +#endif /* - * Check if the running thread is not the owner of the mutex. + * Clear the count in case this is a recursive mutex. */ - if (__predict_false(m->m_owner != curthread)) - return (EPERM); + *count = mp->__recurse; + mp->__recurse = 0; + + (void)_mutex_unlock_common(mp); + return (0); +} + +int +_mutex_cv_attach(pthread_mutex_t *mp, int count) +{ + struct pthread *curthread = _get_curthread(); + + enqueue_mutex(curthread, mp); + mp->__recurse += count; + return (0); +} + +int +_mutex_cv_detach(pthread_mutex_t *mp, int *recurse) +{ + struct pthread *curthread = _get_curthread(); + int defered; + int error; + + if ((error = _mutex_owned(curthread, mp)) != 0) + return (error); /* * Clear the count in case this is a recursive mutex. */ - *count = m->m_count; - m->m_refcount++; - m->m_count = 0; - m->m_owner = NULL; - /* Remove the mutex from the threads queue. */ - MUTEX_ASSERT_IS_OWNED(m); - if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) - TAILQ_REMOVE(&curthread->mutexq, m, m_qe); - else { - TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); - set_inherited_priority(curthread, m); + *recurse = mp->__recurse; + mp->__recurse = 0; + dequeue_mutex(curthread, mp); + + /* Can a deferred wakeup still be pending here? Handle it anyway. 
*/ + if ((mp->__flags & PMUTEX_FLAG_DEFERED) != 0) { + defered = 1; + mp->__flags &= ~PMUTEX_FLAG_DEFERED; + } else + defered = 0; + + if (defered) { + _thr_wake_all(curthread->defer_waiters, + curthread->nwaiter_defer); + curthread->nwaiter_defer = 0; } - MUTEX_INIT_LINK(m); - _thr_umutex_unlock(&m->m_lock, TID(curthread)); - - if (m->m_private) - THR_CRITICAL_LEAVE(curthread); return (0); } int -_pthread_mutex_getprioceiling(pthread_mutex_t *mutex, - int *prioceiling) +_pthread_mutex_getprioceiling(pthread_mutex_t *mp, int *prioceiling) { - struct pthread_mutex *m; - int ret; + int error; - m = *mutex; - if ((m <= THR_MUTEX_DESTROYED) || - (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) - ret = EINVAL; + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) + error = EINVAL; else { - *prioceiling = m->m_lock.m_ceilings[0]; - ret = 0; + *prioceiling = mp->__ceilings[0]; + error = 0; } - return (ret); + return (error); }
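pthread_mutex_setprioceiling() below now updates an owned PTHREAD_PRIO_PROTECT mutex locally and re-sorts the thread's pp_mutexq so the recorded ceilings stay ordered. A short sketch of the user-visible API this serves (the ceiling values are arbitrary examples):

	#include <pthread.h>
	#include <stdio.h>

	int
	main(void)
	{
		pthread_mutexattr_t attr;
		pthread_mutex_t m;
		int old;

		pthread_mutexattr_init(&attr);
		pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
		pthread_mutexattr_setprioceiling(&attr, 10);
		pthread_mutex_init(&m, &attr);
		pthread_mutexattr_destroy(&attr);

		pthread_mutex_lock(&m);
		/* Raising the ceiling of an owned mutex re-sorts pp_mutexq. */
		pthread_mutex_setprioceiling(&m, 20, &old);
		printf("previous ceiling: %d\n", old);
		pthread_mutex_unlock(&m);
		pthread_mutex_destroy(&m);
		return (0);
	}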
 int -_pthread_mutex_setprioceiling(pthread_mutex_t *mutex, +_pthread_mutex_setprioceiling(pthread_mutex_t *mp, int ceiling, int *old_ceiling) { struct pthread *curthread = _get_curthread(); - struct pthread_mutex *m, *m1, *m2; - int ret; + struct mutex_link *ml, *ml1, *ml2; - m = *mutex; - if ((m <= THR_MUTEX_DESTROYED) || - (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) return (EINVAL); - ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); - if (ret != 0) - return (ret); + if (_mutex_owned(curthread, mp) != 0) + return __thr_umutex_set_ceiling((struct umutex *)&mp->__lockword, + ceiling, old_ceiling); + if (old_ceiling != NULL) + *old_ceiling = mp->__ceilings[0]; + mp->__ceilings[0] = ceiling; + TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) { + if (ml->mutexp == mp) + break; + } + if (ml == NULL) /* not on this thread's list; nothing to re-sort */ + return (0); + ml1 = TAILQ_PREV(ml, mutex_link_list, qe); + ml2 = TAILQ_NEXT(ml, qe); + if ((ml1 != NULL && ml1->mutexp->__ceilings[0] < (u_int)ceiling) || + (ml2 != NULL && ml2->mutexp->__ceilings[0] > (u_int)ceiling)) { + TAILQ_REMOVE(&curthread->pp_mutexq, ml, qe); + TAILQ_FOREACH(ml2, &curthread->pp_mutexq, qe) { + if (ml2->mutexp->__ceilings[0] < (u_int)ceiling) { + TAILQ_INSERT_BEFORE(ml2, ml, qe); + return (0); + } + } + TAILQ_INSERT_HEAD(&curthread->pp_mutexq, ml, qe); + } + return (0); +} + +int +_pthread_mutex_getspinloops_np(pthread_mutex_t *mp, int *count) +{ + + *count = mp->__spinloops; + return (0); +} + +int +_pthread_mutex_setspinloops_np(pthread_mutex_t *mp, int count) +{ + + mp->__spinloops = count; + return (0); +} + +int +_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) +{ + *count = 0; + return (0); +} + +int +_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) +{ + return (0); +} + +int +_pthread_mutex_isowned_np(pthread_mutex_t *mp) +{ + return (_mutex_owned(_get_curthread(), mp) == 0); +} + +int +_pthread_mutex_consistent(pthread_mutex_t *mp) +{ + + if (_mutex_owned(_get_curthread(), mp) == 0) { + if (mp->__lockflags & UMUTEX_ROBUST) { + atomic_clear_32(&mp->__lockword, UMUTEX_OWNER_DEAD); + return (0); + } + } + return (EINVAL); +} + +void +_thr_mutex_link_init(struct pthread *td) +{ + TAILQ_INIT(&td->mutex_link_freeq); + TAILQ_INIT(&td->mutex_link_pages); + TAILQ_INIT(&td->pi_mutexq); + TAILQ_INIT(&td->pp_mutexq); +} + +struct mutex_link * +_thr_mutex_link_alloc(void) +{ + struct pthread *curthread = _get_curthread(); + struct mutex_link *p; + unsigned i; + + p = TAILQ_FIRST(&curthread->mutex_link_freeq); + if (p == NULL) { + struct mutex_link *pp = (struct mutex_link *)mmap(NULL, + _thr_page_size, PROT_READ|PROT_WRITE, + MAP_ANON|MAP_PRIVATE, -1, 0); + for (i = 2; i < _thr_page_size/sizeof(struct mutex_link); ++i) + TAILQ_INSERT_TAIL(&curthread->mutex_link_freeq, &pp[i], qe); + pp[0].mutexp = (struct pthread_mutex *)pp; /* the page address */ + TAILQ_INSERT_HEAD(&curthread->mutex_link_pages, &pp[0], qe); + p = &pp[1]; + } else { + TAILQ_REMOVE(&curthread->mutex_link_freeq, p, qe); + } + return (p); +} + +void +_thr_mutex_link_free(struct mutex_link *ml) +{ + struct pthread *curthread = _get_curthread(); + + TAILQ_INSERT_TAIL(&curthread->mutex_link_freeq, ml, qe); +} + +void +_thr_mutex_link_exit(struct pthread *curthread) +{ + struct mutex_link *ml, *ml2; + + TAILQ_FOREACH_SAFE(ml, &curthread->mutex_link_pages, qe, ml2) { + TAILQ_REMOVE(&curthread->mutex_link_pages, ml, qe); + munmap(ml->mutexp, _thr_page_size); + } + TAILQ_INIT(&curthread->mutex_link_freeq); +} + +static void +set_inherited_priority(struct pthread *curthread, struct pthread_mutex *mp) +{ + struct mutex_link *ml2; + + ml2 = TAILQ_FIRST(&curthread->pp_mutexq); + if (ml2 != NULL) + mp->__ceilings[1] = ml2->mutexp->__ceilings[0]; + else + mp->__ceilings[1] = -1; +}
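_pthread_mutex_consistent() above clears UMUTEX_OWNER_DEAD and completes the robust-mutex protocol: a lock attempt that returns EOWNERDEAD hands the caller an owned but inconsistent mutex until it declares the data repaired. A sketch of the canonical recovery pattern; repair_state() is a hypothetical application callback:

	#include <pthread.h>
	#include <errno.h>

	/* Create a robust mutex. */
	static int
	robust_mutex_init(pthread_mutex_t *mp)
	{
		pthread_mutexattr_t attr;
		int error;

		pthread_mutexattr_init(&attr);
		pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
		error = pthread_mutex_init(mp, &attr);
		pthread_mutexattr_destroy(&attr);
		return (error);
	}

	/* Lock it, recovering if the previous owner died mid-update. */
	static int
	robust_mutex_lock(pthread_mutex_t *mp, void (*repair_state)(void))
	{
		int error;

		error = pthread_mutex_lock(mp);
		if (error == EOWNERDEAD) {
			repair_state();		/* fix the protected data */
			error = pthread_mutex_consistent(mp);
		}
		return (error);
	}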
+static void +enqueue_mutex(struct pthread *curthread, struct pthread_mutex *mp) +{ + struct mutex_link *ml; + + if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) + mp->__ownerdata.__ownertd = curthread; + + /* + * For PP mutexes we must restore the previous priority once a PP + * mutex is unlocked, so remember every PP mutex the thread owns. + */ + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) != 0) { + curthread->priority_mutex_count++; + ml = _thr_mutex_link_alloc(); + ml->mutexp = mp; + TAILQ_INSERT_TAIL(&curthread->pp_mutexq, ml, qe); + } else if ((mp->__lockflags & UMUTEX_PRIO_INHERIT) != 0) { + curthread->priority_mutex_count++; + /* + * To make unlocking after fork() work we need to link it, + * because we still use the TID as the lock word for PI + * mutexes. However, a process-shared mutex has only one + * copy and must not be unlockable by the child process, + * so we do not link it and _mutex_fork() will not find it. + */ + if ((mp->__lockflags & USYNC_PROCESS_SHARED) != 0) + return; + ml = _thr_mutex_link_alloc(); + ml->mutexp = mp; + TAILQ_INSERT_HEAD(&curthread->pi_mutexq, ml, qe); + } +} + +static void +dequeue_mutex(struct pthread *curthread, struct pthread_mutex *mp) +{ + struct mutex_link *ml; + + if ((mp->__lockflags & USYNC_PROCESS_SHARED) == 0) + mp->__ownerdata.__ownertd = NULL; - if (m->m_owner == curthread) { - MUTEX_ASSERT_IS_OWNED(m); - m1 = TAILQ_PREV(m, mutex_queue, m_qe); - m2 = TAILQ_NEXT(m, m_qe); - if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || - (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { - TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); - TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) { - if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { - TAILQ_INSERT_BEFORE(m2, m, m_qe); - return (0); - } + if ((mp->__lockflags & UMUTEX_PRIO_PROTECT2) != 0) { + curthread->priority_mutex_count--; + TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) { + if (ml->mutexp == mp) { + TAILQ_REMOVE(&curthread->pp_mutexq, ml, qe); + set_inherited_priority(curthread, mp); + _thr_mutex_link_free(ml); + break; + } + } + } else if ((mp->__lockflags & UMUTEX_PRIO_INHERIT) != 0) { + curthread->priority_mutex_count--; + if ((mp->__lockflags & USYNC_PROCESS_SHARED) != 0) + return; + TAILQ_FOREACH(ml, &curthread->pi_mutexq, qe) { + if (ml->mutexp == mp) { + TAILQ_REMOVE(&curthread->pi_mutexq, ml, qe); + _thr_mutex_link_free(ml); + break; } - TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe); } } - return (0); +} + +int +_mutex_owned_old(struct pthread *curthread, pthread_mutex_old_t *mutex) +{ + struct pthread_mutex *mp; + + mp = *mutex; + if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { + if (mp == THR_MUTEX_DESTROYED) + return (EINVAL); + return (EPERM); + } + + return _mutex_owned(curthread, mp); +} + +static int +mutex_init_old(pthread_mutex_old_t *mutex, + const struct pthread_mutex_attr *mutex_attr) +{ + struct pthread_mutex *mp; + int error; + + if ((mp = (struct pthread_mutex *) + malloc(sizeof(struct pthread_mutex))) == NULL) { + return (ENOMEM); + } + error = mutex_init(mp, mutex_attr); + if (error != 0) + free(mp); + else + *mutex = mp; + return (error); +} + +#define CHECK_AND_INIT_MUTEX \ + if (__predict_false((mp = *mutex) <= THR_MUTEX_DESTROYED)) { \ + if (mp == THR_MUTEX_DESTROYED) \ + return (EINVAL); \ + int error; \ + error = init_static(_get_curthread(), mutex); \ + if (error) \ + return (error); \ + mp = *mutex; \ + } + +static int +init_static(struct pthread *thread, pthread_mutex_old_t *mutex) +{ + int error; + + THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); + + if (*mutex == THR_MUTEX_INITIALIZER) { + error = mutex_init_old(mutex, &_pthread_mutexattr_default); + } else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) { + error = mutex_init_old(mutex, + &_pthread_mutexattr_adaptive_default); + } else + error = 0; + THR_LOCK_RELEASE(thread, &_mutex_static_lock); + return (error); +} + +int 
+_pthread_mutex_destroy_1_0(pthread_mutex_old_t *mutex) +{ + pthread_mutex_t *mp; + int error; + + mp = *mutex; + if (mp < THR_MUTEX_DESTROYED) { + error = 0; + } else if (mp == THR_MUTEX_DESTROYED) { + error = EINVAL; + } else { + *mutex = THR_MUTEX_DESTROYED; + free(mp); + error = 0; + } + + return (error); +} + +int +_pthread_mutex_init_1_0(pthread_mutex_old_t *mutex, + const pthread_mutexattr_t *mutex_attr) +{ + return mutex_init_old(mutex, mutex_attr ? *mutex_attr : NULL); +} + +int +_pthread_mutex_trylock_1_0(pthread_mutex_old_t *mutex) +{ + struct pthread *curthread = _get_curthread(); + struct pthread_mutex *mp; + int error; + + CHECK_AND_INIT_MUTEX + + if (!(mp->__flags & PMUTEX_FLAG_PRIVATE)) + return mutex_trylock_common(mp); + THR_CRITICAL_ENTER(curthread); + error = mutex_trylock_common(mp); + if (error != 0 && error != EOWNERDEAD) + THR_CRITICAL_LEAVE(curthread); + return (error); +} + +int +_pthread_mutex_lock_1_0(pthread_mutex_old_t *mutex) +{ + struct pthread_mutex *mp; + + _thr_check_init(); + + CHECK_AND_INIT_MUTEX + + return (mutex_lock_common(mp, NULL, 0)); +} + +int +_pthread_mutex_timedlock_1_0(pthread_mutex_old_t *mutex, + const struct timespec *abstime) +{ + struct pthread_mutex *mp; + + _thr_check_init(); + + CHECK_AND_INIT_MUTEX + + return (mutex_lock_common(mp, abstime, 0)); +} + +int +_pthread_mutex_unlock_1_0(pthread_mutex_old_t *mutex) +{ + struct pthread_mutex *mp; + + mp = *mutex; + if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { + if (mp == THR_MUTEX_DESTROYED) + return (EINVAL); + return (EPERM); + } + return mutex_unlock_common(mp); } int -_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) +_pthread_mutex_getspinloops_np_1_0(pthread_mutex_old_t *mutex, int *count) { - struct pthread_mutex *m; + struct pthread_mutex *mp; CHECK_AND_INIT_MUTEX - *count = m->m_spinloops; + *count = mp->__spinloops; return (0); } int -__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) +_pthread_mutex_setspinloops_np_1_0(pthread_mutex_old_t *mutex, int count) { - struct pthread_mutex *m; + struct pthread_mutex *mp; CHECK_AND_INIT_MUTEX - m->m_spinloops = count; + mp->__spinloops = count; return (0); } int -_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) +_pthread_mutex_getyieldloops_np_1_0(pthread_mutex_old_t *mutex, int *count) { - struct pthread_mutex *m; +#if 0 + struct pthread_mutex *mp; CHECK_AND_INIT_MUTEX *count = m->m_yieldloops; +#endif + *count = 0; return (0); } int -__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) +_pthread_mutex_setyieldloops_np_1_0(pthread_mutex_old_t *mutex, int count) { - struct pthread_mutex *m; +#if 0 + struct pthread_mutex *mp; CHECK_AND_INIT_MUTEX - m->m_yieldloops = count; + mp->m_yieldloops = count; +#endif return (0); } int -_pthread_mutex_isowned_np(pthread_mutex_t *mutex) +_pthread_mutex_isowned_np_1_0(pthread_mutex_old_t *mutex) +{ + return (_mutex_owned_old(_get_curthread(), mutex) == 0); +} + +int +_pthread_mutex_getprioceiling_1_0(pthread_mutex_old_t *mutex, + int *prioceiling) +{ + struct pthread_mutex *mp; + int error; + + mp = *mutex; + if ((mp <= THR_MUTEX_DESTROYED) || + (mp->__lockflags & UMUTEX_PRIO_PROTECT2) == 0) + error = EINVAL; + else { + *prioceiling = mp->__ceilings[0]; + error = 0; + } + + return (error); +} + +int +_pthread_mutex_setprioceiling_1_0(pthread_mutex_old_t *mutex, + int ceiling, int *old_ceiling) { - struct pthread_mutex *m; + struct pthread_mutex *mp; + + mp = *mutex; + if ((mp <= THR_MUTEX_DESTROYED) || + (mp->__lockflags & 
UMUTEX_PRIO_PROTECT2) == 0) + return (EINVAL); + return _pthread_mutex_setprioceiling(mp, ceiling, old_ceiling); +} - m = *mutex; - if (m <= THR_MUTEX_DESTROYED) - return (0); - return (m->m_owner == _get_curthread()); +/* This function is used internally by malloc. */ +int +_pthread_mutex_init_calloc_cb(pthread_mutex_t *mp, + void *(calloc_cb)(size_t, size_t)) +{ + static const struct pthread_mutex_attr attr = { + .m_type = PTHREAD_MUTEX_NORMAL, + .m_protocol = PTHREAD_PRIO_NONE, + .m_ceiling = 0, + .m_pshared = 0, + .m_robust = PTHREAD_MUTEX_STALLED + }; + int error; + error = mutex_init(mp, &attr); + if (error == 0) + mp->__flags |= PMUTEX_FLAG_PRIVATE; + return (error); } + +FB10_COMPAT(_pthread_mutex_destroy_1_0, pthread_mutex_destroy); +FB10_COMPAT(_pthread_mutex_getprioceiling_1_0, pthread_mutex_getprioceiling); +FB10_COMPAT(_pthread_mutex_getspinloops_np_1_0, pthread_mutex_getspinloops_np); +FB10_COMPAT(_pthread_mutex_getyieldloops_np_1_0, pthread_mutex_getyieldloops_np); +FB10_COMPAT(_pthread_mutex_init_1_0, pthread_mutex_init); +FB10_COMPAT(_pthread_mutex_lock_1_0, pthread_mutex_lock); +FB10_COMPAT(_pthread_mutex_setprioceiling_1_0, pthread_mutex_setprioceiling); +FB10_COMPAT(_pthread_mutex_setspinloops_np_1_0, pthread_mutex_setspinloops_np); +FB10_COMPAT(_pthread_mutex_setyieldloops_np_1_0, pthread_mutex_setyieldloops_np); +FB10_COMPAT(_pthread_mutex_timedlock_1_0, pthread_mutex_timedlock); +FB10_COMPAT(_pthread_mutex_trylock_1_0, pthread_mutex_trylock); +FB10_COMPAT(_pthread_mutex_unlock_1_0, pthread_mutex_unlock); --- src/lib/libthr/thread/thr_mutexattr.c 2008-03-20 12:41:24.000000000 0000 +++ src/lib/libthr/thread/thr_mutexattr.c 2010-11-23 01:58:38.000000000 0000 @@ -81,6 +81,11 @@ __weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol); __weak_reference(_pthread_mutexattr_getprioceiling, pthread_mutexattr_getprioceiling); __weak_reference(_pthread_mutexattr_setprioceiling, pthread_mutexattr_setprioceiling); +__weak_reference(_pthread_mutexattr_getrobust, pthread_mutexattr_getrobust); +__weak_reference(_pthread_mutexattr_setrobust, pthread_mutexattr_setrobust); + +int _pthread_mutexattr_setpshared_1_0(pthread_mutexattr_t *attr, int pshared); +FB10_COMPAT(_pthread_mutexattr_setpshared_1_0, pthread_mutexattr_setpshared); int _pthread_mutexattr_init(pthread_mutexattr_t *attr) @@ -177,7 +182,7 @@ if (attr == NULL || *attr == NULL) return (EINVAL); - *pshared = PTHREAD_PROCESS_PRIVATE; + *pshared = (*attr)->m_pshared; return (0); } @@ -189,9 +194,11 @@ return (EINVAL); - /* Only PTHREAD_PROCESS_PRIVATE is supported. */ + /* Both PTHREAD_PROCESS_PRIVATE and PTHREAD_PROCESS_SHARED are accepted. */
 - if (pshared != PTHREAD_PROCESS_PRIVATE) + if (pshared != PTHREAD_PROCESS_PRIVATE && + pshared != PTHREAD_PROCESS_SHARED) return (EINVAL); + (*attr)->m_pshared = pshared; return (0); } @@ -253,3 +260,49 @@ return(ret); } +int +_pthread_mutexattr_getrobust(const pthread_mutexattr_t *mattr, + int *robust) +{ + int error; + + if ((mattr == NULL) || (*mattr == NULL)) + error = EINVAL; + else { + *robust = (*mattr)->m_robust; + error = 0; + } + return (error); +} + +int +_pthread_mutexattr_setrobust(pthread_mutexattr_t *mattr, + int robust) +{ + int error; + + if ((mattr == NULL) || (*mattr == NULL)) + error = EINVAL; + else if (robust == PTHREAD_MUTEX_STALLED || + robust == PTHREAD_MUTEX_ROBUST) { + (*mattr)->m_robust = robust; + error = 0; + } else { + error = EINVAL; + } + return (error); +} + +int +_pthread_mutexattr_setpshared_1_0(pthread_mutexattr_t *attr, int pshared) +{ + if (attr == NULL || *attr == NULL) + return (EINVAL); + + /* Only PTHREAD_PROCESS_PRIVATE is supported. */ + if (pshared != PTHREAD_PROCESS_PRIVATE) + return (EINVAL); + + (*attr)->m_pshared = pshared; + return (0); +} --- src/lib/libthr/thread/thr_once.c 2008-05-30 00:46:50.000000000 0000 +++ src/lib/libthr/thread/thr_once.c 2010-11-29 06:32:08.000000000 0000 @@ -73,9 +73,9 @@ break; } else if (state == ONCE_IN_PROGRESS) { if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_WAIT)) - _thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, NULL, 0); + _thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, 0); } else if (state == ONCE_WAIT) { - _thr_umtx_wait_uint(&once_control->state, state, NULL, 0); + _thr_umtx_wait_uint(&once_control->state, state, 0); } else return (EINVAL); } --- src/lib/libthr/thread/thr_private.h 2010-10-25 09:35:54.000000000 0000 +++ src/lib/libthr/thread/thr_private.h 2010-11-30 11:38:58.000000000 0000 @@ -53,7 +53,9 @@ #include #define SYM_FB10(sym) __CONCAT(sym, _fb10) +#define SYM_FB11(sym) __CONCAT(sym, _fb11) #define SYM_FBP10(sym) __CONCAT(sym, _fbp10) +#define SYM_FBP11(sym) __CONCAT(sym, _fbp11) #define WEAK_REF(sym, alias) __weak_reference(sym, alias) #define SYM_COMPAT(sym, impl, ver) __sym_compat(sym, impl, ver) #define SYM_DEFAULT(sym, impl, ver) __sym_default(sym, impl, ver) @@ -62,6 +64,10 @@ WEAK_REF(func, SYM_FB10(sym)); \ SYM_COMPAT(sym, SYM_FB10(sym), FBSD_1.0) +#define FB11_COMPAT(func, sym) \ + WEAK_REF(func, SYM_FB11(sym)); \ + SYM_COMPAT(sym, SYM_FB11(sym), FBSD_1.1) + #define FB10_COMPAT_PRIVATE(func, sym) \ WEAK_REF(func, SYM_FBP10(sym)); \ SYM_DEFAULT(sym, SYM_FBP10(sym), FBSDprivate_1.0) @@ -77,7 +83,12 @@ typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist; typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head; -TAILQ_HEAD(mutex_queue, pthread_mutex); +struct mutex_link { + TAILQ_ENTRY(mutex_link) qe; + struct pthread_mutex *mutexp; +}; + +TAILQ_HEAD(mutex_link_list, mutex_link); /* Signal to do cancellation */ #define SIGCANCEL 32 @@ -135,38 +146,19 @@ #define THR_RWLOCK_INITIALIZER ((struct pthread_rwlock *)NULL) #define THR_RWLOCK_DESTROYED ((struct pthread_rwlock *)1) -struct pthread_mutex { - /* - * Lock for accesses to this structure. - */ - struct umutex m_lock; - enum pthread_mutextype m_type; - struct pthread *m_owner; - int m_count; - int m_refcount; - int m_spinloops; - int m_yieldloops; - int m_private; - /* - * Link for all mutexes a thread currently owns. 
- */ - TAILQ_ENTRY(pthread_mutex) m_qe; -}; +#define PMUTEX_FLAG_TYPE_MASK 0x0ff +#define PMUTEX_FLAG_PRIVATE 0x100 +#define PMUTEX_FLAG_DEFERED 0x200 +#define PMUTEX_TYPE(mtxflags) ((mtxflags) & PMUTEX_FLAG_TYPE_MASK) + +#define MAX_DEFER_WAITERS 50 struct pthread_mutex_attr { enum pthread_mutextype m_type; int m_protocol; int m_ceiling; -}; - -#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ - { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } - -struct pthread_cond { - struct umutex c_lock; - struct ucond c_kerncv; - int c_pshared; - int c_clockid; + int m_pshared; + int m_robust; }; struct pthread_cond_attr { @@ -174,29 +166,10 @@ int c_clockid; }; -struct pthread_barrier { - struct umutex b_lock; - struct ucond b_cv; - volatile int64_t b_cycle; - volatile int b_count; - volatile int b_waiters; -}; - struct pthread_barrierattr { int pshared; }; -struct pthread_spinlock { - struct umutex s_lock; -}; - -/* - * Flags for condition variables. - */ -#define COND_FLAGS_PRIVATE 0x01 -#define COND_FLAGS_INITED 0x02 -#define COND_FLAGS_BUSY 0x04 - /* * Cleanup definitions. */ @@ -245,6 +218,21 @@ size_t cpusetsize; }; +struct wake_addr { + struct wake_addr *link; + unsigned int value; + char pad[12]; +}; + +struct sleepqueue { + TAILQ_HEAD(, pthread) sq_blocked; + SLIST_HEAD(, sleepqueue) sq_freeq; + LIST_ENTRY(sleepqueue) sq_hash; + SLIST_ENTRY(sleepqueue) sq_flink; + void *sq_wchan; + int sq_type; +}; + /* * Thread creation state attributes. */ @@ -289,11 +277,6 @@ int pshared; }; -struct pthread_rwlock { - struct urwlock lock; - struct pthread *owner; -}; - /* * Thread states. */ @@ -356,9 +339,15 @@ /* Hash queue entry. */ LIST_ENTRY(pthread) hle; + /* Sleep queue entry */ + TAILQ_ENTRY(pthread) wle; + /* Threads reference count. */ int refcount; + struct wake_addr *wake_addr; +#define WAKE_ADDR(td) ((td)->wake_addr) + /* * Thread start routine, argument, stack pointer and thread * attributes. @@ -438,16 +427,22 @@ #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ - /* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */ - struct mutex_queue mutexq; + /* Queue of currently owned PRIO_INHERIT type mutexes. */ + struct mutex_link_list pi_mutexq; /* Queue of all owned PRIO_PROTECT mutexes. */ - struct mutex_queue pp_mutexq; + struct mutex_link_list pp_mutexq; + + struct mutex_link_list mutex_link_freeq; + struct mutex_link_list mutex_link_pages; void *ret; struct pthread_specific_elem *specific; int specific_data_count; + /* Number of priority mutex held. */ + int priority_mutex_count; + /* Number rwlocks rdlocks held. */ int rdlock_count; @@ -480,8 +475,26 @@ /* Event mask */ int event_mask; - /* Event */ + /* Debugging event */ td_event_msg_t event_buf; + + /* Sleep queue */ + struct sleepqueue *sleepqueue; + + /* Wait channel */ + void *wchan; + + /* Referenced mutex. */ + struct pthread_mutex *mutex_obj; + + /* Thread will sleep. */ + int will_sleep; + + /* Number of threads deferred. */ + int nwaiter_defer; + + /* Deferred threads from pthread_cond_signal. 
*/ + unsigned int *defer_waiters[MAX_DEFER_WAITERS]; }; #define THR_SHOULD_GC(thrd) \ @@ -510,6 +523,9 @@ #define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \ _thr_umutex_timedlock((lck), TID(thrd), (timo)) +#define THR_UMUTEX_LOCK_SPIN(thrd, lck) \ + _thr_umutex_lock_spin((lck), TID(thrd)) + #define THR_UMUTEX_UNLOCK(thrd, lck) \ _thr_umutex_unlock((lck), TID(thrd)) @@ -519,6 +535,12 @@ _thr_umutex_lock(lck, TID(thrd)); \ } while (0) +#define THR_LOCK_ACQUIRE_SPIN(thrd, lck) \ +do { \ + (thrd)->locklevel++; \ + _thr_umutex_lock_spin(lck, TID(thrd)); \ +} while (0) + #ifdef _PTHREADS_INVARIANTS #define THR_ASSERT_LOCKLEVEL(thrd) \ do { \ @@ -673,6 +695,9 @@ int _thr_setthreaded(int) __hidden; int _mutex_cv_lock(pthread_mutex_t *, int count) __hidden; int _mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden; +int _mutex_cv_attach(pthread_mutex_t *, int count) __hidden; +int _mutex_cv_detach(pthread_mutex_t *, int *count) __hidden; +int _mutex_owned(struct pthread *, const pthread_mutex_t *) __hidden; int _mutex_reinit(pthread_mutex_t *) __hidden; void _mutex_fork(struct pthread *curthread) __hidden; void _libpthread_init(struct pthread *) __hidden; @@ -726,6 +751,10 @@ void _thread_bp_create(void); void _thread_bp_death(void); int _sched_yield(void); +void _thr_mutex_link_init(struct pthread *); +struct mutex_link *_thr_mutex_link_alloc(void); +void _thr_mutex_link_free(struct mutex_link *); +void _thr_mutex_link_exit(struct pthread *); void _pthread_cleanup_push(void (*)(void *), void *); void _pthread_cleanup_pop(int); @@ -797,11 +826,96 @@ _libpthread_init(NULL); } +struct wake_addr *_thr_alloc_wake_addr(void); +void _thr_release_wake_addr(struct wake_addr *); +int _thr_sleep(struct pthread *, int, const struct timespec *); + +void _thr_wake_addr_init(void) __hidden; + +static inline void +_thr_clear_wake(struct pthread *td) +{ + td->wake_addr->value = 0; +} + +static inline int +_thr_is_woken(struct pthread *td) +{ + return td->wake_addr->value != 0; +} + +static inline void +_thr_set_wake(unsigned int *waddr) +{ + *waddr = 1; + _thr_umtx_wake(waddr, INT_MAX, 0); +} + +void _thr_wake_all(unsigned int *waddrs[], int) __hidden; + +static inline struct pthread * +_sleepq_first(struct sleepqueue *sq) +{ + return TAILQ_FIRST(&sq->sq_blocked); +} + +void _sleepq_init(void) __hidden; +struct sleepqueue *_sleepq_alloc(void) __hidden; +void _sleepq_free(struct sleepqueue *) __hidden; +void _sleepq_lock(void *) __hidden; +void _sleepq_unlock(void *) __hidden; +struct sleepqueue *_sleepq_lookup(void *) __hidden; +void _sleepq_add(void *, struct pthread *) __hidden; +int _sleepq_remove(struct sleepqueue *, struct pthread *) __hidden; +void _sleepq_drop(struct sleepqueue *, + void (*cb)(struct pthread *, void *arg), void *) __hidden; + struct dl_phdr_info; void __pthread_cxa_finalize(struct dl_phdr_info *phdr_info); void _thr_tsd_unload(struct dl_phdr_info *phdr_info) __hidden; void _thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden; +typedef struct pthread_mutex *pthread_mutex_old_t; +typedef struct pthread_cond *pthread_cond_old_t; +typedef struct pthread_rwlock *pthread_rwlock_old_t; +int _mutex_owned_old(struct pthread *, pthread_mutex_old_t *) __hidden; + +/* Compatible functions */ +int _pthread_mutex_destroy_1_0(pthread_mutex_old_t *); +int _pthread_mutex_init_1_0(pthread_mutex_old_t *, const pthread_mutexattr_t *); +int _pthread_mutex_trylock_1_0(pthread_mutex_old_t *); +int _pthread_mutex_lock_1_0(pthread_mutex_old_t *); +int _pthread_mutex_timedlock_1_0(pthread_mutex_old_t *, const 
struct timespec *); +int _pthread_mutex_unlock_1_0(pthread_mutex_old_t *); +int _pthread_mutex_getprioceiling_1_0(pthread_mutex_old_t *, int *); +int _pthread_mutex_setprioceiling_1_0(pthread_mutex_old_t *, int, int *); +int _pthread_mutex_getspinloops_np_1_0(pthread_mutex_old_t *, int *); +int _pthread_mutex_setspinloops_np_1_0(pthread_mutex_old_t *, int); +int _pthread_mutex_getyieldloops_np_1_0(pthread_mutex_old_t *, int *); +int _pthread_mutex_setyieldloops_np_1_0(pthread_mutex_old_t *, int); +int _pthread_mutex_isowned_np_1_0(pthread_mutex_old_t *); + +int _pthread_cond_init_1_0(pthread_cond_old_t *, const pthread_condattr_t *); +int _pthread_cond_signal_1_0(pthread_cond_old_t *); +int _pthread_cond_destroy_1_0(pthread_cond_old_t *); +int _pthread_cond_wait_1_0(pthread_cond_old_t *, pthread_mutex_old_t *); +int _pthread_cond_timedwait_1_0(pthread_cond_old_t *, pthread_mutex_old_t *, + const struct timespec *); +int _pthread_cond_broadcast_1_0(pthread_cond_old_t *); + +int _pthread_rwlock_destroy_1_0(pthread_rwlock_old_t *); +int _pthread_rwlock_init_1_0(pthread_rwlock_old_t *, + const pthread_rwlockattr_t *); +int _pthread_rwlock_timedrdlock_1_0(pthread_rwlock_old_t *, + const struct timespec *); +int _pthread_rwlock_timedwrlock_1_0(pthread_rwlock_old_t *, + const struct timespec *); +int _pthread_rwlock_tryrdlock_1_0(pthread_rwlock_old_t *); +int _pthread_rwlock_trywrlock_1_0(pthread_rwlock_old_t *); +int _pthread_rwlock_rdlock_1_0(pthread_rwlock_old_t *); +int _pthread_rwlock_wrlock_1_0(pthread_rwlock_old_t *); +int _pthread_rwlock_unlock_1_0(pthread_rwlock_old_t *); + __END_DECLS #endif /* !_THR_PRIVATE_H */ --- src/lib/libthr/thread/thr_pspinlock.c 2007-10-16 07:45:51.000000000 0000 +++ src/lib/libthr/thread/thr_pspinlock.c 2010-11-23 01:58:38.000000000 0000 @@ -34,105 +34,135 @@ #include "thr_private.h" -#define SPIN_COUNT 100000 - __weak_reference(_pthread_spin_init, pthread_spin_init); __weak_reference(_pthread_spin_destroy, pthread_spin_destroy); __weak_reference(_pthread_spin_trylock, pthread_spin_trylock); __weak_reference(_pthread_spin_lock, pthread_spin_lock); __weak_reference(_pthread_spin_unlock, pthread_spin_unlock); +typedef pthread_spinlock_t *pthread_spinlock_old_t; +int _pthread_spin_destroy_1_0(pthread_spinlock_old_t *); +int _pthread_spin_init_1_0(pthread_spinlock_old_t *, int); +int _pthread_spin_lock_1_0(pthread_spinlock_old_t *); +int _pthread_spin_trylock_1_0(pthread_spinlock_old_t *); +int _pthread_spin_unlock_1_0(pthread_spinlock_old_t *); + +int +_pthread_spin_init(pthread_spinlock_t *lckp, int pshared) +{ + if (pshared != PTHREAD_PROCESS_PRIVATE && + pshared != PTHREAD_PROCESS_SHARED) + return (EINVAL); + lckp->__lock = 0; + return (0); +} + +int +_pthread_spin_destroy(pthread_spinlock_t *lckp) +{ + /* Nothing to do. */ + return (0); +} + int -_pthread_spin_init(pthread_spinlock_t *lock, int pshared) +_pthread_spin_trylock(pthread_spinlock_t *lckp) { - struct pthread_spinlock *lck; - int ret; + if (atomic_cmpset_acq_32(&lckp->__lock, 0, 1)) + return (0); + return (EBUSY); +} - if (lock == NULL || pshared != PTHREAD_PROCESS_PRIVATE) - ret = EINVAL; - else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL) - ret = ENOMEM; - else { - _thr_umutex_init(&lck->s_lock); - *lock = lck; - ret = 0;
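With this rewrite the spin lock becomes a bare word manipulated with atomic_cmpset_acq_32(), so pthread_spin_init() on the new ABI no longer allocates and cannot fail with ENOMEM. A minimal usage sketch (hypothetical helper names):

	#include <pthread.h>

	static pthread_spinlock_t slock;
	static long counter;

	/* One-word spin lock guarding a trivial counter. */
	static int
	counter_init(void)
	{
		return (pthread_spin_init(&slock, PTHREAD_PROCESS_PRIVATE));
	}

	static void
	counter_bump(void)
	{
		pthread_spin_lock(&slock);
		counter++;
		pthread_spin_unlock(&slock);
	}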
+int +_pthread_spin_lock(pthread_spinlock_t *lckp) +{ + /* + * No argument checking is done here; the lock path must be + * as fast as possible. + */ + if (atomic_cmpset_acq_32(&lckp->__lock, 0, 1)) + return (0); + for (;;) { + if (*(volatile int32_t *)&(lckp->__lock) == 0) + if (atomic_cmpset_acq_32(&lckp->__lock, 0, 1)) + break; + if (!_thr_is_smp) + _pthread_yield(); + else + CPU_SPINWAIT; } + return (0); +} - return (ret); +int +_pthread_spin_unlock(pthread_spinlock_t *lckp) +{ + atomic_store_rel_32(&lckp->__lock, 0); + return (0); } int -_pthread_spin_destroy(pthread_spinlock_t *lock) +_pthread_spin_init_1_0(pthread_spinlock_old_t *lckpp, int pshared) { - int ret; + pthread_spinlock_t *lckp; - if (lock == NULL || *lock == NULL) - ret = EINVAL; - else { - free(*lock); - *lock = NULL; - ret = 0; - } - - return (ret); + if (pshared != PTHREAD_PROCESS_PRIVATE && + pshared != PTHREAD_PROCESS_SHARED) + return (EINVAL); + + lckp = malloc(sizeof(pthread_spinlock_t)); + if (lckp == NULL) + return (ENOMEM); + lckp->__lock = 0; + *lckpp = lckp; + return (0); } int -_pthread_spin_trylock(pthread_spinlock_t *lock) +_pthread_spin_destroy_1_0(pthread_spinlock_old_t *lckpp) { - struct pthread *curthread = _get_curthread(); - struct pthread_spinlock *lck; - int ret; + pthread_spinlock_t *lckp = *lckpp; - if (lock == NULL || (lck = *lock) == NULL) - ret = EINVAL; - else - ret = THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock); - return (ret); + if (lckp != NULL) { + free(lckp); + *lckpp = NULL; + return (0); + } else + return (EINVAL); } int -_pthread_spin_lock(pthread_spinlock_t *lock) +_pthread_spin_trylock_1_0(pthread_spinlock_old_t *lckpp) { - struct pthread *curthread = _get_curthread(); - struct pthread_spinlock *lck; - int ret, count; + pthread_spinlock_t *lckp = *lckpp; - if (lock == NULL || (lck = *lock) == NULL) - ret = EINVAL; - else { - count = SPIN_COUNT; - while ((ret = THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock)) != 0) { - while (lck->s_lock.m_owner) { - if (!_thr_is_smp) { - _pthread_yield(); - } else { - CPU_SPINWAIT; + if (lckp == NULL) + return (EINVAL); + return _pthread_spin_trylock(lckp); +} - if (--count <= 0) { - count = SPIN_COUNT; - _pthread_yield(); - } - } - } - } - ret = 0; - } +int +_pthread_spin_lock_1_0(pthread_spinlock_old_t *lckpp) +{ + pthread_spinlock_t *lckp = *lckpp; - return (ret); + if (lckp == NULL) + return (EINVAL); + return _pthread_spin_lock(lckp); } int -_pthread_spin_unlock(pthread_spinlock_t *lock) +_pthread_spin_unlock_1_0(pthread_spinlock_old_t *lckpp) { - struct pthread *curthread = _get_curthread(); - struct pthread_spinlock *lck; - int ret; + pthread_spinlock_t *lckp = *lckpp; - if (lock == NULL || (lck = *lock) == NULL) - ret = EINVAL; - else { - ret = THR_UMUTEX_UNLOCK(curthread, &lck->s_lock); - } - return (ret); + if (lckp == NULL) + return (EINVAL); + return _pthread_spin_unlock(lckp); } + +FB10_COMPAT(_pthread_spin_destroy_1_0, pthread_spin_destroy); +FB10_COMPAT(_pthread_spin_init_1_0, pthread_spin_init); +FB10_COMPAT(_pthread_spin_lock_1_0, pthread_spin_lock); +FB10_COMPAT(_pthread_spin_trylock_1_0, pthread_spin_trylock); +FB10_COMPAT(_pthread_spin_unlock_1_0, pthread_spin_unlock); --- src/lib/libthr/thread/thr_rwlock.c 2010-10-20 02:35:31.000000000 0000 +++ src/lib/libthr/thread/thr_rwlock.c 2010-11-26 07:33:52.000000000 0000 @@ -1,4 +1,5 @@ /*- + * Copyright (c) 2010 David Xu * Copyright (c) 1998 Alex Nash * All rights reserved. 
* @@ -29,6 +30,7 @@ #include #include #include +#include #include "namespace.h" #include @@ -45,89 +47,54 @@ __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock); __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock); -#define CHECK_AND_INIT_RWLOCK \ - if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) { \ - if (prwlock == THR_RWLOCK_INITIALIZER) { \ - int ret; \ - ret = init_static(_get_curthread(), rwlock); \ - if (ret) \ - return (ret); \ - } else if (prwlock == THR_RWLOCK_DESTROYED) { \ - return (EINVAL); \ - } \ - prwlock = *rwlock; \ - } +#define RWL_PSHARED(rwp) ((rwp->__flags & USYNC_PROCESS_SHARED) != 0) /* * Prototypes */ static int -rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused) +rwlock_init(struct pthread_rwlock *rwp, const pthread_rwlockattr_t *attr) { - pthread_rwlock_t prwlock; + + memset(rwp, 0, sizeof(*rwp)); + rwp->__magic = _PTHREAD_RWLOCK_MAGIC; + if (attr == NULL || *attr == NULL) + return (0); + else { + if ((*attr)->pshared) + rwp->__flags |= USYNC_PROCESS_SHARED; + } - prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock)); - if (prwlock == NULL) - return (ENOMEM); - *rwlock = prwlock; return (0); } -int -_pthread_rwlock_destroy (pthread_rwlock_t *rwlock) +static int +rwlock_destroy_common(struct pthread_rwlock *rwp) { - pthread_rwlock_t prwlock; - int ret; - - prwlock = *rwlock; - if (prwlock == THR_RWLOCK_INITIALIZER) - ret = 0; - else if (prwlock == THR_RWLOCK_DESTROYED) - ret = EINVAL; - else { - *rwlock = THR_RWLOCK_DESTROYED; - - free(prwlock); - ret = 0; - } - return (ret); + memset(rwp, 0, sizeof(*rwp)); + return (0); } -static int -init_static(struct pthread *thread, pthread_rwlock_t *rwlock) +int +_pthread_rwlock_destroy (pthread_rwlock_t *rwp) { - int ret; - - THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock); - - if (*rwlock == THR_RWLOCK_INITIALIZER) - ret = rwlock_init(rwlock, NULL); - else - ret = 0; - - THR_LOCK_RELEASE(thread, &_rwlock_static_lock); - - return (ret); + return rwlock_destroy_common(rwp); } int -_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) +_pthread_rwlock_init(pthread_rwlock_t *rwp, const pthread_rwlockattr_t *attr) { - *rwlock = NULL; - return (rwlock_init(rwlock, attr)); + return (rwlock_init(rwp, attr)); } static int -rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime) +rwlock_rdlock_common(struct pthread_rwlock *rwlp, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); - pthread_rwlock_t prwlock; struct timespec ts, ts2, *tsp; int flags; - int ret; - - CHECK_AND_INIT_RWLOCK + int error; if (curthread->rdlock_count) { /* @@ -148,13 +115,13 @@ } /* - * POSIX said the validity of the abstimeout parameter need + * POSIX said the validity of the abstime parameter need * not be checked if the lock can be immediately acquired. */ - ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); - if (ret == 0) { + error = _thr_rwlock_tryrdlock((struct urwlock *)&rwlp->__state, flags); + if (error == 0) { curthread->rdlock_count++; - return (ret); + return (error); } if (__predict_false(abstime && @@ -173,44 +140,41 @@ tsp = NULL; /* goto kernel and lock it */ - ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp); - if (ret != EINTR) + error = __thr_rwlock_rdlock((struct urwlock *)&rwlp->__state, flags, tsp); + if (error != EINTR) break; /* if interrupted, try to lock it in userland again. 
*/ - if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) { - ret = 0; + if (_thr_rwlock_tryrdlock((struct urwlock *)&rwlp->__state, flags) == 0) { + error = 0; break; } } - if (ret == 0) + if (error == 0) curthread->rdlock_count++; - return (ret); + return (error); } int -_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) +_pthread_rwlock_rdlock (pthread_rwlock_t *rwlp) { - return (rwlock_rdlock_common(rwlock, NULL)); + return (rwlock_rdlock_common(rwlp, NULL)); } int -_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock, - const struct timespec *abstime) +_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlp, + const struct timespec *abstime) { - return (rwlock_rdlock_common(rwlock, abstime)); + return (rwlock_rdlock_common(rwlp, abstime)); } int -_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) +_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlp) { struct pthread *curthread = _get_curthread(); - pthread_rwlock_t prwlock; int flags; - int ret; + int error; - CHECK_AND_INIT_RWLOCK - if (curthread->rdlock_count) { /* * To avoid having to track all the rdlocks held by @@ -229,45 +193,48 @@ flags = 0; } - ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags); - if (ret == 0) + error = _thr_rwlock_tryrdlock((struct urwlock *)&rwlp->__state, flags); + if (error == 0) curthread->rdlock_count++; - return (ret); + return (error); +} + +static void +rwlock_setowner(struct pthread_rwlock *rwlp, struct pthread *td) +{ + if (!RWL_PSHARED(rwlp)) + rwlp->__ownerdata.__ownertd = td; + else + rwlp->__ownerdata.__ownertid = TID(td); } int -_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) +_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlp) { struct pthread *curthread = _get_curthread(); - pthread_rwlock_t prwlock; - int ret; + int error; - CHECK_AND_INIT_RWLOCK - - ret = _thr_rwlock_trywrlock(&prwlock->lock); - if (ret == 0) - prwlock->owner = curthread; - return (ret); + error = _thr_rwlock_trywrlock((struct urwlock *)&rwlp->__state); + if (error == 0) + rwlock_setowner(rwlp, curthread); + return (error); } static int -rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) +rwlock_wrlock_common(pthread_rwlock_t *rwlp, const struct timespec *abstime) { struct pthread *curthread = _get_curthread(); - pthread_rwlock_t prwlock; struct timespec ts, ts2, *tsp; - int ret; - - CHECK_AND_INIT_RWLOCK + int error; /* - * POSIX said the validity of the abstimeout parameter need + * POSIX said the validity of the abstime parameter need * not be checked if the lock can be immediately acquired. */ - ret = _thr_rwlock_trywrlock(&prwlock->lock); - if (ret == 0) { - prwlock->owner = curthread; - return (ret); + error = _thr_rwlock_trywrlock((struct urwlock *)&rwlp->__state); + if (error == 0) { + rwlock_setowner(rwlp, curthread); + return (error); } if (__predict_false(abstime && @@ -286,61 +253,217 @@ tsp = NULL; /* goto kernel and lock it */ - ret = __thr_rwlock_wrlock(&prwlock->lock, tsp); - if (ret == 0) { - prwlock->owner = curthread; + error = __thr_rwlock_wrlock((struct urwlock *)&rwlp->__state, tsp); + if (error == 0) { + rwlock_setowner(rwlp, curthread); break; } - if (ret != EINTR) + if (error != EINTR) break; /* if interrupted, try to lock it in userland again. 
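
rwlock_setowner records the write owner in whichever form remains meaningful to every potential unlocker: a struct pthread pointer is only valid inside one process, so process-shared locks store the kernel TID instead. The payoff is the EPERM check in _pthread_rwlock_unlock below; illustratively:

    /* thread A (possibly in another process, for a pshared lock): */
    pthread_rwlock_wrlock(rwp);

    /* thread B, which is not the write owner: */
    int error = pthread_rwlock_unlock(rwp);   /* returns EPERM */
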
*/ - if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) { - ret = 0; - prwlock->owner = curthread; + if (_thr_rwlock_trywrlock((struct urwlock *)&rwlp->__state) == 0) { + error = 0; + rwlock_setowner(rwlp, curthread); break; } } - return (ret); + return (error); } int -_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) +_pthread_rwlock_wrlock (pthread_rwlock_t *rwlp) { - return (rwlock_wrlock_common (rwlock, NULL)); + return (rwlock_wrlock_common(rwlp, NULL)); } int -_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock, - const struct timespec *abstime) +_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlp, + const struct timespec *abstime) { - return (rwlock_wrlock_common (rwlock, abstime)); + return (rwlock_wrlock_common(rwlp, abstime)); } int -_pthread_rwlock_unlock (pthread_rwlock_t *rwlock) +_pthread_rwlock_unlock(pthread_rwlock_t *rwlp) { struct pthread *curthread = _get_curthread(); - pthread_rwlock_t prwlock; - int ret; - int32_t state; + int error; + uint32_t state; + + state = rwlp->__state; + if (state & URWLOCK_WRITE_OWNER) { + if (RWL_PSHARED(rwlp) && + rwlp->__ownerdata.__ownertid == TID(curthread)) { + rwlp->__ownerdata.__ownertid = 0; + } else if (!RWL_PSHARED(rwlp) && + rwlp->__ownerdata.__ownertd == curthread) { + rwlp->__ownerdata.__ownertd = NULL; + } else + return (EPERM); + } + error = _thr_rwlock_unlock((struct urwlock *)&rwlp->__state); + if (error == 0 && (state & URWLOCK_WRITE_OWNER) == 0) + curthread->rdlock_count--; + return (error); +} + +#define CHECK_AND_INIT_RWLOCK \ + if (__predict_false((rwlp = (*rwlpp)) <= THR_RWLOCK_DESTROYED)) { \ + if (rwlp == THR_RWLOCK_INITIALIZER) { \ + int error; \ + error = init_static(_get_curthread(), rwlpp); \ + if (error) \ + return (error); \ + } else if (rwlp == THR_RWLOCK_DESTROYED) { \ + return (EINVAL); \ + } \ + *rwlpp = rwlp; \ + } + +static int +rwlock_init_old(pthread_rwlock_old_t *rwlpp, const pthread_rwlockattr_t *attr) +{ + struct pthread_rwlock *rwlp; + int error; + + rwlp = (struct pthread_rwlock *)malloc(sizeof(struct pthread_rwlock)); + if (rwlp == NULL) + return (ENOMEM); + error = rwlock_init(rwlp, attr); + if (error) { + free(rwlp); + return (error); + } + *rwlpp = rwlp; + return (0); +} + +static int +init_static(struct pthread *thread, pthread_rwlock_old_t *rwlpp) +{ + int error; + + THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock); + + if (*rwlpp == THR_RWLOCK_INITIALIZER) + error = rwlock_init_old(rwlpp, NULL); + else + error = 0; + + THR_LOCK_RELEASE(thread, &_rwlock_static_lock); - prwlock = *rwlock; + return (error); +} - if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED)) - return (EINVAL); +int +_pthread_rwlock_destroy_1_0(pthread_rwlock_old_t *rwlpp) +{ + struct pthread_rwlock *rwlp; + int error; - state = prwlock->lock.rw_state; - if (state & URWLOCK_WRITE_OWNER) { - if (__predict_false(prwlock->owner != curthread)) - return (EPERM); - prwlock->owner = NULL; + rwlp = *rwlpp; + if (rwlp == THR_RWLOCK_INITIALIZER) + error = 0; + else if (rwlp == THR_RWLOCK_DESTROYED) + error = EINVAL; + else { + error = rwlock_destroy_common(rwlp); + if (error) + return (error); + *rwlpp = THR_RWLOCK_DESTROYED; + free(rwlp); } + return (error); +} + +int +_pthread_rwlock_init_1_0(pthread_rwlock_old_t *rwlpp, const pthread_rwlockattr_t *attr) +{ + *rwlpp = NULL; + return (rwlock_init_old(rwlpp, attr)); +} + +int +_pthread_rwlock_timedrdlock_1_0(pthread_rwlock_old_t *rwlpp, + const struct timespec *abstime) +{ + struct pthread_rwlock *rwlp; + + CHECK_AND_INIT_RWLOCK + + return (rwlock_rdlock_common(rwlp, abstime)); +} + 
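
The _1_0 functions keep the old pointer-based ABI alive for binaries bound to the FBSD_1.0 symbols. FB10_COMPAT itself is not shown in this patch; presumably it is built on __sym_compat() from <sys/cdefs.h>, roughly:

    /* assumption: the real definition lives elsewhere in libthr */
    #define FB10_COMPAT(func, sym)  __sym_compat(sym, func, FBSD_1.0)

    /*
     * FB10_COMPAT(_pthread_rwlock_init_1_0, pthread_rwlock_init) then
     * emits an assembler directive along the lines of
     *     .symver _pthread_rwlock_init_1_0, pthread_rwlock_init@FBSD_1.0
     * so old binaries bind to the wrapper while newly linked code gets
     * the default (current) version of pthread_rwlock_init.
     */
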
+int +_pthread_rwlock_timedwrlock_1_0(pthread_rwlock_old_t *rwlpp, + const struct timespec *abstime) +{ + struct pthread_rwlock *rwlp; + + CHECK_AND_INIT_RWLOCK + + return (rwlock_wrlock_common(rwlp, abstime)); +} + +int +_pthread_rwlock_tryrdlock_1_0(pthread_rwlock_old_t *rwlpp) +{ + struct pthread_rwlock *rwlp; + + CHECK_AND_INIT_RWLOCK + + return _pthread_rwlock_tryrdlock(rwlp); +} + +int +_pthread_rwlock_trywrlock_1_0(pthread_rwlock_old_t *rwlpp) +{ + struct pthread_rwlock *rwlp; - ret = _thr_rwlock_unlock(&prwlock->lock); - if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0) - curthread->rdlock_count--; + CHECK_AND_INIT_RWLOCK + + return _pthread_rwlock_trywrlock(rwlp); +} + +int +_pthread_rwlock_rdlock_1_0(pthread_rwlock_old_t *rwlpp) +{ + struct pthread_rwlock *rwlp; + + CHECK_AND_INIT_RWLOCK + + return rwlock_rdlock_common(rwlp, NULL); +} + +int +_pthread_rwlock_wrlock_1_0(pthread_rwlock_old_t *rwlpp) +{ + struct pthread_rwlock *rwlp; + + CHECK_AND_INIT_RWLOCK + + return (rwlock_wrlock_common(rwlp, NULL)); +} + +int +_pthread_rwlock_unlock_1_0(pthread_rwlock_old_t *rwlpp) +{ + struct pthread_rwlock *rwlp; - return (ret); + rwlp = *rwlpp; + if (__predict_false(rwlp <= THR_RWLOCK_DESTROYED)) + return (EINVAL); + return _pthread_rwlock_unlock(rwlp); } + +FB10_COMPAT(_pthread_rwlock_destroy_1_0, pthread_rwlock_destroy); +FB10_COMPAT(_pthread_rwlock_init_1_0, pthread_rwlock_init); +FB10_COMPAT(_pthread_rwlock_rdlock_1_0, pthread_rwlock_rdlock); +FB10_COMPAT(_pthread_rwlock_timedrdlock_1_0, pthread_rwlock_timedrdlock); +FB10_COMPAT(_pthread_rwlock_tryrdlock_1_0, pthread_rwlock_tryrdlock); +FB10_COMPAT(_pthread_rwlock_trywrlock_1_0, pthread_rwlock_trywrlock); +FB10_COMPAT(_pthread_rwlock_unlock_1_0, pthread_rwlock_unlock); +FB10_COMPAT(_pthread_rwlock_wrlock_1_0, pthread_rwlock_wrlock); +FB10_COMPAT(_pthread_rwlock_timedwrlock_1_0, pthread_rwlock_timedwrlock); --- src/lib/libthr/thread/thr_rwlockattr.c 2010-10-20 02:35:31.000000000 0000 +++ src/lib/libthr/thread/thr_rwlockattr.c 2010-11-23 01:58:38.000000000 0000 @@ -39,6 +39,9 @@ __weak_reference(_pthread_rwlockattr_init, pthread_rwlockattr_init); __weak_reference(_pthread_rwlockattr_setpshared, pthread_rwlockattr_setpshared); +int _pthread_rwlockattr_setpshared_1_0(pthread_rwlockattr_t *, int); +FB10_COMPAT(_pthread_rwlockattr_setpshared_1_0, pthread_rwlockattr_setpshared); + int _pthread_rwlockattr_destroy(pthread_rwlockattr_t *rwlockattr) { @@ -61,6 +64,9 @@ _pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *rwlockattr, int *pshared) { + if (rwlockattr == NULL || *rwlockattr == NULL) + return (EINVAL); + *pshared = (*rwlockattr)->pshared; return(0); @@ -89,6 +95,24 @@ int _pthread_rwlockattr_setpshared(pthread_rwlockattr_t *rwlockattr, int pshared) { + if (rwlockattr == NULL || *rwlockattr == NULL) + return (EINVAL); + + if (pshared != PTHREAD_PROCESS_PRIVATE && + pshared != PTHREAD_PROCESS_SHARED) + return(EINVAL); + + (*rwlockattr)->pshared = pshared; + + return(0); +} + +int +_pthread_rwlockattr_setpshared_1_0(pthread_rwlockattr_t *rwlockattr, int pshared) +{ + if (rwlockattr == NULL || *rwlockattr == NULL) + return (EINVAL); + /* Only PTHREAD_PROCESS_PRIVATE is supported. 
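
The type shapes behind the two ABIs make the wrappers above clearer; they are along these lines (the typedef is an assumption inferred from the surrounding code, not shown in this hunk):

    /* FBSD_1.0 ABI: the user-visible handle is a pointer to heap state */
    typedef struct pthread_rwlock *pthread_rwlock_old_t;

    /*
     * New ABI: pthread_rwlock_t is the structure itself, so
     * PTHREAD_RWLOCK_INITIALIZER fills it in statically and no
     * allocation or lazy init_static() dance is needed.
     */
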
*/ if (pshared != PTHREAD_PROCESS_PRIVATE) return(EINVAL); --- src/lib/libthr/thread/thr_sig.c 2010-10-29 10:36:02.000000000 0000 +++ src/lib/libthr/thread/thr_sig.c 2010-11-29 06:32:08.000000000 0000 @@ -379,7 +379,7 @@ break; curthread->flags |= THR_FLAGS_SUSPENDED; THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock); - _thr_umtx_wait_uint(&curthread->cycle, cycle, NULL, 0); + _thr_umtx_wait_uint(&curthread->cycle, cycle, 0); THR_UMUTEX_LOCK(curthread, &(curthread)->lock); curthread->flags &= ~THR_FLAGS_SUSPENDED; } --- src/lib/libthr/thread/thr_suspend_np.c 2010-09-13 07:35:35.000000000 0000 +++ src/lib/libthr/thread/thr_suspend_np.c 2010-11-29 06:32:08.000000000 0000 @@ -132,7 +132,7 @@ _thr_send_sig(thread, SIGCANCEL); THR_THREAD_UNLOCK(curthread, thread); if (waitok) { - _thr_umtx_wait_uint(&thread->cycle, tmp, NULL, 0); + _thr_umtx_wait_uint(&thread->cycle, tmp, 0); THR_THREAD_LOCK(curthread, thread); } else { THR_THREAD_LOCK(curthread, thread); --- src/lib/libthr/thread/thr_umtx.c 2010-09-01 03:35:39.000000000 0000 +++ src/lib/libthr/thread/thr_umtx.c 2010-11-30 05:04:10.000000000 0000 @@ -59,18 +59,52 @@ { uint32_t owner; - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT2 | + UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { for (;;) { /* wait in kernel */ _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); owner = mtx->m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0 && - atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id|owner)) return (0); } } + return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); +} + +#define SPINLOOPS 1000 + +int +__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) +{ + uint32_t owner; + if (!_thr_is_smp) + return __thr_umutex_lock(mtx, id); + + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { + for (;;) { + int count = SPINLOOPS; + while (count--) { + owner = mtx->m_owner; + if ((owner & ~UMUTEX_CONTESTED) == 0) { + if (atomic_cmpset_acq_32( + &mtx->m_owner, + owner, id|owner)) { + return (0); + } + } + CPU_SPINWAIT; + } + + /* wait in kernel */ + _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); + } + } + return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); } @@ -78,40 +112,32 @@ __thr_umutex_timedlock(struct umutex *mtx, uint32_t id, const struct timespec *ets) { - struct timespec timo, cts; uint32_t owner; int ret; - clock_gettime(CLOCK_REALTIME, &cts); - TIMESPEC_SUB(&timo, ets, &cts); - - if (timo.tv_sec < 0) - return (ETIMEDOUT); - for (;;) { - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { + if ((mtx->m_flags & + (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { /* wait in kernel */ - ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, &timo); + ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, + UMUTEX_ABSTIME, NULL, __DECONST(void*, ets)); /* now try to lock it */ owner = mtx->m_owner; if ((owner & ~UMUTEX_CONTESTED) == 0 && - atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) + atomic_cmpset_acq_32(&mtx->m_owner, owner, + id|owner)) { return (0); + } } else { - ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, &timo); + ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, + UMUTEX_ABSTIME, NULL, __DECONST(void *, ets)); if (ret == 0) break; } if (ret == ETIMEDOUT) break; - clock_gettime(CLOCK_REALTIME, &cts); - TIMESPEC_SUB(&timo, ets, &cts); - if (timo.tv_sec < 0 || (timo.tv_sec == 0 && timo.tv_nsec == 0)) { - ret = ETIMEDOUT; - break; - } } return (ret); } @@ -121,8 +147,9 @@ { 
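+	/*
+	 * Hand-off for a plain umutex (no PP/PI/robust semantics): drop
+	 * m_owner from (id | UMUTEX_CONTESTED) to UMUTEX_CONTESTED with
+	 * a release CAS, then UMTX_OP_MUTEX_WAKE lets a kernel-side
+	 * waiter claim the lock.
+	 */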
#ifndef __ia64__ /* XXX this logic has a race-condition on ia64. */ - if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { - atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED); + if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST)) == 0) { + atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, + UMUTEX_CONTESTED); return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0); } #endif /* __ia64__ */ @@ -153,21 +180,38 @@ } int -_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared) +_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, int shared) +{ + return _umtx_op_err(__DEVOLATILE(void *, mtx), + shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, NULL); +} + +int +_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid, + const struct timespec *abstime, int shared) { - if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && - timeout->tv_nsec <= 0))) - return (ETIMEDOUT); + struct timespec ts, ts2, *tsp; + + if (abstime != NULL) { + clock_gettime(clockid, &ts); + TIMESPEC_SUB(&ts2, abstime, &ts); + if (ts2.tv_sec < 0 || ts2.tv_nsec <= 0) + return (ETIMEDOUT); + tsp = &ts2; + } else { + tsp = NULL; + } return _umtx_op_err(__DEVOLATILE(void *, mtx), - shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, - __DECONST(void*, timeout)); + shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, NULL, + tsp); } int _thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared) { - return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, - nr_wakeup, 0, 0); + return _umtx_op_err(__DEVOLATILE(void *, mtx), + shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, + nr_wakeup, 0, 0); } void @@ -178,16 +222,9 @@ int _thr_ucond_wait(struct ucond *cv, struct umutex *m, - const struct timespec *timeout, int check_unparking) + const struct timespec *timeout, int flags) { - if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && - timeout->tv_nsec <= 0))) { - struct pthread *curthread = _get_curthread(); - _thr_umutex_unlock(m, TID(curthread)); - return (ETIMEDOUT); - } - return _umtx_op_err(cv, UMTX_OP_CV_WAIT, - check_unparking ? 
UMTX_CHECK_UNPARKING : 0,
+	return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
 		m, __DECONST(void*, timeout));
 }
--- src/lib/libthr/thread/thr_umtx.h	2010-09-01 03:35:39.000000000 0000
+++ src/lib/libthr/thread/thr_umtx.h	2010-11-30 05:04:10.000000000 0000
@@ -32,10 +32,13 @@
 #include
 #include
 
-#define DEFAULT_UMUTEX	{0,0,{0,0},{0,0,0,0}}
-#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}
+#define DEFAULT_UMUTEX	{0, 0, {0}, 0, {0}}
+#define DEFAULT_URWLOCK {0, 0, 0, 0, {0}}
+
+typedef uint32_t umtx_t;
 
 int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
+int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
 int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
 	const struct timespec *timeout) __hidden;
 int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
@@ -48,11 +51,12 @@
 int _thr_umtx_wait(volatile long *mtx, long exp,
 	const struct timespec *timeout) __hidden;
-int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
+int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp, int shared) __hidden;
+int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
 	const struct timespec *timeout, int shared) __hidden;
 int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
 int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
-	const struct timespec *timeout, int check_unpaking) __hidden;
+	const struct timespec *timeout, int wflags) __hidden;
 void _thr_ucond_init(struct ucond *cv) __hidden;
 int _thr_ucond_signal(struct ucond *cv) __hidden;
 int _thr_ucond_broadcast(struct ucond *cv) __hidden;
@@ -66,12 +70,16 @@
 void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
 void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
 
+/*
+ * These functions are used by the library for its internal locking;
+ * they are not used to implement POSIX mutexes, which are far more
+ * complex.
+ */
 static inline int
 _thr_umutex_trylock(struct umutex *mtx, uint32_t id)
 {
 	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
 		return (0);
-	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
+	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT2) == 0)
 		return (EBUSY);
 	return (__thr_umutex_trylock(mtx));
 }
@@ -82,7 +90,7 @@
 	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
 		return (0);
 	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
-	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
+	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT2 | UMUTEX_PRIO_INHERIT)) == 0))
 		if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
 			return (0);
 	return (EBUSY);
@@ -97,6 +105,14 @@
 }
 
 static inline int
+_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
+{
+	if (_thr_umutex_trylock2(mtx, id) == 0)
+		return (0);
+	return (__thr_umutex_lock_spin(mtx, id));
+}
+
+static inline int
 _thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
 	const struct timespec *timeout)
 {
@@ -113,6 +129,9 @@
 	return (__thr_umutex_unlock(mtx, id));
 }
 
+/*
+ * The pthread rwlock implementation depends on these functions.
+ */
 static inline int
 _thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
 {
--- src/sys/kern/kern_thr.c	2010-10-23 13:20:17.000000000 0000
+++ src/sys/kern/kern_thr.c	2010-11-23 01:58:38.000000000 0000
@@ -285,12 +285,10 @@
 		kern_umtx_wake(td, uap->state, INT_MAX, 0);
 	}
 
+	umtx_thread_exit(td);
+
 	rw_wlock(&tidhash_lock);
 	PROC_LOCK(p);
-	/*
-	 * Shutting down last thread in the proc. This will actually
-	 * call exit() in the trampoline when it returns.
-	 */
 	if (p->p_numthreads != 1) {
 		LIST_REMOVE(td, td_hash);
 		rw_wunlock(&tidhash_lock);
@@ -299,9 +297,11 @@
 		thread_stopped(p);
 		thread_exit();
 		/* NOTREACHED */
+	} else {
+		PROC_UNLOCK(p);
+		rw_wunlock(&tidhash_lock);
+		exit1(td, 0);
 	}
-	PROC_UNLOCK(p);
-	rw_wunlock(&tidhash_lock);
 	return (0);
 }
--- src/sys/kern/kern_thread.c	2010-10-17 11:05:23.000000000 0000
+++ src/sys/kern/kern_thread.c	2010-11-23 01:58:38.000000000 0000
@@ -81,15 +81,54 @@
 
 static void thread_zombie(struct thread *);
 
+#define TID_BUFFER_SIZE	1024
+
 struct mtx tid_lock;
 static struct unrhdr *tid_unrhdr;
-
+static lwpid_t tid_buffer[TID_BUFFER_SIZE];
+static int tid_head, tid_tail;
 static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
 
 struct tidhashhead *tidhashtbl;
 u_long	tidhash;
 struct	rwlock tidhash_lock;
 
+static lwpid_t
+tid_alloc(void)
+{
+	lwpid_t	tid;
+
+	tid = alloc_unr(tid_unrhdr);
+	if (tid != -1)
+		return (tid);
+	mtx_lock(&tid_lock);
+	if (tid_head == tid_tail) {
+		mtx_unlock(&tid_lock);
+		return (-1);
+	}
+	tid = tid_buffer[tid_head++];
+	tid_head %= TID_BUFFER_SIZE;
+	mtx_unlock(&tid_lock);
+	return (tid);
+}
+
+static void
+tid_free(lwpid_t tid)
+{
+	lwpid_t tmp_tid = -1;
+
+	mtx_lock(&tid_lock);
+	if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) {
+		/* Buffer full: evict the oldest entry to the allocator. */
+		tmp_tid = tid_buffer[tid_head];
+		tid_head = (tid_head + 1) % TID_BUFFER_SIZE;
+	}
+	tid_buffer[tid_tail++] = tid;
+	tid_tail %= TID_BUFFER_SIZE;
+	mtx_unlock(&tid_lock);
+	if (tmp_tid != -1)
+		free_unr(tid_unrhdr, tmp_tid);
+}
+
 /*
  * Prepare a thread for use.
  */
@@ -102,7 +141,7 @@
 
 	td->td_state = TDS_INACTIVE;
 	td->td_oncpu = NOCPU;
-	td->td_tid = alloc_unr(tid_unrhdr);
+	td->td_tid = tid_alloc();
 
 	/*
 	 * Note that td_critnest begins life as 1 because the thread is not
@@ -155,7 +194,7 @@
 	osd_thread_exit(td);
 	EVENTHANDLER_INVOKE(thread_dtor, td);
-	free_unr(tid_unrhdr, td->td_tid);
+	tid_free(td->td_tid);
 }
 
 /*
@@ -373,7 +412,6 @@
 #ifdef AUDIT
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
-	umtx_thread_exit(td);
 	/*
 	 * drop FPU & debug register state storage, or any other
 	 * architecture specific resources that
@@ -754,6 +792,7 @@
 	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
 		PROC_UNLOCK(p);
 		tidhash_remove(td);
+		umtx_thread_exit(td);
 		PROC_LOCK(p);
 		tdsigcleanup(td);
 		PROC_SLOCK(p);
--- src/sys/kern/kern_umtx.c	2010-11-22 02:45:21.000000000 0000
+++ src/sys/kern/kern_umtx.c	2010-11-25 09:13:19.000000000 0000
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -72,6 +73,10 @@
 #define _UMUTEX_TRY		1
 #define _UMUTEX_WAIT		2
 
+#define ROB_USER_UNLOCK		0
+#define ROB_THREAD_EXIT		1
+#define ROB_KERNEL_UNLOCK	2
+
 /* Key to represent a unique userland synchronous object */
 struct umtx_key {
 	int	hash;
@@ -91,6 +96,7 @@
 			uintptr_t	b;
 		} both;
 	} info;
+	struct umtxq_chain * volatile chain;
 };
 
 /* Priority inheritance mutex info. */
@@ -114,6 +120,16 @@
 	struct umtx_key		pi_key;
 };
 
+struct robust_info {
+	struct thread		*ownertd;
+	SLIST_ENTRY(robust_info) hash_qe;
+	LIST_ENTRY(robust_info)	td_qe;
+	struct umutex		*umtxp;
+};
+
+SLIST_HEAD(robust_hashlist, robust_info);
+LIST_HEAD(robust_list, robust_info);
+
 /* A userland synchronous object user. */
 struct umtx_q {
 	/* Linked list for the hash. */
@@ -150,6 +166,14 @@
 
 	/* The queue we on */
 	struct umtxq_queue	*uq_cur_queue;
+
+	int			uq_repair_mutex;
+
+	/* Robust mutex list */
+	struct robust_list	uq_rob_list;
+
+	/* Thread is exiting.
*/ + char uq_exiting; }; TAILQ_HEAD(umtxq_head, umtx_q); @@ -160,6 +184,10 @@ struct umtx_key key; LIST_ENTRY(umtxq_queue) link; int length; + + int binding; + struct umutex *bind_mutex; + struct umtx_key bind_mkey; }; LIST_HEAD(umtxq_list, umtxq_queue); @@ -177,7 +205,7 @@ LIST_HEAD(, umtxq_queue) uc_spare_queue; /* Busy flag */ - char uc_busy; + volatile char uc_busy; /* Chain lock waiters */ int uc_waiters; @@ -187,6 +215,13 @@ }; +struct robust_chain { + /* Lock for this chain. */ + struct mtx lock; + struct robust_hashlist rob_list; +}; + + #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED) #define UMTXQ_BUSY_ASSERT(uc) KASSERT(&(uc)->uc_busy, ("umtx chain is not busy")) @@ -216,15 +251,63 @@ #define BUSY_SPINS 200 +#define ROBUST_CHAINS 128 +#define ROBUST_SHIFTS (__WORD_BIT - 7) + static uma_zone_t umtx_pi_zone; +static uma_zone_t robust_zone; static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS]; static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory"); static int umtx_pi_allocated; +#ifdef SMP +static int umtx_cvsig_migrate = 0; +#else +static int umtx_cvsig_migrate = 1; +#endif + +static struct robust_chain robust_chains[ROBUST_CHAINS]; +static int set_max_robust(SYSCTL_HANDLER_ARGS); SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug"); SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD, &umtx_pi_allocated, 0, "Allocated umtx_pi"); +SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_cvsig_migrate, CTLFLAG_RW, + &umtx_cvsig_migrate, 0, "cvsig migrate"); + +SYSCTL_DECL(_kern_threads); +SYSCTL_PROC(_kern_threads, OID_AUTO, max_robust_mutexs_per_proc, + CTLTYPE_INT | CTLFLAG_RW, 0, sizeof(int), set_max_robust, "I", + "Set maximum number of robust mutex"); + +static int max_robust_per_proc = 3000; +static struct mtx max_robust_lock; +static struct timeval max_robust_lasttime; +static struct timeval max_robust_interval; + +#define UMTX_STATE +#ifdef UMTX_STATE +static int umtx_cv_broadcast_migrate; +static int umtx_cv_signal_migrate; +static int umtx_cv_insert_failure; +static int umtx_cv_unlock_failure; +static int umtx_timedlock_count; +SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_cv_broadcast_migrate, CTLFLAG_RD, + &umtx_cv_broadcast_migrate, 0, "cv_broadcast thread migrated"); +SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_cv_signal_migrate, CTLFLAG_RD, + &umtx_cv_signal_migrate, 0, "cv_signal thread migrated"); +SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_cv_insert_failure, CTLFLAG_RD, + &umtx_cv_insert_failure, 0, "cv_wait failure"); +SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_cv_unlock_failure, CTLFLAG_RD, + &umtx_cv_unlock_failure, 0, "cv_wait unlock mutex failure"); +SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_timedlock_count, CTLFLAG_RD, + &umtx_timedlock_count, 0, "umutex timedlock count"); +#define UMTX_STATE_INC(var) umtx_##var++ +#define UMTX_STATE_ADD(var, val) (umtx_##var += (val)) +#else +#define UMTX_STATE_INC(var) +#endif + static void umtxq_sysinit(void *); static void umtxq_hash(struct umtx_key *key); static struct umtxq_chain *umtxq_getchain(struct umtx_key *key); @@ -232,7 +315,9 @@ static void umtxq_unlock(struct umtx_key *key); static void umtxq_busy(struct umtx_key *key); static void umtxq_unbusy(struct umtx_key *key); -static void umtxq_insert_queue(struct umtx_q *uq, int q); +static void umtxq_insert_queue(struct umtx_q *, int); +static int umtxq_insert_queue2(struct umtx_q *, int, struct umutex *, + const struct umtx_key *); static void umtxq_remove_queue(struct umtx_q *uq, int q); static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, int 
timo); static int umtxq_count(struct umtx_key *key); @@ -243,10 +328,19 @@ static struct umtx_pi *umtx_pi_alloc(int); static void umtx_pi_free(struct umtx_pi *pi); static void umtx_pi_adjust_locked(struct thread *td, u_char oldpri); -static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags); -static void umtx_thread_cleanup(struct thread *td); +static int do_unlock_pp(struct thread *, struct umutex *, uint32_t, int); +static void umtx_thread_cleanup(struct thread *); static void umtx_exec_hook(void *arg __unused, struct proc *p __unused, struct image_params *imgp __unused); +static void umtx_exit_hook(void *arg __unused, struct proc *p __unused); +static void umtx_fork_hook(void *arg __unused, struct proc *p1 __unused, + struct proc *p2, int flags __unused); +static int robust_alloc(struct robust_info **); +static void robust_free(struct robust_info *); +static void robust_insert(struct thread *, struct robust_info *); +static void robust_remove(struct thread *, struct umutex *); +static int do_unlock_umutex(struct thread *, struct umutex *, int); + SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL); #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE) @@ -262,6 +356,8 @@ umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); + robust_zone = uma_zcreate("robust umtx", sizeof(struct robust_info), + NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); for (i = 0; i < 2; ++i) { for (j = 0; j < UMTX_CHAINS; ++j) { mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL, @@ -274,9 +370,24 @@ umtxq_chains[i][j].uc_waiters = 0; } } + + for (i = 0; i < ROBUST_CHAINS; ++i) { + mtx_init(&robust_chains[i].lock, "robql", NULL, + MTX_DEF | MTX_DUPOK); + SLIST_INIT(&robust_chains[i].rob_list); + } + mtx_init(&umtx_lock, "umtx lock", NULL, MTX_SPIN); + mtx_init(&max_robust_lock, "max robust lock", NULL, MTX_DEF); EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL, EVENTHANDLER_PRI_ANY); + EVENTHANDLER_REGISTER(process_exit, umtx_exit_hook, NULL, + EVENTHANDLER_PRI_ANY); + EVENTHANDLER_REGISTER(process_fork, umtx_fork_hook, NULL, + EVENTHANDLER_PRI_ANY); + + max_robust_interval.tv_sec = 10; + max_robust_interval.tv_usec = 0; } struct umtx_q * @@ -285,9 +396,11 @@ struct umtx_q *uq; uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO); - uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO); + uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, + M_WAITOK | M_ZERO); TAILQ_INIT(&uq->uq_spare_queue->head); TAILQ_INIT(&uq->uq_pi_contested); + LIST_INIT(&uq->uq_rob_list); uq->uq_inherited_pri = PRI_MAX; return (uq); } @@ -315,14 +428,30 @@ k1->info.both.b == k2->info.both.b); } +static inline void +umtx_key_copy(struct umtx_key *k1, const struct umtx_key *k2) +{ + k1->hash = k2->hash; + k1->type = k2->type; + k1->shared = k2->shared; + k1->info.both = k2->info.both; + k1->chain = k2->chain; +} + static inline struct umtxq_chain * -umtxq_getchain(struct umtx_key *key) +umtxq_calcchain(struct umtx_key *key) { if (key->type <= TYPE_SEM) return (&umtxq_chains[1][key->hash]); return (&umtxq_chains[0][key->hash]); } +static inline struct umtxq_chain * +umtxq_getchain(struct umtx_key *key) +{ + return (key->chain); +} + /* * Lock a chain. 
*/ @@ -331,8 +460,14 @@ { struct umtxq_chain *uc; - uc = umtxq_getchain(key); - mtx_lock(&uc->uc_lock); + for (;;) { + uc = key->chain; + mtx_lock(&uc->uc_lock); + if (key->chain != uc) + mtx_unlock(&uc->uc_lock); + else + break; + } } /* @@ -341,10 +476,7 @@ static inline void umtxq_unlock(struct umtx_key *key) { - struct umtxq_chain *uc; - - uc = umtxq_getchain(key); - mtx_unlock(&uc->uc_lock); + mtx_unlock(&key->chain->uc_lock); } /* @@ -364,8 +496,10 @@ int count = BUSY_SPINS; if (count > 0) { umtxq_unlock(key); - while (uc->uc_busy && --count > 0) + while (uc->uc_busy && --count > 0) { cpu_spinwait(); + uc = key->chain; + } umtxq_lock(key); } } @@ -374,6 +508,9 @@ uc->uc_waiters++; msleep(uc, &uc->uc_lock, 0, "umtxqb", 0); uc->uc_waiters--; + mtx_unlock(&uc->uc_lock); + umtxq_lock(key); + uc = umtxq_getchain(key); } } uc->uc_busy = 1; @@ -414,19 +551,46 @@ static inline void umtxq_insert_queue(struct umtx_q *uq, int q) { + int error; + + error = umtxq_insert_queue2(uq, q, NULL, NULL); + MPASS(error == 0); +} + +static inline int +umtxq_insert_queue2(struct umtx_q *uq, int q, struct umutex *m, + const struct umtx_key *mkey) +{ struct umtxq_queue *uh; struct umtxq_chain *uc; uc = umtxq_getchain(&uq->uq_key); UMTXQ_LOCKED_ASSERT(uc); - KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue")); + KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, + ("umtx_q is already on queue")); uh = umtxq_queue_lookup(&uq->uq_key, q); if (uh != NULL) { + if (uh->binding) { + if (mkey == NULL || + !umtx_key_match(&uh->bind_mkey, mkey)) + return (EEXIST); + } else { + if (mkey != NULL) + return (EEXIST); + } LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link); } else { uh = uq->uq_spare_queue; uh->key = uq->uq_key; LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link); + uh->bind_mutex = m; + uh->length = 0; + if (mkey != NULL) { + uh->binding = 1; + uh->bind_mkey = *mkey; + } else { + uh->binding = 0; + } } uq->uq_spare_queue = NULL; @@ -434,7 +598,7 @@ uh->length++; uq->uq_flags |= UQF_UMTXQ; uq->uq_cur_queue = uh; - return; + return (0); } static inline void @@ -458,6 +622,8 @@ uh = LIST_FIRST(&uc->uc_spare_queue); KASSERT(uh != NULL, ("uc_spare_queue is empty")); LIST_REMOVE(uh, link); + uh->bind_mutex = NULL; + uh->binding = 0; } uq->uq_spare_queue = uh; uq->uq_cur_queue = NULL; @@ -558,9 +724,10 @@ UMTXQ_LOCKED_ASSERT(uc); if (!(uq->uq_flags & UQF_UMTXQ)) return (0); - error = msleep(uq, &uc->uc_lock, PCATCH, wmesg, timo); + error = msleep(uq, &uc->uc_lock, PCATCH|PDROP, wmesg, timo); if (error == EWOULDBLOCK) error = ETIMEDOUT; + umtxq_lock(&uq->uq_key); return (error); } @@ -578,6 +745,7 @@ boolean_t wired; key->type = type; + key->chain = NULL; if (share == THREAD_SHARE) { key->shared = 0; key->info.private.vs = td->td_proc->p_vmspace; @@ -607,6 +775,7 @@ } umtxq_hash(key); + key->chain = umtxq_calcchain(key); return (0); } @@ -1105,6 +1274,50 @@ return (0); } +static uint32_t +calc_lockword(uint32_t oldval, uint32_t flags, int qlen, int robact, int *nwake) +{ + uint32_t newval; + + if (flags & UMUTEX_ROBUST) { + if (robact == ROB_THREAD_EXIT) { + /* + * Thread is exiting, but did not unlock the mutex, + * mark it in OWNER_DEAD state. + */ + newval = (oldval & ~UMUTEX_OWNER_MASK) | UMUTEX_OWNER_DEAD; + *nwake = 1; + } else if (robact == ROB_USER_UNLOCK && + (oldval & UMUTEX_OWNER_DEAD) != 0) { + /* + * if user unlocks it, and previous owner was dead, + * mark it in INCONSISTENT state. 
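+ *
+ * In summary, calc_lockword() computes these robust transitions
+ * (UMUTEX_OWNER_MASK is the owner-tid field, qlen the waiter count):
+ *
+ *   ROB_THREAD_EXIT: the owner died holding the lock ->
+ *       (oldval & ~UMUTEX_OWNER_MASK) | UMUTEX_OWNER_DEAD, wake one
+ *       waiter, which will observe EOWNERDEAD when it acquires;
+ *   ROB_USER_UNLOCK with UMUTEX_OWNER_DEAD still set: unlocked
+ *       without pthread_mutex_consistent() ->
+ *       (oldval & ~UMUTEX_OWNER_MASK) | UMUTEX_INCONSISTENT, wake all
+ *       waiters, which fail with ENOTRECOVERABLE (this case returns
+ *       before the contested-bit adjustment below);
+ *   otherwise: an ordinary unlock, oldval & ~UMUTEX_OWNER_MASK,
+ *       wake one waiter.
+ *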
+ */ + newval = (oldval & ~UMUTEX_OWNER_MASK) | UMUTEX_INCONSISTENT; + *nwake = INT_MAX; + return (newval); + } else { + newval = oldval & ~UMUTEX_OWNER_MASK; + *nwake = 1; + } + } else { + *nwake = 1; + newval = oldval & ~UMUTEX_OWNER_MASK; + } + + /* + * When unlocking the umtx, it must be marked as unowned if + * there is zero or one thread only waiting for it. + * Otherwise, it must be marked as contested. + */ + if (qlen <= 1) + newval &= ~UMUTEX_CONTESTED; + else + newval |= UMUTEX_CONTESTED; + + return (newval); +} + /* * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. */ @@ -1116,7 +1329,10 @@ uint32_t owner, old, id; int error = 0; - id = td->td_tid; + if (flags & UMUTEX_SIMPLE) + id = UMUTEX_SIMPLE_OWNER; + else + id = td->td_tid; uq = td->td_umtxq; /* @@ -1125,42 +1341,36 @@ */ for (;;) { owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); - if (mode == _UMUTEX_WAIT) { - if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED) + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_MASK) == UMUTEX_INCONSISTENT) { + return (ENOTRECOVERABLE); + } + + if ((owner & UMUTEX_OWNER_MASK) == 0) { + if (mode == _UMUTEX_WAIT) return (0); - } else { /* - * Try the uncontested case. This should be done in userland. + * Try lock it. */ - owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); - + old = casuword32(&m->m_owner, owner, owner|id); /* The acquire succeeded. */ - if (owner == UMUTEX_UNOWNED) + if (owner == old) { + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_DEAD) != 0) + return (EOWNERDEAD); return (0); + } /* The address was invalid. */ - if (owner == -1) + if (old == -1) return (EFAULT); - /* If no one owns it but it is contested try to acquire it. */ - if (owner == UMUTEX_CONTESTED) { - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) - return (0); - - /* The address was invalid. */ - if (owner == -1) - return (EFAULT); - - /* If this failed the lock has changed, restart. */ - continue; - } + /* If this failed the lock has changed, restart. */ + continue; } if ((flags & UMUTEX_ERROR_CHECK) != 0 && - (owner & ~UMUTEX_CONTESTED) == id) + (owner & UMUTEX_OWNER_MASK) == id) return (EDEADLK); if (mode == _UMUTEX_TRY) @@ -1209,7 +1419,11 @@ umtxq_unbusy(&uq->uq_key); if (old == owner) error = umtxq_sleep(uq, "umtxn", timo); - umtxq_remove(uq); + if ((uq->uq_flags & UQF_UMTXQ) != 0) { + umtxq_busy(&uq->uq_key); + umtxq_remove(uq); + umtxq_unbusy(&uq->uq_key); + } umtxq_unlock(&uq->uq_key); umtx_key_release(&uq->uq_key); } @@ -1224,14 +1438,17 @@ * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. */ static int -do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, + int robact) { struct umtx_key key; - uint32_t owner, old, id; - int error; - int count; + uint32_t owner, old, id, newval; + int error, count, nwake; - id = td->td_tid; + if (flags & UMUTEX_SIMPLE) + id = UMUTEX_SIMPLE_OWNER; + else + id = td->td_tid; /* * Make sure we own this mtx. */ @@ -1239,19 +1456,18 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & UMUTEX_OWNER_MASK) != id) return (EPERM); - if ((owner & UMUTEX_CONTESTED) == 0) { + if ((owner & ~UMUTEX_OWNER_MASK) == 0) { + /* No other bits set, just unlock it. 
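
For context, this is the userland contract the robust paths implement; pthread_mutex_consistent() is the new interface declared in the pthread.h hunk earlier in this patch. A hedged sketch of a consumer (the repair helper is an application-defined placeholder):

    pthread_mutexattr_t attr;
    pthread_mutex_t m;
    int error;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
    pthread_mutex_init(&m, &attr);

    error = pthread_mutex_lock(&m);
    switch (error) {
    case 0:
            break;
    case EOWNERDEAD:
            /* Previous owner died mid-critical-section. */
            repair_shared_state();           /* hypothetical helper */
            pthread_mutex_consistent(&m);    /* mark the state usable again */
            break;
    case ENOTRECOVERABLE:
            /* Someone unlocked after EOWNERDEAD without repairing. */
            abort();
    }
    /* ... critical section ... */
    pthread_mutex_unlock(&m);
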
*/ old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); if (old == -1) return (EFAULT); if (old == owner) return (0); - owner = old; } - /* We should only ever be in here for contested locks */ if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags), &key)) != 0) return (error); @@ -1260,16 +1476,13 @@ umtxq_busy(&key); count = umtxq_count(&key); umtxq_unlock(&key); + + owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + newval = calc_lockword(owner, flags, count, robact, &nwake); - /* - * When unlocking the umtx, it must be marked as unowned if - * there is zero or one thread only waiting for it. - * Otherwise, it must be marked as contested. - */ - old = casuword32(&m->m_owner, owner, - count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); + old = casuword32(&m->m_owner, owner, newval); umtxq_lock(&key); - umtxq_signal(&key,1); + umtxq_signal(&key, nwake); umtxq_unbusy(&key); umtxq_unlock(&key); umtx_key_release(&key); @@ -1297,7 +1510,7 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != 0) + if ((owner & UMUTEX_OWNER_MASK) != 0) return (0); flags = fuword32(&m->m_flags); @@ -1316,7 +1529,7 @@ owner = casuword32(&m->m_owner, UMUTEX_CONTESTED, UMUTEX_UNOWNED); umtxq_lock(&key); - if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0) + if (count != 0 && (owner & UMUTEX_OWNER_MASK) == 0) umtxq_signal(&key, 1); umtxq_unbusy(&key); umtxq_unlock(&key); @@ -1618,12 +1831,14 @@ mtx_unlock_spin(&umtx_lock); umtxq_unbusy(&uq->uq_key); - if (uq->uq_flags & UQF_UMTXQ) { + if ((uq->uq_flags & UQF_UMTXQ) != 0) { error = msleep(uq, &uc->uc_lock, PCATCH, wmesg, timo); if (error == EWOULDBLOCK) error = ETIMEDOUT; - if (uq->uq_flags & UQF_UMTXQ) { + if ((uq->uq_flags & UQF_UMTXQ) != 0) { + umtxq_busy(&uq->uq_key); umtxq_remove(uq); + umtxq_unbusy(&uq->uq_key); } } mtx_lock_spin(&umtx_lock); @@ -1758,44 +1973,38 @@ * can fault on any access. */ for (;;) { - /* - * Try the uncontested case. This should be done in userland. - */ - owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, id); - - /* The acquire succeeded. */ - if (owner == UMUTEX_UNOWNED) { - error = 0; + owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_MASK) == UMUTEX_INCONSISTENT) { + error = ENOTRECOVERABLE; break; } - /* The address was invalid. */ - if (owner == -1) { - error = EFAULT; - break; - } - - /* If no one owns it but it is contested try to acquire it. */ - if (owner == UMUTEX_CONTESTED) { - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) { - umtxq_lock(&uq->uq_key); - umtxq_busy(&uq->uq_key); - error = umtx_pi_claim(pi, td); - umtxq_unbusy(&uq->uq_key); - umtxq_unlock(&uq->uq_key); + if ((owner & UMUTEX_OWNER_MASK) == 0) { + old = casuword32(&m->m_owner, owner, id|owner); + /* The acquire succeeded. */ + if (owner == old) { + if ((owner & UMUTEX_CONTESTED) != 0) { + umtxq_lock(&uq->uq_key); + umtxq_busy(&uq->uq_key); + umtx_pi_claim(pi, td); + umtxq_unbusy(&uq->uq_key); + umtxq_unlock(&uq->uq_key); + } + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_DEAD) != 0) + error = EOWNERDEAD; + else + error = 0; break; } /* The address was invalid. */ - if (owner == -1) { + if (old == -1) { error = EFAULT; break; } - /* If this failed the lock has changed, restart. */ continue; } @@ -1865,14 +2074,14 @@ * Unlock a PI mutex. 
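
do_lock_pi() and do_unlock_pi() back PTHREAD_PRIO_INHERIT mutexes; from userland the protocol is chosen through the standard attribute interface:

    pthread_mutexattr_t attr;
    pthread_mutex_t m;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
    pthread_mutex_init(&m, &attr);
    /* lock/unlock now take the UMUTEX_PRIO_INHERIT kernel paths */
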
*/ static int -do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, + int robact) { struct umtx_key key; struct umtx_q *uq_first, *uq_first2, *uq_me; struct umtx_pi *pi, *pi2; - uint32_t owner, old, id; - int error; - int count; + uint32_t owner, old, id, newval; + int error, count, nwake; int pri; id = td->td_tid; @@ -1883,11 +2092,10 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & UMUTEX_OWNER_MASK) != id) return (EPERM); - /* This should be done in userland */ - if ((owner & UMUTEX_CONTESTED) == 0) { + if ((owner & ~UMUTEX_OWNER_MASK) == 0) { old = casuword32(&m->m_owner, owner, UMUTEX_UNOWNED); if (old == -1) return (EFAULT); @@ -1896,7 +2104,6 @@ owner = old; } - /* We should only ever be in here for contested locks */ if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags), &key)) != 0) return (error); @@ -1942,13 +2149,15 @@ } umtxq_unlock(&key); + owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + newval = calc_lockword(owner, flags, count, robact, &nwake); + /* * When unlocking the umtx, it must be marked as unowned if * there is zero or one thread only waiting for it. * Otherwise, it must be marked as contested. */ - old = casuword32(&m->m_owner, owner, - count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED); + old = casuword32(&m->m_owner, owner, newval); umtxq_lock(&key); umtxq_unbusy(&key); @@ -1961,6 +2170,12 @@ return (0); } +struct old_pp_mutex { + volatile __lwpid_t m_owner; /* Owner of the mutex */ + __uint32_t m_flags; /* Flags of the mutex */ + __uint32_t m_ceilings[2]; /* Priority protect ceiling */ +}; + /* * Lock a PP mutex. */ @@ -1971,22 +2186,33 @@ struct umtx_q *uq, *uq2; struct umtx_pi *pi; uint32_t ceiling; - uint32_t owner, id; + uint32_t owner, id, old; int error, pri, old_inherited_pri, su; + struct old_pp_mutex *oldmtx = (struct old_pp_mutex *)m; - id = td->td_tid; + if (flags & UMUTEX_SIMPLE) + id = UMUTEX_SIMPLE_OWNER; + else + id = td->td_tid; uq = td->td_umtxq; if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags), &uq->uq_key)) != 0) return (error); su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); for (;;) { - old_inherited_pri = uq->uq_inherited_pri; + /* + * We busy the lock, so no one can change the priority ceiling + * while we are locking it. + */ umtxq_lock(&uq->uq_key); umtxq_busy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - ceiling = RTP_PRIO_MAX - fuword32(&m->m_ceilings[0]); + old_inherited_pri = uq->uq_inherited_pri; + if (flags & UMUTEX_PRIO_PROTECT) + ceiling = RTP_PRIO_MAX - fuword32(&oldmtx->m_ceilings[0]); + else + ceiling = RTP_PRIO_MAX - fubyte(&m->m_ceilings[0]); if (ceiling > RTP_PRIO_MAX) { error = EINVAL; goto out; @@ -2007,18 +2233,35 @@ } mtx_unlock_spin(&umtx_lock); - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) { - error = 0; +again: + owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_MASK) == UMUTEX_INCONSISTENT) { + error = ENOTRECOVERABLE; break; } - /* The address was invalid. */ - if (owner == -1) { - error = EFAULT; - break; + /* + * Try lock it. + */ + if ((owner & UMUTEX_OWNER_MASK) == 0) { + old = casuword32(&m->m_owner, owner, id|owner); + /* The acquire succeeded. */ + if (owner == old) { + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_DEAD) != 0) + error = EOWNERDEAD; + else + error = 0; + break; + } + + /* The address was invalid. 
*/ + if (old == -1) { + error = EFAULT; + break; + } + goto again; } if ((flags & UMUTEX_ERROR_CHECK) != 0 && @@ -2041,9 +2284,19 @@ umtxq_lock(&uq->uq_key); umtxq_insert(uq); + umtxq_unlock(&uq->uq_key); + + old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED); + + umtxq_lock(&uq->uq_key); umtxq_unbusy(&uq->uq_key); - error = umtxq_sleep(uq, "umtxpp", timo); - umtxq_remove(uq); + if (old == owner) + error = umtxq_sleep(uq, "umtxn", timo); + if ((uq->uq_flags & UQF_UMTXQ) != 0) { + umtxq_busy(&uq->uq_key); + umtxq_remove(uq); + umtxq_unbusy(&uq->uq_key); + } umtxq_unlock(&uq->uq_key); mtx_lock_spin(&umtx_lock); @@ -2095,16 +2348,22 @@ * Unlock a PP mutex. */ static int -do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags) +do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, + int robact) { + struct old_pp_mutex *oldmtx = (struct old_pp_mutex *)m; struct umtx_key key; struct umtx_q *uq, *uq2; struct umtx_pi *pi; - uint32_t owner, id; + uint32_t owner, id, newval, old; uint32_t rceiling; int error, pri, new_inherited_pri, su; + int count, nwake; - id = td->td_tid; + if (flags & UMUTEX_SIMPLE) + id = UMUTEX_SIMPLE_OWNER; + else + id = td->td_tid; uq = td->td_umtxq; su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); @@ -2115,12 +2374,15 @@ if (owner == -1) return (EFAULT); - if ((owner & ~UMUTEX_CONTESTED) != id) + if ((owner & UMUTEX_OWNER_MASK) != id) return (EPERM); - error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t)); - if (error != 0) - return (error); + if (flags & UMUTEX_PRIO_PROTECT) { + /* old style */ + rceiling = fuword32(&oldmtx->m_ceilings[1]); + } else { + rceiling = fubyte(&m->m_ceilings[1]); + } if (rceiling == -1) new_inherited_pri = PRI_MAX; @@ -2136,43 +2398,49 @@ return (error); umtxq_lock(&key); umtxq_busy(&key); + count = umtxq_count(&key); umtxq_unlock(&key); - /* - * For priority protected mutex, always set unlocked state - * to UMUTEX_CONTESTED, so that userland always enters kernel - * to lock the mutex, it is necessary because thread priority - * has to be adjusted for such mutex. - */ - error = suword32(__DEVOLATILE(uint32_t *, &m->m_owner), - UMUTEX_CONTESTED); + + owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + newval = calc_lockword(owner, flags, count, robact, &nwake); + + if (flags & UMUTEX_PRIO_PROTECT) { + /* + * For old priority protected mutex, always set unlocked state + * to UMUTEX_CONTESTED, so that userland always enters kernel + * to lock the mutex, it is necessary because thread priority + * has to be adjusted for such mutex. 
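
Priority-protected mutexes are configured the same way, plus a ceiling; with the new (PROTECT2) layout the ceiling is fetched with fubyte() above, i.e. it is stored in a single byte rather than a 32-bit word:

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
    pthread_mutexattr_setprioceiling(&attr, 20);   /* illustrative value */
    pthread_mutex_init(&m, &attr);
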
+ */ + newval |= UMUTEX_CONTESTED; + } + old = casuword32(&m->m_owner, owner, newval); + if (old == -1) + error = EFAULT; + if (old != owner) + error = EINVAL; umtxq_lock(&key); - if (error == 0) - umtxq_signal(&key, 1); + umtxq_signal(&key, nwake); umtxq_unbusy(&key); umtxq_unlock(&key); - if (error == -1) - error = EFAULT; - else { - mtx_lock_spin(&umtx_lock); - if (su != 0) - uq->uq_inherited_pri = new_inherited_pri; - pri = PRI_MAX; - TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) { - uq2 = TAILQ_FIRST(&pi->pi_blocked); - if (uq2 != NULL) { - if (pri > UPRI(uq2->uq_thread)) - pri = UPRI(uq2->uq_thread); - } + mtx_lock_spin(&umtx_lock); + if (su != 0) + uq->uq_inherited_pri = new_inherited_pri; + pri = PRI_MAX; + TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) { + uq2 = TAILQ_FIRST(&pi->pi_blocked); + if (uq2 != NULL) { + if (pri > UPRI(uq2->uq_thread)) + pri = UPRI(uq2->uq_thread); } - if (pri > uq->uq_inherited_pri) - pri = uq->uq_inherited_pri; - thread_lock(td); - sched_unlend_user_prio(td, pri); - thread_unlock(td); - mtx_unlock_spin(&umtx_lock); } + if (pri > uq->uq_inherited_pri) + pri = uq->uq_inherited_pri; + thread_lock(td); + sched_unlend_user_prio(td, pri); + thread_unlock(td); + mtx_unlock_spin(&umtx_lock); umtx_key_release(&key); return (error); } @@ -2181,50 +2449,77 @@ do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling, uint32_t *old_ceiling) { + struct old_pp_mutex *oldmtx = (struct old_pp_mutex *)m; struct umtx_q *uq; uint32_t save_ceiling; - uint32_t owner, id; + uint32_t owner, id, old; uint32_t flags; int error; flags = fuword32(&m->m_flags); - if ((flags & UMUTEX_PRIO_PROTECT) == 0) + if ((flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_PROTECT2)) == 0) return (EINVAL); if (ceiling > RTP_PRIO_MAX) return (EINVAL); - id = td->td_tid; + if (flags & UMUTEX_SIMPLE) + id = UMUTEX_SIMPLE_OWNER; + else + id = td->td_tid; uq = td->td_umtxq; if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags), &uq->uq_key)) != 0) return (error); for (;;) { + /* + * This is the protocol that we must busy the lock + * before locking. + */ umtxq_lock(&uq->uq_key); umtxq_busy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - save_ceiling = fuword32(&m->m_ceilings[0]); +again: + if (flags & UMUTEX_PRIO_PROTECT) { + /* old style */ + save_ceiling = fuword32(&oldmtx->m_ceilings[0]); + } else { + save_ceiling = fubyte(&m->m_ceilings[0]); + } - owner = casuword32(&m->m_owner, - UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); - - if (owner == UMUTEX_CONTESTED) { - suword32(&m->m_ceilings[0], ceiling); - suword32(__DEVOLATILE(uint32_t *, &m->m_owner), - UMUTEX_CONTESTED); - error = 0; + owner = fuword32(__DEVOLATILE(void *, &m->m_owner)); + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_MASK) == UMUTEX_INCONSISTENT) { + error = ENOTRECOVERABLE; break; } - /* The address was invalid. */ - if (owner == -1) { - error = EFAULT; - break; - } + /* + * Try lock it. + */ + if ((owner & UMUTEX_OWNER_MASK) == 0) { + old = casuword32(&m->m_owner, owner, id|owner); + /* The acquire succeeded. */ + if (owner == old) { + if ((flags & UMUTEX_ROBUST) != 0 && + (owner & UMUTEX_OWNER_DEAD) != 0) + error = EOWNERDEAD; + else + error = 0; + if (flags & UMUTEX_PRIO_PROTECT) + suword32(&oldmtx->m_ceilings[0], ceiling); + else + subyte(&m->m_ceilings[0], ceiling); + /* unlock */ + suword32(__DEVOLATILE(void *, &m->m_owner), old); + break; + } - if ((owner & ~UMUTEX_CONTESTED) == id) { - suword32(&m->m_ceilings[0], ceiling); - error = 0; - break; + /* The address was invalid. 
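
do_set_ceiling() is the kernel half of pthread_mutex_setprioceiling(3), which presumably reaches it through the UMTX_OP_SET_CEILING operation: the mutex is locked, the ceiling swapped, and the previous value reported back:

    int old_ceiling;

    pthread_mutex_setprioceiling(&m, 24, &old_ceiling);
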
*/ + if (old == -1) { + error = EFAULT; + break; + } + goto again; } /* @@ -2242,13 +2537,23 @@ umtxq_lock(&uq->uq_key); umtxq_insert(uq); umtxq_unbusy(&uq->uq_key); - error = umtxq_sleep(uq, "umtxpp", 0); - umtxq_remove(uq); + umtxq_unlock(&uq->uq_key); + + old = casuword32(&m->m_owner, owner, owner | UMUTEX_CONTESTED); + + umtxq_lock(&uq->uq_key); + umtxq_unbusy(&uq->uq_key); + if (old == owner) + error = umtxq_sleep(uq, "umtxpp", 0); + if ((uq->uq_flags & UQF_UMTXQ) != 0) { + umtxq_busy(&uq->uq_key); + umtxq_remove(uq); + umtxq_unbusy(&uq->uq_key); + } umtxq_unlock(&uq->uq_key); } umtxq_lock(&uq->uq_key); - if (error == 0) - umtxq_signal(&uq->uq_key, INT_MAX); + umtxq_signal(&uq->uq_key, INT_MAX); umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); umtx_key_release(&uq->uq_key); @@ -2261,12 +2566,14 @@ _do_lock_umutex(struct thread *td, struct umutex *m, int flags, int timo, int mode) { - switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { + switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT | + UMUTEX_PRIO_PROTECT2)) { case 0: return (_do_lock_normal(td, m, flags, timo, mode)); case UMUTEX_PRIO_INHERIT: return (_do_lock_pi(td, m, flags, timo, mode)); case UMUTEX_PRIO_PROTECT: + case UMUTEX_PRIO_PROTECT2: return (_do_lock_pp(td, m, flags, timo, mode)); } return (EINVAL); @@ -2277,9 +2584,10 @@ */ static int do_lock_umutex(struct thread *td, struct umutex *m, - struct timespec *timeout, int mode) + struct timespec *timeout, int mode, int wflags) { - struct timespec ts, ts2, ts3; + struct timespec cts, ets, tts; + struct robust_info *rob = NULL; struct timeval tv; uint32_t flags; int error; @@ -2294,26 +2602,59 @@ if (error == EINTR && mode != _UMUTEX_WAIT) error = ERESTART; } else { - getnanouptime(&ts); - timespecadd(&ts, timeout); - TIMESPEC_TO_TIMEVAL(&tv, timeout); + const clockid_t clockid = CLOCK_REALTIME; + + UMTX_STATE_INC(timedlock_count); + + if ((wflags & UMUTEX_ABSTIME) == 0) { + kern_clock_gettime(td, clockid, &ets); + timespecadd(&ets, timeout); + tts = *timeout; + } else { /* absolute time */ + ets = *timeout; + tts = *timeout; + kern_clock_gettime(td, clockid, &cts); + timespecsub(&tts, &cts); + } + TIMESPEC_TO_TIMEVAL(&tv, &tts); for (;;) { error = _do_lock_umutex(td, m, flags, tvtohz(&tv), mode); if (error != ETIMEDOUT) break; - getnanouptime(&ts2); - if (timespeccmp(&ts2, &ts, >=)) { + kern_clock_gettime(td, clockid, &cts); + if (timespeccmp(&cts, &ets, >=)) { error = ETIMEDOUT; break; } - ts3 = ts; - timespecsub(&ts3, &ts2); - TIMESPEC_TO_TIMEVAL(&tv, &ts3); + tts = ets; + timespecsub(&tts, &cts); + TIMESPEC_TO_TIMEVAL(&tv, &tts); } /* Timed-locking is not restarted. */ if (error == ERESTART) error = EINTR; } + + if (error == 0 || error == EOWNERDEAD) { + if ((flags & UMUTEX_ROBUST) != 0 && mode != _UMUTEX_WAIT) { + int error2; + + error2 = robust_alloc(&rob); + if (error2 == 0) { + rob->ownertd = td; + rob->umtxp = m; + robust_insert(td, rob); + } else { + do_unlock_umutex(td, m, ROB_KERNEL_UNLOCK); + if (timeout == NULL) { + error2 = ERESTART; + } else if (error2 == ERESTART) { + error2 = EINTR; + } + error = error2; + } + } + } return (error); } @@ -2321,24 +2662,73 @@ * Unlock a userland POSIX mutex. 
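
do_lock_umutex() now handles both relative and absolute deadlines (wflags & UMUTEX_ABSTIME), converting between them with kern_clock_gettime() and timespecsub(). The userland-visible behaviour, for an already initialized mutex m, is the familiar absolute-deadline one:

    struct timespec deadline;
    int error;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 5;                   /* give up after ~5 seconds */
    error = pthread_mutex_timedlock(&m, &deadline);
    if (error == ETIMEDOUT)
            ;                               /* lock not acquired in time */
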
*/ static int -do_unlock_umutex(struct thread *td, struct umutex *m) +do_unlock_umutex(struct thread *td, struct umutex *m, int robact) { uint32_t flags; + int error; flags = fuword32(&m->m_flags); - if (flags == -1) - return (EFAULT); + if ((flags & UMUTEX_ROBUST) != 0 || robact == ROB_THREAD_EXIT) + robust_remove(td, m); switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { case 0: - return (do_unlock_normal(td, m, flags)); + error = do_unlock_normal(td, m, flags, robact); + break; case UMUTEX_PRIO_INHERIT: - return (do_unlock_pi(td, m, flags)); + error = do_unlock_pi(td, m, flags, robact); + break; case UMUTEX_PRIO_PROTECT: - return (do_unlock_pp(td, m, flags)); + case UMUTEX_PRIO_PROTECT2: + error = do_unlock_pp(td, m, flags, robact); + break; + default: + error = EINVAL; } + return (error); +} - return (EINVAL); +static int +set_contested_bit(struct umutex *m, struct umtxq_queue *uhm, int repair) +{ + int do_wake; + int qlen = uhm->length; + uint32_t owner; + + do_wake = 0; + /* + * Set contested bit for mutex when necessary, so that userland + * mutex unlocker will wake up a waiter thread. + */ + owner = fuword32(__DEVOLATILE(uint32_t *, &m->m_owner)); + for (;;) { + if (owner == UMUTEX_UNOWNED) { + if (!repair && qlen == 1) { + do_wake = 1; + break; + } + if ((owner = casuword32(&m->m_owner, UMUTEX_UNOWNED, + UMUTEX_CONTESTED)) == UMUTEX_UNOWNED) { + do_wake = 1; + break; + } + } + if (owner == UMUTEX_CONTESTED) { + do_wake = 1; + break; + } + if ((owner & UMUTEX_CONTESTED) == 0) { + uint32_t old; + old = casuword32(&m->m_owner, owner, + owner|UMUTEX_CONTESTED); + if (old == owner) + break; + owner = old; + } else { + break; + } + } + return (do_wake); } static int @@ -2346,50 +2736,104 @@ struct timespec *timeout, u_long wflags) { struct umtx_q *uq; + struct umtx_key mkey, *mkeyp, savekey; + struct umutex *bind_mutex; struct timeval tv; struct timespec cts, ets, tts; - uint32_t flags; + struct umtxq_chain *old_chain; + uint32_t flags, mflags; + uint32_t clockid; int error; uq = td->td_umtxq; flags = fuword32(&cv->c_flags); + mflags = fuword32(&m->m_flags); error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key); if (error != 0) return (error); + + if ((wflags & CVWAIT_CLOCKID) != 0) { + clockid = fuword32(&cv->c_clockid); + if (clockid < CLOCK_REALTIME || + clockid >= CLOCK_THREAD_CPUTIME_ID) { + /* hmm, only HW clock id will work. */ + return (EINVAL); + } + } else { + clockid = CLOCK_REALTIME; + } + + savekey = uq->uq_key; + if ((flags & UCOND_BIND_MUTEX) != 0) { + if ((mflags & UMUTEX_PRIO_INHERIT) != 0) + goto ignore; + error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, + GET_SHARE(mflags), &mkey); + if (error != 0) { + umtx_key_release(&uq->uq_key); + return (error); + } + if (mkey.shared == 0) + bind_mutex = m; + else + bind_mutex = NULL; + mkeyp = &mkey; + } else { +ignore: + bind_mutex = NULL; + mkeyp = NULL; + } + + old_chain = uq->uq_key.chain; umtxq_lock(&uq->uq_key); umtxq_busy(&uq->uq_key); - umtxq_insert(uq); + error = umtxq_insert_queue2(uq, UMTX_SHARED_QUEUE, bind_mutex, mkeyp); + if (error != 0) { + UMTX_STATE_INC(cv_insert_failure); + umtxq_unbusy(&uq->uq_key); + umtxq_unlock(&uq->uq_key); + return (error); + } umtxq_unlock(&uq->uq_key); /* - * The magic thing is we should set c_has_waiters to 1 before - * releasing user mutex. + * Set c_has_waiters to 1 before releasing user mutex, also + * don't modify cache line when unnecessary. 
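
do_cv_wait() can now time out against a caller-selected clock (CVWAIT_CLOCKID together with the new c_clockid field); userland picks the clock with the standard attribute call:

    pthread_condattr_t ca;
    pthread_cond_t cv;

    pthread_condattr_init(&ca);
    pthread_condattr_setclock(&ca, CLOCK_MONOTONIC);
    pthread_cond_init(&cv, &ca);
    /* pthread_cond_timedwait() deadlines are now on CLOCK_MONOTONIC */
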
*/ - suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1); + if (fuword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters)) == 0) + suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 1); umtxq_lock(&uq->uq_key); umtxq_unbusy(&uq->uq_key); umtxq_unlock(&uq->uq_key); - error = do_unlock_umutex(td, m); + error = do_unlock_umutex(td, m, ROB_USER_UNLOCK); + if (error) { + UMTX_STATE_INC(cv_unlock_failure); + error = 0; /* ignore the error */ + } umtxq_lock(&uq->uq_key); if (error == 0) { - if ((wflags & UMTX_CHECK_UNPARKING) && - (td->td_pflags & TDP_WAKEUP)) { - td->td_pflags &= ~TDP_WAKEUP; - error = EINTR; - } else if (timeout == NULL) { + if (timeout == NULL) { error = umtxq_sleep(uq, "ucond", 0); } else { - getnanouptime(&ets); - timespecadd(&ets, timeout); - TIMESPEC_TO_TIMEVAL(&tv, timeout); + if ((wflags & CVWAIT_ABSTIME) == 0) { + kern_clock_gettime(td, clockid, &ets); + timespecadd(&ets, timeout); + tts = *timeout; + } else { /* absolute time */ + ets = *timeout; + tts = *timeout; + kern_clock_gettime(td, clockid, &cts); + timespecsub(&tts, &cts); + } + TIMESPEC_TO_TIMEVAL(&tv, &tts); for (;;) { error = umtxq_sleep(uq, "ucond", tvtohz(&tv)); if (error != ETIMEDOUT) break; - getnanouptime(&cts); + kern_clock_gettime(td, clockid, &cts); if (timespeccmp(&cts, &ets, >=)) { error = ETIMEDOUT; break; @@ -2400,52 +2844,237 @@ } } } - if ((uq->uq_flags & UQF_UMTXQ) == 0) error = 0; else { - umtxq_remove(uq); + /* + * This must be timeout or interrupted by signal or + * surprious wakeup. + */ + umtxq_busy(&uq->uq_key); + if ((uq->uq_flags & UQF_UMTXQ) != 0) { + int oldlen = uq->uq_cur_queue->length; + umtxq_remove(uq); + if (oldlen == 1 && old_chain == uq->uq_key.chain) { + umtxq_unlock(&uq->uq_key); + suword32( + __DEVOLATILE(uint32_t *, + &cv->c_has_waiters), 0); + umtxq_lock(&uq->uq_key); + } + } + umtxq_unbusy(&uq->uq_key); if (error == ERESTART) error = EINTR; } + umtxq_unlock(&uq->uq_key); + + /* We were moved to mutex queue. */ + if (mkeyp != NULL && + old_chain != uq->uq_key.chain) { + /* + * cv_broadcast can not access the mutex if we are pshared, + * but it still migrate threads to mutex queue, + * we should repair contested bit here. + */ + if ((mflags & USYNC_PROCESS_SHARED) != 0 && uq->uq_repair_mutex) { + uint32_t owner = fuword32( + __DEVOLATILE(void *, &m->m_owner)); + if ((owner & UMUTEX_CONTESTED) == 0) { + struct umtxq_queue *uhm; + umtxq_lock(mkeyp); + umtxq_busy(mkeyp); + uhm = umtxq_queue_lookup(mkeyp, + UMTX_SHARED_QUEUE); + if (uhm != NULL) + set_contested_bit(m, uhm, 1); + umtxq_unbusy(mkeyp); + umtxq_unlock(mkeyp); + } + } - umtxq_unlock(&uq->uq_key); - umtx_key_release(&uq->uq_key); + error = 0; + } + /* + * Note that we should release a saved key, because if we + * were migrated, the vmobject reference is no longer the original, + * however, we should release the original. + */ + umtx_key_release(&savekey); + if (mkeyp != NULL) + umtx_key_release(mkeyp); + uq->uq_spare_queue->bind_mutex = NULL; + uq->uq_spare_queue->binding = 0; + uq->uq_repair_mutex = 0; return (error); } /* + * Entered with queue busied but not locked, exits with queue locked. + */ +static void +cv_after_migration(int oldlen, struct umutex *bind_mutex, + struct umtxq_queue *uhm) +{ + struct umtx_q *uq; + int do_wake = 0; + int shared = uhm->key.shared; + + /* + * Wake one thread when necessary. 
 
 /*
+ * Entered with the queue busied but not locked; exits with the queue
+ * locked.
+ */
+static void
+cv_after_migration(int oldlen, struct umutex *bind_mutex,
+    struct umtxq_queue *uhm)
+{
+    struct umtx_q *uq;
+    int do_wake = 0;
+    int shared = uhm->key.shared;
+
+    /*
+     * Wake one thread when necessary.  If a thread was already on the
+     * mutex queue before the migration, no wakeup is needed: the mutex
+     * contention bit has already been set by the mutex locking code.
+     * A process-shared mutex may be mapped at a different address in
+     * every process, so we do not know where the mutex lives in our
+     * address space; in that case, let a thread resumed from cv_wait
+     * repair the mutex contention bit.
+     * XXX Fixme: the repairing thread should run as soon as possible;
+     * boost its priority.
+     */
+
+    if (oldlen == 0) {
+        if (!shared) {
+            do_wake = set_contested_bit(bind_mutex, uhm, 0);
+        } else {
+            do_wake = 1;
+        }
+    } else {
+        do_wake = 0;
+    }
+
+    umtxq_lock(&uhm->key);
+    if (do_wake) {
+        uq = TAILQ_FIRST(&uhm->head);
+        if (uq != NULL) {
+            if (shared)
+                uq->uq_repair_mutex = 1;
+            umtxq_signal_thread(uq);
+        }
+    }
+}
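The do_cv_signal() below consults the umtx_cvsig_migrate tunable before moving a waiter. Its policy, restated as a pure function for clarity (a paraphrase of the switch in the diff, not additional kernel code):

static int
cvsig_should_migrate(int umtx_cvsig_migrate, int mp_ncpus)
{
    switch (umtx_cvsig_migrate) {
    case 1:         /* auto: pays off only on uniprocessor */
        return (mp_ncpus == 1);
    case 0:         /* disabled */
        return (0);
    default:        /* any other value: always migrate */
        return (1);
    }
}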
+
+/*
  * Signal a userland condition variable.
  */
 static int
 do_cv_signal(struct thread *td, struct ucond *cv)
 {
+    struct umtxq_queue *uh, *uhm;
+    struct umtxq_chain *uc, *ucm;
+    struct umtx_q *uq;
     struct umtx_key key;
-    int error, cnt, nwake;
-    uint32_t flags;
+    int error, len, migrate;
+    uint32_t flags, owner;
 
     flags = fuword32(&cv->c_flags);
     if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
         return (error);
+
     umtxq_lock(&key);
     umtxq_busy(&key);
-    cnt = umtxq_count(&key);
-    nwake = umtxq_signal(&key, 1);
-    if (cnt <= nwake) {
+    uh = umtxq_queue_lookup(&key, UMTX_SHARED_QUEUE);
+    if (uh == NULL) {
+        int has_waiters = fuword32(__DEVOLATILE(uint32_t *,
+            &cv->c_has_waiters));
+
+        if (has_waiters) {
+            suword32(__DEVOLATILE(uint32_t *,
+                &cv->c_has_waiters), 0);
+        }
+        umtxq_unbusy(&key);
+        umtxq_unlock(&key);
+        umtx_key_release(&key);
+        return (0);
+    }
+
+    len = uh->length;
+    switch(umtx_cvsig_migrate) {
+    case 1:         /* auto */
+        migrate = (mp_ncpus == 1);
+        break;
+    case 0:         /* disable */
+        migrate = 0;
+        break;
+    default:        /* always */
+        migrate = 1;
+    }
+    if (migrate && uh->binding) {
+        struct umutex *bind_mutex = uh->bind_mutex;
+        struct umtx_key mkey;
+        int oldlen;
+
+        mkey = uh->bind_mkey;
+        umtxq_unlock(&key);
+
+        if (!mkey.shared) {
+            owner = fuword32(__DEVOLATILE(void *,
+                &bind_mutex->m_owner));
+            /* If the mutex is not locked, wake up one waiter. */
+            if ((owner & ~UMUTEX_CONTESTED) == 0) {
+                goto wake_one;
+            }
+        }
+
+        /*
+         * Try to move a thread from the cv queue to the mutex queue.
+         */
+        uc = umtxq_getchain(&key);
+        ucm = umtxq_getchain(&mkey);
+
+        umtxq_lock(&mkey);
+        umtxq_busy(&mkey);
+        umtxq_unlock(&mkey);
+        umtxq_lock(&key);
+        umtxq_lock(&mkey);
+        uhm = umtxq_queue_lookup(&mkey, UMTX_SHARED_QUEUE);
+        if (uhm == NULL)
+            oldlen = 0;
+        else
+            oldlen = uhm->length;
+        uq = TAILQ_FIRST(&uh->head);
+        umtxq_remove_queue(uq, UMTX_SHARED_QUEUE);
+        umtx_key_copy(&uq->uq_key, &mkey);
+        umtxq_insert(uq);
+        if (uhm == NULL)
+            uhm = uq->uq_cur_queue;
+        umtxq_unlock(&mkey);
         umtxq_unlock(&key);
-        error = suword32(
-            __DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
+        UMTX_STATE_INC(cv_signal_migrate);
+        if (len == 1)
+            suword32(__DEVOLATILE(uint32_t *,
+                &cv->c_has_waiters), 0);
+        umtxq_lock(&key);
+        umtxq_unbusy(&key);
+        umtxq_unlock(&key);
+        umtx_key_release(&key);
+
+        cv_after_migration(oldlen, bind_mutex, uhm);
+
+        umtxq_unbusy(&mkey);
+        umtxq_unlock(&mkey);
+        return (0);
+    } else {
+        umtxq_unlock(&key);
     }
+
+wake_one:
+    if (len == 1)
+        suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
+    umtxq_lock(&key);
+    uq = TAILQ_FIRST(&uh->head);
+    umtxq_signal_thread(uq);
     umtxq_unbusy(&key);
     umtxq_unlock(&key);
     umtx_key_release(&key);
-    return (error);
+    return (0);
 }
 
 static int
 do_cv_broadcast(struct thread *td, struct ucond *cv)
 {
+    struct umtxq_queue *uh, *uhm, *uh_temp;
+    struct umtxq_chain *uc, *ucm;
     struct umtx_key key;
     int error;
     uint32_t flags;
@@ -2456,17 +3085,111 @@
     umtxq_lock(&key);
     umtxq_busy(&key);
-    umtxq_signal(&key, INT_MAX);
-    umtxq_unlock(&key);
+    uh = umtxq_queue_lookup(&key, UMTX_SHARED_QUEUE);
+    if (uh != NULL && uh->binding) {
+        /*
+         * To avoid a thundering herd, move any waiters to the mutex
+         * queue instead of waking them all at once.
+         */
+        struct umutex *bind_mutex = uh->bind_mutex;
+        struct umtx_key mkey;
+        struct umtx_q *uq;
+        int len, oldlen;
+
+        len = uh->length;
+        mkey = uh->bind_mkey;
+        uc = umtxq_getchain(&key);
+        ucm = umtxq_getchain(&mkey);
+        LIST_REMOVE(uh, link);
+
+        /*
+         * Before busying the mutex sleep-queue, we must unlock the
+         * cv's sleep-queue mutex, because that mutex may not be held
+         * across a sleep.
+         */
+        umtxq_unlock(&key);
+
+        umtxq_lock(&mkey);
+        umtxq_busy(&mkey);
+        umtxq_unlock(&mkey);
+        umtxq_lock(&key);
+        umtxq_lock(&mkey);
+        uhm = umtxq_queue_lookup(&mkey, UMTX_SHARED_QUEUE);
+
+        /* Change each waiter's key (including the chain address). */
+        TAILQ_FOREACH(uq, &uh->head, uq_link) {
+            umtx_key_copy(&uq->uq_key, &mkey);
+            if (uhm != NULL)
+                uq->uq_cur_queue = uhm;
+        }
+        if (uhm == NULL) {
+            /*
+             * The mutex has no waiters; just move the queue head
+             * to the new chain.
+             */
+            oldlen = 0;
+            uh->key = mkey;
+            uh->bind_mutex = NULL;
+            uh->binding = 0;
+            LIST_INSERT_HEAD(&ucm->uc_queue[UMTX_SHARED_QUEUE],
+                uh, link);
+            uhm = uh;
+        } else {
+            /*
+             * Otherwise, move the cv waiters over.
+             */
+            oldlen = uhm->length;
+            TAILQ_CONCAT(&uhm->head, &uh->head, uq_link);
+            uhm->length += uh->length;
+            uh->length = 0;
+            uh->bind_mutex = NULL;
+            uh->binding = 0;
+            LIST_INSERT_HEAD(&ucm->uc_spare_queue, uh, link);
+        }
+
+        UMTX_STATE_ADD(cv_broadcast_migrate, len);
+
+        /*
+         * At this point the cv's queue no longer needs to be
+         * accessed; NULL it.
+         */
+        uh = NULL;
+
+        /*
+         * One queue head has already been moved; move the remaining
+         * (n - 1) free queue heads to the new chain.
+         */
+        while (--len > 0) {
+            uh_temp = LIST_FIRST(&uc->uc_spare_queue);
+            LIST_REMOVE(uh_temp, link);
+            LIST_INSERT_HEAD(&ucm->uc_spare_queue, uh_temp, link);
+        }
+
+        umtxq_unlock(&mkey);
+        umtxq_unlock(&key);
+
+        /* Now the cv has no waiters. */
+        suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
 
-    error = suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
+        umtxq_lock(&key);
+        umtxq_unbusy(&key);
+        umtxq_unlock(&key);
+        umtx_key_release(&key);
 
-    umtxq_lock(&key);
-    umtxq_unbusy(&key);
-    umtxq_unlock(&key);
+        cv_after_migration(oldlen, bind_mutex, uhm);
 
-    umtx_key_release(&key);
-    return (error);
+        umtxq_unbusy(&mkey);
+        umtxq_unlock(&mkey);
+        return (0);
+    } else {
+        umtxq_signal(&key, INT_MAX);
+        umtxq_unlock(&key);
+        suword32(__DEVOLATILE(uint32_t *, &cv->c_has_waiters), 0);
+        umtxq_lock(&key);
+        umtxq_unbusy(&key);
+        umtxq_unlock(&key);
+        umtx_key_release(&key);
+    }
+    return (0);
 }
 
 static int
@@ -3027,7 +3750,33 @@
     return (kern_umtx_wake(td, uap->obj, uap->val, 0));
 }
 
+#define BATCH_SIZE 128
 static int
+__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
+{
+    int count = uap->val;
+    void *uaddrs[BATCH_SIZE];
+    char **upp = (char **)uap->obj;
+    int tocopy;
+    int error = 0;
+    int i, pos = 0;
+
+    while (count > 0) {
+        tocopy = count;
+        if (tocopy > BATCH_SIZE)
+            tocopy = BATCH_SIZE;
+        error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *));
+        if (error != 0)
+            break;
+        for (i = 0; i < tocopy; ++i)
+            kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
+        count -= tocopy;
+        pos += tocopy;
+    }
+    return (error);
+}
+
+static int
 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
 {
     return (kern_umtx_wake(td, uap->obj, uap->val, 1));
@@ -3053,13 +3802,13 @@
         }
         ts = &timeout;
     }
-    return do_lock_umutex(td, uap->obj, ts, 0);
+    return do_lock_umutex(td, uap->obj, ts, 0, uap->val);
 }
 
 static int
 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
 {
-    return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY);
+    return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY, 0);
 }
 
 static int
@@ -3082,7 +3831,7 @@
         }
         ts = &timeout;
     }
-    return do_lock_umutex(td, uap->obj, ts, _UMUTEX_WAIT);
+    return do_lock_umutex(td, uap->obj, ts, _UMUTEX_WAIT, uap->val);
 }
 
 static int
@@ -3094,7 +3843,7 @@
 static int
 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
 {
-    return do_unlock_umutex(td, uap->obj);
+    return do_unlock_umutex(td, uap->obj, ROB_USER_UNLOCK);
 }
 
 static int
@@ -3243,7 +3992,8 @@
     __umtx_op_wait_umutex,          /* UMTX_OP_UMUTEX_WAIT */
     __umtx_op_wake_umutex,          /* UMTX_OP_UMUTEX_WAKE */
     __umtx_op_sem_wait,             /* UMTX_OP_SEM_WAIT */
-    __umtx_op_sem_wake              /* UMTX_OP_SEM_WAKE */
+    __umtx_op_sem_wake,             /* UMTX_OP_SEM_WAKE */
+    __umtx_op_nwake_private         /* UMTX_OP_NWAKE_PRIVATE */
 };
 
 int
@@ -3486,6 +4236,32 @@
     return (do_sem_wait(td, uap->obj, ts));
 }
 
+static int
+__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
+{
+    int count = uap->val;
+    uint32_t uaddrs[BATCH_SIZE];
+    uint32_t *upp = (uint32_t *)uap->obj;
+    int tocopy;
+    int error = 0;
+    int i, pos = 0;
+
+    while (count > 0) {
+        tocopy = count;
+        if (tocopy > BATCH_SIZE)
+            tocopy = BATCH_SIZE;
+        error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
+        if (error != 0)
+            break;
+        for (i = 0; i < tocopy; ++i)
+            kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
+                INT_MAX, 1);
+        count -= tocopy;
+        pos += tocopy;
+    }
+    return (error);
+}
+
 static _umtx_op_func op_table_compat32[] = {
     __umtx_op_lock_umtx_compat32,   /* UMTX_OP_LOCK */
     __umtx_op_unlock_umtx_compat32, /* UMTX_OP_UNLOCK */
@@ -3507,7 +4283,8 @@
     __umtx_op_wait_umutex_compat32, /* UMTX_OP_UMUTEX_WAIT */
     __umtx_op_wake_umutex,          /* UMTX_OP_UMUTEX_WAKE */
     __umtx_op_sem_wait_compat32,    /* UMTX_OP_SEM_WAIT */
-    __umtx_op_sem_wake              /* UMTX_OP_SEM_WAKE */
+    __umtx_op_sem_wake,             /* UMTX_OP_SEM_WAKE */
+    __umtx_op_nwake_private32       /* UMTX_OP_NWAKE_PRIVATE */
 };
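UMTX_OP_NWAKE_PRIVATE exists so that a thread library can coalesce wakeups into one syscall. A sketch of a userland consumer follows; the deferred-wakeup list management is hypothetical, and only _umtx_op() and the operation code come from this patch.

#include <sys/types.h>
#include <sys/umtx.h>

#define MAX_DEFERRED    50

static void *deferred[MAX_DEFERRED];
static int ndeferred;

static void
defer_wake(void *wchan)
{
    deferred[ndeferred++] = wchan;
    if (ndeferred == MAX_DEFERRED) {
        /* One syscall wakes every collected address. */
        _umtx_op(deferred, UMTX_OP_NWAKE_PRIVATE, ndeferred,
            NULL, NULL);
        ndeferred = 0;
    }
}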
 
 int
@@ -3520,6 +4297,102 @@
 }
 #endif
 
+int
+robust_alloc(struct robust_info **robpp)
+{
+    struct proc *p = curproc;
+    int error;
+
+    atomic_fetchadd_int(&p->p_robustcount, 1);
+    if (p->p_robustcount > max_robust_per_proc) {
+        mtx_lock(&max_robust_lock);
+        while (p->p_robustcount > max_robust_per_proc) {
+            if (ratecheck(&max_robust_lasttime,
+                &max_robust_interval)) {
+                printf("Process %lu (%s) exceeded maximum"
+                    " number of robust mutexes\n",
+                    (u_long)p->p_pid, p->p_comm);
+            }
+            p->p_robustwaiters++;
+            error = msleep(&max_robust_per_proc,
+                &max_robust_lock, PCATCH, "maxrob", hz);
+            p->p_robustwaiters--;
+            if (error != 0 && error != EWOULDBLOCK) {
+                mtx_unlock(&max_robust_lock);
+                atomic_fetchadd_int(&p->p_robustcount, -1);
+                return (error);
+            }
+        }
+        mtx_unlock(&max_robust_lock);
+    }
+    *robpp = uma_zalloc(robust_zone, M_ZERO | M_WAITOK);
+    return (0);
+}
+
+static void
+robust_free(struct robust_info *robp)
+{
+    struct proc *p = curproc;
+    int waiters = p->p_robustwaiters;
+
+    atomic_fetchadd_int(&p->p_robustcount, -1);
+    if (waiters != 0) {
+        mtx_lock(&max_robust_lock);
+        wakeup(&max_robust_per_proc);
+        mtx_unlock(&max_robust_lock);
+    }
+    uma_zfree(robust_zone, robp);
+}
+
+static unsigned int
+robust_hash(struct umutex *m)
+{
+    unsigned n = (uintptr_t)m;
+
+    return ((n * GOLDEN_RATIO_PRIME) >> ROBUST_SHIFTS) % ROBUST_CHAINS;
+}
+
+static void
+robust_insert(struct thread *td, struct robust_info *rob)
+{
+    struct umtx_q *uq = td->td_umtxq;
+    int hash = robust_hash(rob->umtxp);
+    struct robust_chain *robc = &robust_chains[hash];
+
+    mtx_lock(&robc->lock);
+    rob->ownertd = td;
+    SLIST_INSERT_HEAD(&robc->rob_list, rob, hash_qe);
+    mtx_unlock(&robc->lock);
+    LIST_INSERT_HEAD(&uq->uq_rob_list, rob, td_qe);
+}
+
+static void
+robust_remove(struct thread *td, struct umutex *umtxp)
+{
+    struct robust_info *rob, *rob2;
+    int hash = robust_hash(umtxp);
+    struct robust_chain *robc = &robust_chains[hash];
+
+    rob2 = NULL;
+    mtx_lock(&robc->lock);
+    SLIST_FOREACH(rob, &robc->rob_list, hash_qe) {
+        if (rob->ownertd == td &&
+            rob->umtxp == umtxp) {
+            if (rob2 == NULL) {
+                SLIST_REMOVE_HEAD(&robc->rob_list, hash_qe);
+            } else {
+                SLIST_REMOVE_AFTER(rob2, hash_qe);
+            }
+            break;
+        }
+        rob2 = rob;
+    }
+    mtx_unlock(&robc->lock);
+    if (rob != NULL) {
+        LIST_REMOVE(rob, td_qe);
+        robust_free(rob);
+    }
+}
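The hash chains above use a multiplicative hash of the mutex address. A stand-alone illustration with stand-in constants; GOLDEN_RATIO_PRIME, ROBUST_SHIFTS and ROBUST_CHAINS are defined elsewhere in the patch and their real values are not shown here.

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME  2654404609U     /* stand-in constant */
#define ROBUST_CHAINS       128             /* stand-in table size */
#define ROBUST_SHIFTS       24              /* stand-in shift */

static unsigned int
hash_ptr(const void *m)
{
    unsigned n = (uintptr_t)m;

    return ((n * GOLDEN_RATIO_PRIME) >> ROBUST_SHIFTS) % ROBUST_CHAINS;
}

int
main(void)
{
    uintptr_t base = 0x28080000;
    int i;

    /* Adjacent mutexes (32 bytes apart) should scatter across chains. */
    for (i = 0; i < 8; i++)
        printf("mutex %d -> chain %u\n", i,
            hash_ptr((void *)(base + 32 * i)));
    return (0);
}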
 
 void
 umtx_thread_init(struct thread *td)
 {
@@ -3543,6 +4416,7 @@
 
     uq = td->td_umtxq;
     uq->uq_inherited_pri = PRI_MAX;
+    uq->uq_exiting = 0;
 
     KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
     KASSERT(uq->uq_thread == td, ("uq_thread != td"));
@@ -3551,7 +4425,7 @@
 }
 
 /*
- * exec() hook.
+ * exec() hook: clean up the last thread's umtx info.
  */
 static void
 umtx_exec_hook(void *arg __unused, struct proc *p __unused,
@@ -3561,6 +4435,35 @@
 }
 
 /*
+ * exit1() hook: clean up the last thread's umtx info.
+ */
+static void
+umtx_exit_hook(void *arg __unused, struct proc *p __unused)
+{
+    struct umtx_q *uq = curthread->td_umtxq;
+
+    if (uq != NULL) {
+        uq->uq_exiting = 1;
+        umtx_thread_cleanup(curthread);
+    }
+}
+
+/*
+ * fork() hook.  The first thread of the new process never calls
+ * umtx_thread_alloc() again, so clear uq_exiting here.
+ */
+void
+umtx_fork_hook(void *arg __unused, struct proc *p1 __unused,
+    struct proc *p2, int flags __unused)
+{
+    struct thread *td = FIRST_THREAD_IN_PROC(p2);
+    struct umtx_q *uq = td->td_umtxq;
+
+    if (uq != NULL)
+        uq->uq_exiting = 0;
+}
+
+/*
  * thread_exit() hook.
  */
 void
@@ -3577,10 +4480,14 @@
 {
     struct umtx_q *uq;
     struct umtx_pi *pi;
+    struct robust_info *rob;
 
     if ((uq = td->td_umtxq) == NULL)
         return;
 
+    while ((rob = LIST_FIRST(&uq->uq_rob_list)) != NULL)
+        do_unlock_umutex(td, rob->umtxp, ROB_THREAD_EXIT);
+
     mtx_lock_spin(&umtx_lock);
     uq->uq_inherited_pri = PRI_MAX;
     while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
@@ -3592,3 +4499,23 @@
     thread_unlock(td);
     mtx_unlock_spin(&umtx_lock);
 }
+
+static int
+set_max_robust(SYSCTL_HANDLER_ARGS)
+{
+    int error, v;
+
+    v = max_robust_per_proc;
+    error = sysctl_handle_int(oidp, &v, 0, req);
+    if (error)
+        return (error);
+    if (req->newptr == NULL)
+        return (error);
+    if (v <= 0)
+        return (EINVAL);
+    mtx_lock(&max_robust_lock);
+    max_robust_per_proc = v;
+    wakeup(&max_robust_per_proc);
+    mtx_unlock(&max_robust_lock);
+    return (0);
+}
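set_max_robust() is only the handler; exposing it requires a registration along these lines. The OID name and parent node below are assumptions, since the SYSCTL declaration is not part of the hunks shown.

SYSCTL_NODE(_kern, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx");
SYSCTL_PROC(_kern_umtx, OID_AUTO, max_robust_per_proc,
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, set_max_robust, "I",
    "Maximum number of robust mutexes per process");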
--- src/sys/sys/_pthreadtypes.h	2009-03-14 20:15:18.000000000 +0000
+++ src/sys/sys/_pthreadtypes.h	2010-11-30 05:24:34.000000000 +0000
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2010 David Xu
  * Copyright (c) 1993, 1994 by Chris Provenzano, proven@mit.edu
  * Copyright (c) 1995-1998 by John Birrell
  * All rights reserved.
@@ -36,6 +37,9 @@
 #ifndef _SYS__PTHREADTYPES_H_
 #define _SYS__PTHREADTYPES_H_
 
+#include
+#include
+
 /*
  * Forward structure definitions.
  *
@@ -66,17 +70,71 @@
 #define _PTHREAD_T_DECLARED
 #endif
 typedef struct pthread_attr         *pthread_attr_t;
-typedef struct pthread_mutex        *pthread_mutex_t;
+typedef struct pthread_mutex        pthread_mutex_t;
 typedef struct pthread_mutex_attr   *pthread_mutexattr_t;
-typedef struct pthread_cond         *pthread_cond_t;
+typedef struct pthread_cond         pthread_cond_t;
 typedef struct pthread_cond_attr    *pthread_condattr_t;
 typedef int                         pthread_key_t;
 typedef struct pthread_once         pthread_once_t;
-typedef struct pthread_rwlock       *pthread_rwlock_t;
+typedef struct pthread_rwlock       pthread_rwlock_t;
 typedef struct pthread_rwlockattr   *pthread_rwlockattr_t;
-typedef struct pthread_barrier      *pthread_barrier_t;
+typedef struct pthread_barrier      pthread_barrier_t;
 typedef struct pthread_barrierattr  *pthread_barrierattr_t;
-typedef struct pthread_spinlock     *pthread_spinlock_t;
+typedef struct pthread_spinlock     pthread_spinlock_t;
+
+struct pthread_mutex {
+    __int16_t   __magic;
+    __int16_t   __flags;
+    __int32_t   __recurse;
+    union {
+        struct pthread  *__ownertd;
+        char            __ownerpad[8];
+    } __ownerdata;
+    __uint16_t  __spinloops;
+    __uint16_t  __pad1;
+    /* kernel umtx part */
+    volatile __uint32_t __lockword;
+    __uint32_t  __lockflags;
+    __uint8_t   __ceilings[2];
+    __uint16_t  __pad2;
+};
+
+struct pthread_cond {
+    __uint16_t  __magic;
+    __uint16_t  __pad;
+    __uint32_t  __has_user_waiters;
+    __uint32_t  __has_kern_waiters;
+    __uint32_t  __flags;
+    __uint32_t  __clock_id;
+};
+
+struct pthread_rwlock {
+    __uint16_t  __magic;
+    __uint16_t  __pad;
+    __uint32_t  __pad2;
+    union {
+        struct pthread  *__ownertd;
+        __uint32_t      __ownertid;
+        char            __ownerpad[8];
+    } __ownerdata;
+    __uint32_t  __state;
+    __uint32_t  __flags;
+    __uint32_t  __blocked_readers;
+    __uint32_t  __blocked_writers;
+};
+
+struct pthread_barrier {
+    pthread_mutex_t __lock;
+    pthread_cond_t  __cond;
+    __uint32_t  __pad;
+    __uint64_t  __cycle;
+    __uint32_t  __count;
+    __uint32_t  __waiters;
+};
+
+struct pthread_spinlock {
+    __uint32_t  __lock;
+};
 
 /*
  * Additional type definitions:
@@ -92,7 +150,6 @@
  */
 struct pthread_once {
     int     state;
-    pthread_mutex_t mutex;
 };
 
 #endif /* ! _SYS__PTHREADTYPES_H_ */
--- src/sys/sys/_umtx.h	2010-01-04 05:30:13.000000000 +0000
+++ src/sys/sys/_umtx.h	2010-11-25 14:30:51.000000000 +0000
@@ -39,14 +39,16 @@
 
 struct umutex {
     volatile __lwpid_t  m_owner;        /* Owner of the mutex */
     __uint32_t          m_flags;        /* Flags of the mutex */
-    __uint32_t          m_ceilings[2];  /* Priority protect ceiling */
+    __uint8_t           m_ceilings[2];  /* Priority protect ceiling */
+    __uint16_t          m_pad;
     __uint32_t          m_spare[4];
 };
 
 struct ucond {
     volatile __uint32_t c_has_waiters;  /* Has waiters in kernel */
     __uint32_t          c_flags;        /* Flags of the condition variable */
-    __uint32_t          c_spare[2];     /* Spare space */
+    __uint32_t          c_clockid;      /* Clock id */
+    __uint32_t          c_spare[1];     /* Spare space */
 };
 
 struct urwlock {
--- src/sys/sys/errno.h	2009-10-07 20:25:47.000000000 +0000
+++ src/sys/sys/errno.h	2010-11-23 01:58:38.000000000 +0000
@@ -177,8 +177,10 @@
 #define ENOTCAPABLE     93      /* Capabilities insufficient */
 #endif /* _POSIX_SOURCE */
 
+#define EOWNERDEAD      94      /* Previous owner died */
+#define ENOTRECOVERABLE 95      /* State not recoverable */
 #ifndef _POSIX_SOURCE
-#define ELAST           93      /* Must be equal largest errno */
+#define ELAST           95      /* Must be equal largest errno */
 #endif /* _POSIX_SOURCE */
 
 #ifdef _KERNEL
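At the application level the two new errnos drive the POSIX robust-mutex recovery protocol that this patch wires up in libthr. A sketch follows; repair_shared_state() is a hypothetical application callback.

#include <pthread.h>
#include <errno.h>

extern void repair_shared_state(void);  /* application-specific */

static int
lock_robust(pthread_mutex_t *m)
{
    int error;

    error = pthread_mutex_lock(m);
    if (error == EOWNERDEAD) {
        /* Previous owner died; shared state may be inconsistent. */
        repair_shared_state();
        if (pthread_mutex_consistent(m) != 0)
            return (ENOTRECOVERABLE);
        error = 0;
    }
    /* ENOTRECOVERABLE: nobody repaired the mutex; it is unusable. */
    return (error);
}

static void
init_robust(pthread_mutex_t *m)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
    pthread_mutex_init(m, &attr);
    pthread_mutexattr_destroy(&attr);
}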
--- src/sys/sys/proc.h	2010-10-17 11:05:23.000000000 +0000
+++ src/sys/sys/proc.h	2010-11-23 01:58:38.000000000 +0000
@@ -524,6 +524,9 @@
     int     p_boundary_count;/* (c) Num threads at user boundary */
     int     p_pendingcnt;   /* how many signals are pending */
     struct itimers *p_itimers;  /* (c) POSIX interval timers. */
+    int     p_robustcount;  /* (*) Number of robust mutexes. */
+    int     p_robustwaiters;/* (*) Number of robust mutex
+                             * waiters. */
 /* End area that is zeroed on creation. */
 #define p_endzero   p_magic
 
--- src/sys/sys/umtx.h	2010-01-04 05:30:13.000000000 +0000
+++ src/sys/sys/umtx.h	2010-11-30 05:23:05.000000000 +0000
@@ -39,20 +39,31 @@
 #define USYNC_PROCESS_SHARED    0x0001  /* Process shared sync objs */
 
 #define UMUTEX_UNOWNED      0x0
+#define UMUTEX_OWNER_DEAD   0x40000000U
 #define UMUTEX_CONTESTED    0x80000000U
+#define UMUTEX_OWNER_MASK   0x3FFFFFFFU
 
 #define UMUTEX_ERROR_CHECK  0x0002  /* Error-checking mutex */
 #define UMUTEX_PRIO_INHERIT 0x0004  /* Priority inherited mutex */
-#define UMUTEX_PRIO_PROTECT 0x0008  /* Priority protect mutex */
+#define UMUTEX_PRIO_PROTECT 0x0008  /* Deprecated */
+#define UMUTEX_PRIO_PROTECT2    0x0010  /* Priority protect mutex */
+#define UMUTEX_SIMPLE       0x0020  /* Use simple lock id. */
+#define UMUTEX_ROBUST       0x0040
+
+/* Special owner ids */
+#define UMUTEX_SIMPLE_OWNER 1   /* The simple mutex's lock bit. */
+#define UMUTEX_INCONSISTENT 2   /* The mutex is unusable */
 
 /* urwlock flags */
-#define URWLOCK_PREFER_READER   0x0002
+#define URWLOCK_PREFER_READER   0x0002
+
+#define URWLOCK_WRITE_OWNER     0x80000000U
+#define URWLOCK_WRITE_WAITERS   0x40000000U
+#define URWLOCK_READ_WAITERS    0x20000000U
+#define URWLOCK_MAX_READERS     0x1fffffffU
+#define URWLOCK_READER_COUNT(c) ((c) & URWLOCK_MAX_READERS)
 
-#define URWLOCK_WRITE_OWNER     0x80000000U
-#define URWLOCK_WRITE_WAITERS   0x40000000U
-#define URWLOCK_READ_WAITERS    0x20000000U
-#define URWLOCK_MAX_READERS     0x1fffffffU
-#define URWLOCK_READER_COUNT(c) ((c) & URWLOCK_MAX_READERS)
+#define UCOND_BIND_MUTEX    0x0002
 
 /* _usem flags */
 #define SEM_NAMED   0x0002
@@ -79,10 +90,18 @@
 #define UMTX_OP_MUTEX_WAKE  18
 #define UMTX_OP_SEM_WAIT    19
 #define UMTX_OP_SEM_WAKE    20
-#define UMTX_OP_MAX         21
+#define UMTX_OP_NWAKE_PRIVATE   21
+#define UMTX_OP_MAX         22
+
+/* flags for UMUTEX_LOCK */
+#define UMUTEX_ABSTIME  0x01
 
 /* flags for UMTX_OP_CV_WAIT */
-#define UMTX_CHECK_UNPARKING    0x01
+#define CVWAIT_CHECK_UNPARKING  0x01
+#define CVWAIT_ABSTIME          0x02
+#define CVWAIT_CLOCKID          0x04
+
+#define UMTX_CHECK_UNPARKING    CVWAIT_CHECK_UNPARKING
 
 #ifndef _KERNEL
 
--- /dev/null	2010-12-01 10:22:00.000000000 +0800
+++ src/lib/libthr/thread/thr_sleepq.c	2010-11-29 14:32:12.000000000 +0800
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2010 David Xu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include "thr_private.h"
+
+#define HASHSHIFT   9
+#define HASHSIZE    (1 << HASHSHIFT)
+#define SC_HASH(wchan) ((unsigned)                      \
+    ((((uintptr_t)(wchan) >> 3)                         \
+    ^ ((uintptr_t)(wchan) >> (HASHSHIFT + 3)))          \
+    & (HASHSIZE - 1)))
+#define SC_LOOKUP(wc)   &sc_table[SC_HASH(wc)]
+
+struct sleepqueue_chain {
+    struct umutex           sc_lock;
+    LIST_HEAD(, sleepqueue) sc_queues;
+    int                     sc_type;
+};
+
+static struct sleepqueue_chain sc_table[HASHSIZE];
+
+void
+_sleepq_init(void)
+{
+    int i;
+
+    for (i = 0; i < HASHSIZE; ++i) {
+        LIST_INIT(&sc_table[i].sc_queues);
+        _thr_umutex_init(&sc_table[i].sc_lock);
+    }
+}
+
+struct sleepqueue *
+_sleepq_alloc(void)
+{
+    struct sleepqueue *sq;
+
+    sq = calloc(1, sizeof(struct sleepqueue));
+    TAILQ_INIT(&sq->sq_blocked);
+    SLIST_INIT(&sq->sq_freeq);
+    return (sq);
+}
+
+void
+_sleepq_free(struct sleepqueue *sq)
+{
+    free(sq);
+}
+
+void
+_sleepq_lock(void *wchan)
+{
+    struct pthread *curthread = _get_curthread();
+    struct sleepqueue_chain *sc;
+
+    sc = SC_LOOKUP(wchan);
+    THR_LOCK_ACQUIRE_SPIN(curthread, &sc->sc_lock);
+}
+
+void
+_sleepq_unlock(void *wchan)
+{
+    struct sleepqueue_chain *sc;
+    struct pthread *curthread = _get_curthread();
+
+    sc = SC_LOOKUP(wchan);
+    THR_LOCK_RELEASE(curthread, &sc->sc_lock);
+}
+
+struct sleepqueue *
+_sleepq_lookup(void *wchan)
+{
+    struct sleepqueue_chain *sc;
+    struct sleepqueue *sq;
+
+    sc = SC_LOOKUP(wchan);
+    LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
+        if (sq->sq_wchan == wchan)
+            return (sq);
+    return (NULL);
+}
+
+void
+_sleepq_add(void *wchan, struct pthread *td)
+{
+    struct sleepqueue_chain *sc;
+    struct sleepqueue *sq;
+
+    sq = _sleepq_lookup(wchan);
+    if (sq != NULL) {
+        /* Donate this thread's spare queue head to the free list. */
+        SLIST_INSERT_HEAD(&sq->sq_freeq, td->sleepqueue, sq_flink);
+    } else {
+        sc = SC_LOOKUP(wchan);
+        sq = td->sleepqueue;
+        LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
+        sq->sq_wchan = wchan;
+        /* sq->sq_type = type; */
+    }
+    td->sleepqueue = NULL;
+    td->wchan = wchan;
+    TAILQ_INSERT_TAIL(&sq->sq_blocked, td, wle);
+}
+
+int
+_sleepq_remove(struct sleepqueue *sq, struct pthread *td)
+{
+    int rc = 1;
+
+    TAILQ_REMOVE(&sq->sq_blocked, td, wle);
+    if (TAILQ_EMPTY(&sq->sq_blocked)) {
+        /* Last waiter takes the queue head back. */
+        LIST_REMOVE(sq, sq_hash);
+        td->sleepqueue = sq;
+        rc = 0;
+    } else {
+        td->sleepqueue = SLIST_FIRST(&sq->sq_freeq);
+        SLIST_REMOVE_HEAD(&sq->sq_freeq, sq_flink);
+    }
+    td->wchan = NULL;
+    return (rc);
+}
+
+void
+_sleepq_drop(struct sleepqueue *sq,
+    void (*cb)(struct pthread *, void *arg), void *arg)
+{
+    struct pthread *td;
+    struct sleepqueue *sq2;
+
+    td = TAILQ_FIRST(&sq->sq_blocked);
+    if (td == NULL)
+        return;
+    LIST_REMOVE(sq, sq_hash);
+    TAILQ_REMOVE(&sq->sq_blocked, td, wle);
+    if (cb != NULL)
+        cb(td, arg);
+    /* The first thread gets the queue head; the rest get spares. */
+    td->sleepqueue = sq;
+    td->wchan = NULL;
+    sq2 = SLIST_FIRST(&sq->sq_freeq);
+    TAILQ_FOREACH(td, &sq->sq_blocked, wle) {
+        if (cb != NULL)
+            cb(td, arg);
+        td->sleepqueue = sq2;
+        td->wchan = NULL;
+        sq2 = SLIST_NEXT(sq2, sq_flink);
+    }
+    TAILQ_INIT(&sq->sq_blocked);
+    SLIST_INIT(&sq->sq_freeq);
+}
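A hedged sketch of how a libthr primitive could drive this sleep-queue API on the wake side. The umtx wakeup of the removed thread is elided, and the struct pthread fields used (the wle linkage on sq_blocked) are those assumed by thr_sleepq.c.

#include "thr_private.h"

static int
sketch_wake_one(void *wchan)
{
    struct pthread *td;
    struct sleepqueue *sq;
    int more;

    _sleepq_lock(wchan);
    sq = _sleepq_lookup(wchan);
    if (sq == NULL) {
        _sleepq_unlock(wchan);
        return (0);             /* nobody is waiting */
    }
    td = TAILQ_FIRST(&sq->sq_blocked);
    /* Returns 0 when td was the last waiter and took the head back. */
    more = _sleepq_remove(sq, td);
    _sleepq_unlock(wchan);
    /* ... wake td here, e.g. via an _umtx_op() on its wait address ... */
    return (more);
}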