Index: sys/sys/lock.h
===================================================================
--- sys/sys/lock.h	(revision 255636)
+++ sys/sys/lock.h	(working copy)
@@ -56,13 +56,14 @@ struct thread;
  */
 struct lock_class {
-	const char	*lc_name;
-	u_int		lc_flags;
-	void	(*lc_assert)(const struct lock_object *lock, int what);
-	void	(*lc_ddb_show)(const struct lock_object *lock);
-	void	(*lc_lock)(struct lock_object *lock, int how);
-	int	(*lc_owner)(const struct lock_object *lock, struct thread **owner);
-	int	(*lc_unlock)(struct lock_object *lock);
+	const char	*lc_name;
+	u_int		lc_flags;
+	void		(*lc_assert)(const struct lock_object *lock, int what);
+	void		(*lc_ddb_show)(const struct lock_object *lock);
+	void		(*lc_lock)(struct lock_object *lock, uintptr_t how);
+	int		(*lc_owner)(const struct lock_object *lock,
+			    struct thread **owner);
+	uintptr_t	(*lc_unlock)(struct lock_object *lock);
 };
 
 #define	LC_SLEEPLOCK	0x00000001	/* Sleep lock. */
Index: sys/kern/kern_mutex.c
===================================================================
--- sys/kern/kern_mutex.c	(revision 255636)
+++ sys/kern/kern_mutex.c	(working copy)
@@ -101,14 +101,14 @@ static void assert_mtx(const struct lock_object *l
 #ifdef DDB
 static void	db_show_mtx(const struct lock_object *lock);
 #endif
-static void	lock_mtx(struct lock_object *lock, int how);
-static void	lock_spin(struct lock_object *lock, int how);
+static void	lock_mtx(struct lock_object *lock, uintptr_t how);
+static void	lock_spin(struct lock_object *lock, uintptr_t how);
 #ifdef KDTRACE_HOOKS
 static int	owner_mtx(const struct lock_object *lock,
 		    struct thread **owner);
 #endif
-static int	unlock_mtx(struct lock_object *lock);
-static int	unlock_spin(struct lock_object *lock);
+static uintptr_t unlock_mtx(struct lock_object *lock);
+static uintptr_t unlock_spin(struct lock_object *lock);
 
 /*
  * Lock classes for sleep and spin mutexes.
@@ -154,20 +154,20 @@ assert_mtx(const struct lock_object *lock, int wha
 }
 
 void
-lock_mtx(struct lock_object *lock, int how)
+lock_mtx(struct lock_object *lock, uintptr_t how)
 {
 
 	mtx_lock((struct mtx *)lock);
 }
 
 void
-lock_spin(struct lock_object *lock, int how)
+lock_spin(struct lock_object *lock, uintptr_t how)
 {
 
 	panic("spin locks can only use msleep_spin");
 }
 
-int
+uintptr_t
 unlock_mtx(struct lock_object *lock)
 {
 	struct mtx *m;
@@ -178,7 +178,7 @@ unlock_mtx(struct lock_object *lock)
 	return (0);
 }
 
-int
+uintptr_t
 unlock_spin(struct lock_object *lock)
 {
 
Index: sys/kern/kern_condvar.c
===================================================================
--- sys/kern/kern_condvar.c	(revision 255636)
+++ sys/kern/kern_condvar.c	(working copy)
@@ -97,7 +97,7 @@ _cv_wait(struct cv *cvp, struct lock_object *lock)
 	WITNESS_SAVE_DECL(lock_witness);
 	struct lock_class *class;
 	struct thread *td;
-	int lock_state;
+	uintptr_t lock_state;
 
 	td = curthread;
 	lock_state = 0;
@@ -214,7 +214,8 @@ _cv_wait_sig(struct cv *cvp, struct lock_object *l
 	WITNESS_SAVE_DECL(lock_witness);
 	struct lock_class *class;
 	struct thread *td;
-	int lock_state, rval;
+	uintptr_t lock_state;
+	int rval;
 
 	td = curthread;
 	lock_state = 0;
Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c	(revision 255636)
+++ sys/kern/kern_synch.c	(working copy)
@@ -157,7 +157,8 @@ _sleep(void *ident, struct lock_object *lock, int
 	struct thread *td;
 	struct proc *p;
 	struct lock_class *class;
-	int catch, lock_state, pri, rval, sleepq_flags;
+	uintptr_t lock_state;
+	int catch, pri, rval, sleepq_flags;
 	WITNESS_SAVE_DECL(lock_witness);
 
 	td = curthread;
Index: sys/kern/kern_rwlock.c
===================================================================
--- sys/kern/kern_rwlock.c	(revision 255636)
+++ sys/kern/kern_rwlock.c	(working copy)
@@ -83,11 +83,11 @@ SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG
 static void	db_show_rwlock(const struct lock_object *lock);
 #endif
 static void	assert_rw(const struct lock_object *lock, int what);
-static void	lock_rw(struct lock_object *lock, int how);
+static void	lock_rw(struct lock_object *lock, uintptr_t how);
 #ifdef KDTRACE_HOOKS
 static int	owner_rw(const struct lock_object *lock, struct thread **owner);
 #endif
-static int	unlock_rw(struct lock_object *lock);
+static uintptr_t unlock_rw(struct lock_object *lock);
 
 struct lock_class lock_class_rw = {
 	.lc_name = "rw",
@@ -141,7 +141,7 @@ assert_rw(const struct lock_object *lock, int what
 }
 
 void
-lock_rw(struct lock_object *lock, int how)
+lock_rw(struct lock_object *lock, uintptr_t how)
 {
 	struct rwlock *rw;
 
@@ -152,7 +152,7 @@ void
 		rw_rlock(rw);
 }
 
-int
+uintptr_t
 unlock_rw(struct lock_object *lock)
 {
 	struct rwlock *rw;
Index: sys/kern/kern_sx.c
===================================================================
--- sys/kern/kern_sx.c	(revision 255636)
+++ sys/kern/kern_sx.c	(working copy)
@@ -116,11 +116,11 @@ static void	assert_sx(const struct lock_object *lo
 #ifdef DDB
 static void	db_show_sx(const struct lock_object *lock);
 #endif
-static void	lock_sx(struct lock_object *lock, int how);
+static void	lock_sx(struct lock_object *lock, uintptr_t how);
 #ifdef KDTRACE_HOOKS
 static int	owner_sx(const struct lock_object *lock, struct thread **owner);
 #endif
-static int	unlock_sx(struct lock_object *lock);
+static uintptr_t unlock_sx(struct lock_object *lock);
 
 struct lock_class lock_class_sx = {
 	.lc_name = "sx",
@@ -156,7 +156,7 @@ assert_sx(const struct lock_object *lock, int what
 }
 
 void
-lock_sx(struct lock_object *lock, int how)
+lock_sx(struct lock_object *lock, uintptr_t how)
 {
 	struct sx *sx;
 
@@ -167,7 +167,7 @@ void
 	sx_slock(sx);
 }
 
-int
+uintptr_t
 unlock_sx(struct lock_object *lock)
 {
 	struct sx *sx;
Index: sys/kern/kern_lock.c
===================================================================
--- sys/kern/kern_lock.c	(revision 255636)
+++ sys/kern/kern_lock.c	(working copy)
@@ -142,12 +142,12 @@ static void	assert_lockmgr(const struct lock_objec
 #ifdef DDB
 static void	db_show_lockmgr(const struct lock_object *lock);
 #endif
-static void	lock_lockmgr(struct lock_object *lock, int how);
+static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
 #ifdef KDTRACE_HOOKS
 static int	owner_lockmgr(const struct lock_object *lock,
 		    struct thread **owner);
 #endif
-static int	unlock_lockmgr(struct lock_object *lock);
+static uintptr_t unlock_lockmgr(struct lock_object *lock);
 
 struct lock_class lock_class_lockmgr = {
 	.lc_name = "lockmgr",
@@ -350,13 +350,13 @@ assert_lockmgr(const struct lock_object *lock, int
 }
 
 static void
-lock_lockmgr(struct lock_object *lock, int how)
+lock_lockmgr(struct lock_object *lock, uintptr_t how)
 {
 
 	panic("lockmgr locks do not support sleep interlocking");
 }
 
-static int
+static uintptr_t
 unlock_lockmgr(struct lock_object *lock)
 {
 
Index: sys/kern/kern_rmlock.c
===================================================================
--- sys/kern/kern_rmlock.c	(revision 255636)
+++ sys/kern/kern_rmlock.c	(working copy)
@@ -77,11 +77,11 @@ static void	assert_rm(const struct lock_object *lo
 #ifdef DDB
 static void	db_show_rm(const struct lock_object *lock);
 #endif
-static void	lock_rm(struct lock_object *lock, int how);
+static void	lock_rm(struct lock_object *lock, uintptr_t how);
 #ifdef KDTRACE_HOOKS
 static int	owner_rm(const struct lock_object *lock, struct thread **owner);
 #endif
-static int	unlock_rm(struct lock_object *lock);
+static uintptr_t unlock_rm(struct lock_object *lock);
 
 struct lock_class lock_class_rm = {
 	.lc_name = "rm",
@@ -118,34 +118,61 @@ assert_rm(const struct lock_object *lock, int what
 	rm_assert((const struct rmlock *)lock, what);
 }
 
-/*
- * These do not support read locks because it would be hard to make
- * the tracker work correctly with the current lock_class API as you
- * would need to have the tracker pointer available when calling
- * rm_rlock() in lock_rm().
- */
 static void
-lock_rm(struct lock_object *lock, int how)
+lock_rm(struct lock_object *lock, uintptr_t how)
 {
 	struct rmlock *rm;
+	struct rm_priotracker *tracker;
 
 	rm = (struct rmlock *)lock;
-	if (how)
+	if (how == 0)
 		rm_wlock(rm);
-#ifdef INVARIANTS
-	else
-		panic("lock_rm called in read mode");
-#endif
+	else {
+		tracker = (struct rm_priotracker *)how;
+		rm_rlock(rm, tracker);
+	}
 }
 
-static int
+static uintptr_t
 unlock_rm(struct lock_object *lock)
 {
+	struct thread *td;
+	struct pcpu *pc;
 	struct rmlock *rm;
+	struct rm_queue *queue;
+	struct rm_priotracker *tracker;
+	uintptr_t how;
 
 	rm = (struct rmlock *)lock;
-	rm_wunlock(rm);
-	return (1);
+	tracker = NULL;
+	how = 0;
+	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
+	if (rm_wowned(rm))
+		rm_wunlock(rm);
+	else {
+		/*
+		 * Find the right rm_priotracker structure for curthread.
+		 * The guarantee about its uniqueness is given by the fact
+		 * we already asserted the lock wasn't recursively acquired.
+		 */
+		critical_enter();
+		td = curthread;
+		pc = pcpu_find(curcpu);
+		for (queue = pc->pc_rm_queue.rmq_next;
+		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
+			tracker = (struct rm_priotracker *)queue;
+			if ((tracker->rmp_rmlock == rm) &&
+			    (tracker->rmp_thread == td)) {
+				how = (uintptr_t)tracker;
+				break;
+			}
+		}
+		KASSERT(how != 0,
+		    ("no rm_priotracker found when lock held in read mode"));
+		critical_exit();
+		rm_runlock(rm, tracker);
+	}
+	return (how);
 }
 
 #ifdef KDTRACE_HOOKS
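
For review context, the sketch below is not part of the patch; it only illustrates how a sleep-primitive consumer round-trips the new uintptr_t value through the lock_class methods, mirroring what _sleep() and _cv_wait() do with lock_state above. With this change, lc_unlock() on an rmlock held in read mode returns the address of the caller's rm_priotracker, and lc_lock() reacquires the read lock through that same tracker; a value of 0 still means the lock was, and will be re-taken, write-locked. The function name wait_with_interlock() is hypothetical.

#include <sys/param.h>
#include <sys/lock.h>

/*
 * Illustrative only: drop an arbitrary interlock around a blocking
 * operation and reacquire it in the same mode afterwards.  For an
 * rmlock held in read mode, lock_state carries the rm_priotracker
 * pointer; 0 means the lock was held (and is re-taken) exclusively.
 */
static void
wait_with_interlock(struct lock_object *lock)
{
	struct lock_class *class;
	uintptr_t lock_state;

	class = LOCK_CLASS(lock);

	/* Remember how the lock was held and release it. */
	lock_state = class->lc_unlock(lock);

	/* ... block here while the interlock is dropped ... */

	/* Reacquire: write mode if lock_state == 0, read mode otherwise. */
	class->lc_lock(lock, lock_state);
}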