Index: kern/kern_lock.c
===================================================================
--- kern/kern_lock.c	(revision 196781)
+++ kern/kern_lock.c	(working copy)
@@ -35,7 +35,6 @@
 #include <sys/lock_profile.h>
 #include <sys/lockmgr.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sleepqueue.h>
-#include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -62,6 +61,11 @@
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
 
+#ifdef ADAPTIVE_LOCKMGRS
+#define	ALK_RETRIES	10
+#define	ALK_LOOPS	10000
+#endif
+
 #ifndef INVARIANTS
 #define	_lockmgr_assert(lk, what, file, line)
 #define	TD_LOCKS_INC(td)
@@ -156,14 +160,6 @@
 #endif
 };
 
-#ifdef ADAPTIVE_LOCKMGRS
-static u_int alk_retries = 10;
-static u_int alk_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
-#endif
-
 static __inline struct thread *
 lockmgr_xholder(struct lock *lk)
 {
@@ -493,14 +489,14 @@
 			cpu_spinwait();
 		} else if (LK_CAN_ADAPT(lk, flags) &&
 		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
-		    spintries < alk_retries) {
+		    spintries < ALK_RETRIES) {
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
 				flags &= ~LK_INTERLOCK;
 			}
 			GIANT_SAVE();
 			spintries++;
-			for (i = 0; i < alk_loops; i++) {
+			for (i = 0; i < ALK_LOOPS; i++) {
 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
 					CTR4(KTR_LOCK,
 					    "%s: shared spinning on %p with %u and %u",
@@ -511,7 +507,7 @@
 					break;
 				cpu_spinwait();
 			}
-			if (i != alk_loops)
+			if (i != ALK_LOOPS)
 				continue;
 		}
 #endif
@@ -706,7 +702,7 @@
 			cpu_spinwait();
 		} else if (LK_CAN_ADAPT(lk, flags) &&
 		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
-		    spintries < alk_retries) {
+		    spintries < ALK_RETRIES) {
 			if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
 			    !atomic_cmpset_ptr(&lk->lk_lock, x,
 			    x | LK_EXCLUSIVE_SPINNERS))
@@ -717,7 +713,7 @@
 			}
 			GIANT_SAVE();
 			spintries++;
-			for (i = 0; i < alk_loops; i++) {
+			for (i = 0; i < ALK_LOOPS; i++) {
 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
 					CTR4(KTR_LOCK,
 					    "%s: shared spinning on %p with %u and %u",
@@ -727,7 +723,7 @@
 					break;
 				cpu_spinwait();
 			}
-			if (i != alk_loops)
+			if (i != ALK_LOOPS)
 				continue;
 		}
 #endif
Index: kern/kern_rwlock.c
===================================================================
--- kern/kern_rwlock.c	(revision 196781)
+++ kern/kern_rwlock.c	(working copy)
@@ -56,11 +56,8 @@
 #endif
 
 #ifdef ADAPTIVE_RWLOCKS
-static int rowner_retries = 10;
-static int rowner_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+#define	ROWNER_RETRIES	10
+#define	ROWNER_LOOPS	10000
 #endif
 
 #ifdef DDB
@@ -380,15 +377,15 @@
 				}
 				continue;
 			}
-		} else if (spintries < rowner_retries) {
+		} else if (spintries < ROWNER_RETRIES) {
 			spintries++;
-			for (i = 0; i < rowner_loops; i++) {
+			for (i = 0; i < ROWNER_LOOPS; i++) {
 				v = rw->rw_lock;
 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
 					break;
 				cpu_spinwait();
 			}
-			if (i != rowner_loops)
+			if (i != ROWNER_LOOPS)
 				continue;
 		}
 #endif
@@ -690,7 +687,7 @@
 			continue;
 		}
 		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
-		    spintries < rowner_retries) {
+		    spintries < ROWNER_RETRIES) {
 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
 				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
 				    v | RW_LOCK_WRITE_SPINNER)) {
@@ -698,15 +695,15 @@
 				}
 			}
 			spintries++;
-			for (i = 0; i < rowner_loops; i++) {
+			for (i = 0; i < ROWNER_LOOPS; i++) {
 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
 					break;
 				cpu_spinwait();
 			}
 #ifdef KDTRACE_HOOKS
-			spin_cnt += rowner_loops - i;
+			spin_cnt += ROWNER_LOOPS - i;
 #endif
-			if (i != rowner_loops)
+			if (i != ROWNER_LOOPS)
 				continue;
 		}
 #endif
Index: kern/kern_sx.c
===================================================================
--- kern/kern_sx.c	(revision 196781)
+++ kern/kern_sx.c	(working copy)
@@ -45,7 +45,6 @@
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sleepqueue.h>
 #include <sys/sx.h>
-#include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -72,6 +71,11 @@
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
 
+#ifdef ADAPTIVE_SX
+#define	ASX_RETRIES	10
+#define	ASX_LOOPS	10000
+#endif
+
 /*
  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
  * drop Giant anytime we have to sleep or if we adaptively spin.
@@ -134,14 +138,6 @@
 #define	_sx_assert(sx, what, file, line)
 #endif
 
-#ifdef ADAPTIVE_SX
-static u_int asx_retries = 10;
-static u_int asx_loops = 10000;
-SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
-SYSCTL_INT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
-SYSCTL_INT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
-#endif
-
 void
 assert_sx(struct lock_object *lock, int what)
 {
@@ -530,14 +526,14 @@
 			}
 			continue;
 		}
-		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+		} else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
+			GIANT_SAVE();
 			spintries++;
-			for (i = 0; i < asx_loops; i++) {
+			for (i = 0; i < ASX_LOOPS; i++) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
 					    "%s: shared spinning on %p with %u and %u",
 					    __func__, sx, spintries, i);
-				GIANT_SAVE();
 				x = sx->sx_lock;
 				if ((x & SX_LOCK_SHARED) == 0 ||
 				    SX_SHARERS(x) == 0)
@@ -547,7 +543,7 @@
 				spin_cnt++;
 #endif
 			}
-			if (i != asx_loops)
+			if (i != ASX_LOOPS)
 				continue;
 		}
 	}
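For reference, here is the pattern the RETRIES/LOOPS constants bound, pulled out of the kernel context. This is an illustrative userspace analogue written with C11 atomics, not code from the patch: the lock word, adaptive_lock(), and the sched_yield() fallback are stand-ins for the real lock words, acquisition paths, and sleepqueue blocking.

/*
 * Userspace sketch of bounded adaptive spinning: retry the fast path
 * up to RETRIES times, spinning at most LOOPS iterations per retry
 * before falling back to blocking.
 */
#include <sched.h>
#include <stdatomic.h>

#define	RETRIES	10			/* cf. ALK_RETRIES et al. */
#define	LOOPS	10000			/* cf. ALK_LOOPS et al. */

static atomic_int lock_word;		/* 0 = free, 1 = held */

static void
adaptive_lock(void)
{
	int i, spintries = 0;

	for (;;) {
		int expected = 0;

		/* Fast path: try to take the lock outright. */
		if (atomic_compare_exchange_weak(&lock_word, &expected, 1))
			return;
		if (spintries < RETRIES) {
			spintries++;
			for (i = 0; i < LOOPS; i++) {
				if (atomic_load(&lock_word) == 0)
					break;	/* looks free; retry */
				/* cpu_spinwait() would pause here */
			}
			if (i != LOOPS)
				continue;
		}
		/* Spinning did not pay off; give up the CPU. */
		sched_yield();
	}
}

Since the bounds are now compile-time constants, the compiler can fold them into immediates in these hot paths; the trade-off, visible in the removed SYSCTL_NODE/SYSCTL_INT/SYSCTL_UINT lines above, is losing run-time tuning through the debug.* sysctl tree.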