--- //depot/vendor/freebsd/src/sys/amd64/include/atomic.h 2006/12/29 15:31:38 +++ //depot/user/pjd/uidinfo_waitfree/sys/amd64/include/atomic.h 2007/08/21 10:44:20 --- .orig +++ @@ -74,6 +74,7 @@ int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src); int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src); u_int atomic_fetchadd_int(volatile u_int *p, u_int v); +u_long atomic_fetchadd_long(volatile u_long *p, u_long v); #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \ @@ -174,6 +175,25 @@ return (v); } +/* + * Atomically add the value of v to the long integer pointed to by p and return + * the previous value of *p. + */ +static __inline u_long +atomic_fetchadd_long(volatile u_long *p, u_long v) +{ + + __asm __volatile( + " " MPLOCKED " " + " xaddq %0, %1 ; " + "# atomic_fetchadd_long" + : "+r" (v), /* 0 (result) */ + "=m" (*p) /* 1 */ + : "m" (*p)); /* 2 */ + + return (v); +} + #if defined(_KERNEL) && !defined(SMP) /* --- //depot/vendor/freebsd/src/sys/i386/include/atomic.h 2006/12/29 15:52:25 +++ //depot/user/pjd/uidinfo_waitfree/sys/i386/include/atomic.h 2007/08/21 11:13:57 @@ -278,6 +278,13 @@ (u_int)src)); } +static __inline u_long +atomic_fetchadd_long(volatile u_long *p, u_long v) +{ + + return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v)); +} + /* Read the current value and store a zero in the destination. */ #ifdef __GNUCLIKE_ASM --- //depot/vendor/freebsd/src/sys/ia64/include/atomic.h 2007/07/30 22:10:56 +++ //depot/user/pjd/uidinfo_waitfree/sys/ia64/include/atomic.h 2007/08/21 11:13:57 @@ -370,4 +370,15 @@ #define atomic_fetchadd_int atomic_fetchadd_32 +static __inline u_long +atomic_fetchadd_long(volatile u_long *p, u_long v) +{ + u_long value; + + do { + value = *p; + } while (!atomic_cmpset_64(p, value, value + v)); + return (value); +} + #endif /* ! 
_MACHINE_ATOMIC_H_ */ --- //depot/vendor/freebsd/src/sys/kern/kern_resource.c 2007/07/17 01:12:20 +++ //depot/user/pjd/uidinfo_waitfree/sys/kern/kern_resource.c 2007/08/24 14:06:17 @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -66,7 +67,7 @@ static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures"); static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures"); #define UIHASH(uid) (&uihashtbl[(uid) & uihash]) -static struct mtx uihashtbl_mtx; +static struct rwlock uihashtbl_lock; static LIST_HEAD(uihashhead, uidinfo) *uihashtbl; static u_long uihash; /* size of hash table - 1 */ @@ -1169,12 +1170,12 @@ { uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash); - mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF); + rw_init(&uihashtbl_lock, "uidinfo hash"); } /* * Look up a uidinfo struct for the parameter uid. - * uihashtbl_mtx must be locked. + * uihashtbl_lock must be locked. */ static struct uidinfo * uilookup(uid) @@ -1183,7 +1184,7 @@ struct uihashhead *uipp; struct uidinfo *uip; - mtx_assert(&uihashtbl_mtx, MA_OWNED); + rw_assert(&uihashtbl_lock, RA_LOCKED); uipp = UIHASH(uid); LIST_FOREACH(uip, uipp, ui_hash) if (uip->ui_uid == uid) @@ -1203,12 +1204,12 @@ { struct uidinfo *old_uip, *uip; - mtx_lock(&uihashtbl_mtx); + rw_rlock(&uihashtbl_lock); uip = uilookup(uid); if (uip == NULL) { - mtx_unlock(&uihashtbl_mtx); + rw_runlock(&uihashtbl_lock); uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO); - mtx_lock(&uihashtbl_mtx); + rw_wlock(&uihashtbl_lock); /* * There's a chance someone created our uidinfo while we * were in malloc and not holding the lock, so we have to @@ -1219,13 +1220,13 @@ free(uip, M_UIDINFO); uip = old_uip; } else { - uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep); + refcount_init(&uip->ui_ref, 0); uip->ui_uid = uid; LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash); } } uihold(uip); - mtx_unlock(&uihashtbl_mtx); + rw_unlock(&uihashtbl_lock); return (uip); } @@ -1237,9 +1239,7 @@
struct uidinfo *uip; { - UIDINFO_LOCK(uip); - uip->ui_ref++; - UIDINFO_UNLOCK(uip); + refcount_acquire(&uip->ui_ref); } /*- @@ -1261,43 +1261,32 @@ uifree(uip) struct uidinfo *uip; { + int old; /* Prepare for optimal case. */ - UIDINFO_LOCK(uip); - - if (--uip->ui_ref != 0) { - UIDINFO_UNLOCK(uip); + old = uip->ui_ref; + if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1)) return; - } /* Prepare for suboptimal case. */ - uip->ui_ref++; - UIDINFO_UNLOCK(uip); - mtx_lock(&uihashtbl_mtx); - UIDINFO_LOCK(uip); - - /* - * We must subtract one from the count again because we backed out - * our initial subtraction before dropping the lock. - * Since another thread may have added a reference after we dropped the - * initial lock we have to test for zero again. - */ - if (--uip->ui_ref == 0) { + rw_wlock(&uihashtbl_lock); + if (refcount_release(&uip->ui_ref)) { LIST_REMOVE(uip, ui_hash); - mtx_unlock(&uihashtbl_mtx); + rw_wunlock(&uihashtbl_lock); if (uip->ui_sbsize != 0) - printf("freeing uidinfo: uid = %d, sbsize = %jd\n", - uip->ui_uid, (intmax_t)uip->ui_sbsize); + printf("freeing uidinfo: uid = %d, sbsize = %ld\n", + uip->ui_uid, uip->ui_sbsize); if (uip->ui_proccnt != 0) printf("freeing uidinfo: uid = %d, proccnt = %ld\n", uip->ui_uid, uip->ui_proccnt); - UIDINFO_UNLOCK(uip); FREE(uip, M_UIDINFO); return; } - - mtx_unlock(&uihashtbl_mtx); - UIDINFO_UNLOCK(uip); + /* + * Someone added a reference between atomic_cmpset_int() and + * rw_wlock(&uihashtbl_lock). + */ + rw_wunlock(&uihashtbl_lock); } /* @@ -1308,19 +1297,20 @@ chgproccnt(uip, diff, max) struct uidinfo *uip; int diff; - int max; + rlim_t max; { - UIDINFO_LOCK(uip); /* Don't allow them to exceed max, but allow subtraction. 
*/ - if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) { - UIDINFO_UNLOCK(uip); - return (0); + if (diff > 0 && max != 0) { + if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) + diff > max) { + atomic_subtract_long(&uip->ui_proccnt, (long)diff); + return (0); + } + } else { + atomic_add_long(&uip->ui_proccnt, (long)diff); + if (uip->ui_proccnt < 0) + printf("negative proccnt for uid = %d\n", uip->ui_uid); } - uip->ui_proccnt += diff; - if (uip->ui_proccnt < 0) - printf("negative proccnt for uid = %d\n", uip->ui_uid); - UIDINFO_UNLOCK(uip); return (1); } @@ -1334,19 +1324,19 @@ u_int to; rlim_t max; { - rlim_t new; + int diff; - UIDINFO_LOCK(uip); - new = uip->ui_sbsize + to - *hiwat; - /* Don't allow them to exceed max, but allow subtraction. */ - if (to > *hiwat && new > max) { - UIDINFO_UNLOCK(uip); - return (0); + diff = to - *hiwat; + if (diff > 0) { + if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) + diff > max) { + atomic_subtract_long(&uip->ui_sbsize, (long)diff); + return (0); + } + } else { + atomic_add_long(&uip->ui_sbsize, (long)diff); + if (uip->ui_sbsize < 0) + printf("negative sbsize for uid = %d\n", uip->ui_uid); } - uip->ui_sbsize = new; - UIDINFO_UNLOCK(uip); - *hiwat = to; - if (new < 0) - printf("negative sbsize for uid = %d\n", uip->ui_uid); - return (1); + *hiwat = to; + return (1); } --- //depot/vendor/freebsd/src/sys/kern/subr_witness.c 2007/06/16 23:33:01 +++ //depot/user/pjd/uidinfo_waitfree/sys/kern/subr_witness.c 2007/08/28 23:07:52 @@ -288,8 +288,7 @@ { "process group", &lock_class_mtx_sleep }, { "process lock", &lock_class_mtx_sleep }, { "session", &lock_class_mtx_sleep }, - { "uidinfo hash", &lock_class_mtx_sleep }, - { "uidinfo struct", &lock_class_mtx_sleep }, + { "uidinfo hash", &lock_class_rw }, #ifdef HWPMC_HOOKS { "pmc-sleep", &lock_class_mtx_sleep }, #endif --- //depot/vendor/freebsd/src/sys/powerpc/include/atomic.h 2007/07/10 04:46:53 +++ //depot/user/pjd/uidinfo_waitfree/sys/powerpc/include/atomic.h 
2007/08/21 11:13:57 @@ -461,5 +461,7 @@ } #define atomic_fetchadd_int atomic_fetchadd_32 +#define atomic_fetchadd_long(p, v) \ + (u_long)atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v)) #endif /* ! _MACHINE_ATOMIC_H_ */ --- //depot/vendor/freebsd/src/sys/sparc64/include/atomic.h 2005/09/27 17:40:33 +++ //depot/user/pjd/uidinfo_waitfree/sys/sparc64/include/atomic.h 2007/08/21 11:13:57 @@ -279,6 +279,7 @@ #define atomic_fetchadd_int atomic_add_int #define atomic_fetchadd_32 atomic_add_32 +#define atomic_fetchadd_long atomic_add_long #undef ATOMIC_GEN #undef atomic_cas --- //depot/vendor/freebsd/src/sys/sun4v/include/atomic.h 2006/10/05 06:18:11 +++ //depot/user/pjd/uidinfo_waitfree/sys/sun4v/include/atomic.h 2007/08/21 11:13:57 @@ -279,6 +279,7 @@ #define atomic_fetchadd_int atomic_add_int #define atomic_fetchadd_32 atomic_add_32 +#define atomic_fetchadd_long atomic_add_long #undef ATOMIC_GEN #undef atomic_cas --- //depot/vendor/freebsd/src/sys/sys/resourcevar.h 2007/06/09 21:51:31 +++ //depot/user/pjd/uidinfo_waitfree/sys/sys/resourcevar.h 2007/08/21 10:41:37 @@ -84,21 +84,17 @@ * * Locking guide: * (a) Constant from inception - * (b) Locked by ui_mtxp - * (c) Locked by global uihashtbl_mtx + * (b) Lockless, updated using atomics + * (c) Locked by global uihashtbl_lock */ struct uidinfo { LIST_ENTRY(uidinfo) ui_hash; /* (c) hash chain of uidinfos */ - rlim_t ui_sbsize; /* (b) socket buffer space consumed */ + long ui_sbsize; /* (b) socket buffer space consumed */ long ui_proccnt; /* (b) number of processes */ uid_t ui_uid; /* (a) uid */ u_int ui_ref; /* (b) reference count */ - struct mtx *ui_mtxp; /* protect all counts/limits */ }; -#define UIDINFO_LOCK(ui) mtx_lock((ui)->ui_mtxp) -#define UIDINFO_UNLOCK(ui) mtx_unlock((ui)->ui_mtxp) - struct proc; struct rusage_ext; struct thread; @@ -107,7 +103,7 @@ void addupc_task(struct thread *td, uintfptr_t pc, u_int ticks); void calccru(struct proc *p,
struct timeval *up, struct timeval *sp); void calcru(struct proc *p, struct timeval *up, struct timeval *sp); -int chgproccnt(struct uidinfo *uip, int diff, int maxval); +int chgproccnt(struct uidinfo *uip, int diff, rlim_t maxval); int chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t maxval); int fuswintr(void *base); --- //depot/vendor/freebsd/src/sys/sys/rwlock.h 2007/07/20 08:48:10 +++ //depot/user/pjd/uidinfo_waitfree/sys/sys/rwlock.h 2007/08/22 06:49:54 @@ -164,6 +164,12 @@ #define rw_runlock(rw) _rw_runlock((rw), LOCK_FILE, LOCK_LINE) #define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE) #define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE) +#define rw_unlock(rw) do { \ + if (rw_wowned(rw)) \ + rw_wunlock(rw); \ + else \ + rw_runlock(rw); \ +} while (0) #define rw_sleep(chan, rw, pri, wmesg, timo) \ _sleep((chan), &(rw)->lock_object, (pri), (wmesg), (timo))