--- //depot/projects/smpng/sys/alpha/include/atomic.h 2005/09/15 19:40:43 +++ //depot/user/jhb/proc/alpha/include/atomic.h 2005/09/20 21:16:26 @@ -365,6 +365,27 @@ return (atomic_cmpset_64(p, cmpval, newval)); } +/* + * Atomically add the value of v to the integer pointed to by p and return + * the previous value of *p. + */ +static __inline u_int +atomic_fetchadd_32(volatile u_int32_t *p, u_int32_t v) +{ + u_int32_t value, temp; + +#ifdef __GNUCLIKE_ASM + __asm __volatile ( + "1:\tldl_l %0, %1\n\t" /* load old value */ + "addl %0, %3, %2\n\t" /* calculate new value */ + "stl_c %2, %1\n\t" /* attempt to store */ + "beq %2, 1b\n" /* spin if failed */ + : "=&r" (value), "=m" (*p), "=r" (temp) + : "r" (v), "m" (*p)); +#endif + return (value); +} + /* Operations on chars. */ #define atomic_set_char atomic_set_8 #define atomic_set_acq_char atomic_set_acq_8 @@ -412,6 +433,7 @@ #define atomic_load_acq_int atomic_load_acq_32 #define atomic_store_rel_int atomic_store_rel_32 #define atomic_readandclear_int atomic_readandclear_32 +#define atomic_fetchadd_int atomic_fetchadd_32 /* Operations on longs. */ #define atomic_set_long atomic_set_64 --- //depot/projects/smpng/sys/amd64/include/atomic.h 2005/09/15 19:40:43 +++ //depot/user/jhb/proc/amd64/include/atomic.h 2005/09/21 17:53:20 @@ -73,6 +73,7 @@ int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src); int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src); +u_int atomic_fetchadd_int(volatile u_int *p, u_int v); #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \ @@ -154,6 +155,25 @@ return (res); } +/* + * Atomically add the value of v to the integer pointed to by p and return + * the previous value of *p. 
+ */ +static __inline u_int +atomic_fetchadd_int(volatile u_int *p, u_int v) +{ + + __asm __volatile ( + " " __XSTRING(MPLOCKED) " " + " xaddl %0, %1 ; " + "# atomic_fetchadd_int" + : "+r" (v), /* 0 (result) */ + "=m" (*p) /* 1 */ + : "m" (*p)); /* 2 */ + + return (v); +} + #if defined(_KERNEL) && !defined(SMP) /* @@ -375,6 +395,7 @@ #define atomic_cmpset_acq_32 atomic_cmpset_acq_int #define atomic_cmpset_rel_32 atomic_cmpset_rel_int #define atomic_readandclear_32 atomic_readandclear_int +#define atomic_fetchadd_32 atomic_fetchadd_int /* Operations on 64-bit quad words. */ #define atomic_set_64 atomic_set_long --- //depot/projects/smpng/sys/arm/include/atomic.h 2005/07/29 15:18:26 +++ //depot/user/jhb/proc/arm/include/atomic.h 2005/09/20 21:16:26 @@ -262,6 +262,17 @@ return (__swp(0, p)); } +static __inline uint32_t +atomic_fetchadd_32(volatile uint32_t *p, uint32_t v) +{ + uint32_t value; + + do { + value = *p; + } while (!atomic_cmpset_32(p, value, value + v)); + return (value); +} + #undef __with_interrupts_disabled #endif /* _LOCORE */ @@ -291,5 +302,6 @@ #define atomic_store_ptr atomic_store_32 #define atomic_cmpset_ptr atomic_cmpset_32 #define atomic_set_ptr atomic_set_32 +#define atomic_fetchadd_int atomic_fetchadd_32 #endif /* _MACHINE_ATOMIC_H_ */ --- //depot/projects/smpng/sys/i386/include/atomic.h 2005/09/15 19:40:43 +++ //depot/user/jhb/proc/i386/include/atomic.h 2005/09/21 17:53:20 @@ -72,6 +72,7 @@ void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v) int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src); +u_int atomic_fetchadd_int(volatile u_int *p, u_int v); #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \ @@ -163,6 +164,25 @@ #endif /* defined(CPU_DISABLE_CMPXCHG) */ +/* + * Atomically add the value of v to the integer pointed to by p and return + * the result. 
+ */ +static __inline u_int +atomic_fetchadd_int(volatile u_int *p, u_int v) +{ + + __asm __volatile ( + " " __XSTRING(MPLOCKED) " " + " xaddl %0, %1 ; " + "# atomic_fetchadd_int" + : "+r" (v), /* 0 (result) */ + "=m" (*p) /* 1 */ + : "m" (*p)); /* 2 */ + + return (v); +} + #if defined(_KERNEL) && !defined(SMP) /* @@ -392,6 +412,7 @@ #define atomic_cmpset_acq_32 atomic_cmpset_acq_int #define atomic_cmpset_rel_32 atomic_cmpset_rel_int #define atomic_readandclear_32 atomic_readandclear_int +#define atomic_fetchadd_32 atomic_fetchadd_int /* Operations on pointers. */ #define atomic_set_ptr atomic_set_int --- //depot/projects/smpng/sys/ia64/include/atomic.h 2005/07/18 15:43:37 +++ //depot/user/jhb/proc/ia64/include/atomic.h 2005/09/20 21:16:26 @@ -342,4 +342,23 @@ #define atomic_readandclear_int atomic_readandclear_32 #define atomic_readandclear_long atomic_readandclear_64 +/* + * Atomically add the value of v to the integer pointed to by p and return + * the result. + * + * XXX: Should use the fetchadd instruction here. + */ +static __inline uint32_t +atomic_fetchadd_32(volatile uint32_t *p, uint32_t v) +{ + uint32_t value; + + do { + value = *p; + } while (!atomic_cmpset_32(p, value, value + v)); + return (value); +} + +#define atomic_fetchadd_int atomic_fetchadd_32 + #endif /* ! 
_MACHINE_ATOMIC_H_ */ --- //depot/projects/smpng/sys/kern/kern_proc.c 2005/05/27 14:58:46 +++ //depot/user/jhb/proc/kern/kern_proc.c 2005/05/27 19:03:47 @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -90,7 +91,6 @@ struct proclist zombproc; struct sx allproc_lock; struct sx proctree_lock; -struct mtx pargs_ref_lock; struct mtx ppeers_lock; uma_zone_t proc_zone; uma_zone_t ithread_zone; @@ -109,7 +109,6 @@ sx_init(&allproc_lock, "allproc"); sx_init(&proctree_lock, "proctree"); - mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF); mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF); LIST_INIT(&allproc); LIST_INIT(&zombproc); @@ -1090,7 +1089,7 @@ MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS, M_WAITOK); - pa->ar_ref = 1; + refcount_init(&pa->ar_ref, 1); pa->ar_length = len; return (pa); } @@ -1108,9 +1107,7 @@ if (pa == NULL) return; - PARGS_LOCK(pa); - pa->ar_ref++; - PARGS_UNLOCK(pa); + refcount_acquire(&pa->ar_ref); } void @@ -1119,12 +1116,8 @@ if (pa == NULL) return; - PARGS_LOCK(pa); - if (--pa->ar_ref == 0) { - PARGS_UNLOCK(pa); + if (refcount_release(&pa->ar_ref)) pargs_free(pa); - } else - PARGS_UNLOCK(pa); } /* --- //depot/projects/smpng/sys/kern/kern_prot.c 2005/05/27 14:58:46 +++ //depot/user/jhb/proc/kern/kern_prot.c 2005/05/27 19:03:47 @@ -54,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -1841,8 +1842,7 @@ register struct ucred *cr; MALLOC(cr, struct ucred *, sizeof(*cr), M_CRED, M_WAITOK | M_ZERO); - cr->cr_ref = 1; - cr->cr_mtxp = mtx_pool_find(mtxpool_sleep, cr); + refcount_init(&cr->cr_ref, 1); #ifdef MAC mac_init_cred(cr); #endif @@ -1857,9 +1857,7 @@ crhold(struct ucred *cr) { - mtx_lock(cr->cr_mtxp); - cr->cr_ref++; - mtx_unlock(cr->cr_mtxp); + refcount_acquire(&cr->cr_ref); return (cr); } @@ -1871,12 +1869,10 @@ void crfree(struct ucred *cr) { - struct mtx *mtxp = cr->cr_mtxp; - mtx_lock(mtxp); KASSERT(cr->cr_ref > 0, ("bad ucred refcount: %d", 
cr->cr_ref)); - if (--cr->cr_ref == 0) { - mtx_unlock(mtxp); + KASSERT(cr->cr_ref != 0xdeadc0de, ("dangling reference to ucred")); + if (refcount_release(&cr->cr_ref)) { /* * Some callers of crget(), such as nfs_statfs(), * allocate a temporary credential, but don't @@ -1895,8 +1891,6 @@ mac_destroy_cred(cr); #endif FREE(cr, M_CRED); - } else { - mtx_unlock(mtxp); } } @@ -1907,12 +1901,8 @@ int crshared(struct ucred *cr) { - int shared; - mtx_lock(cr->cr_mtxp); - shared = (cr->cr_ref > 1); - mtx_unlock(cr->cr_mtxp); - return (shared); + return (cr->cr_ref > 1); } /* --- //depot/projects/smpng/sys/kern/kern_resource.c 2005/06/15 20:53:05 +++ //depot/user/jhb/proc/kern/kern_resource.c 2005/06/15 21:53:47 @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -901,8 +902,7 @@ struct plimit *limp; limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK); - limp->pl_refcnt = 1; - limp->pl_mtx = mtx_pool_alloc(mtxpool_sleep); + refcount_init(&limp->pl_refcnt, 1); return (limp); } @@ -911,9 +911,7 @@ struct plimit *limp; { - LIM_LOCK(limp); - limp->pl_refcnt++; - LIM_UNLOCK(limp); + refcount_acquire(&limp->pl_refcnt); return (limp); } @@ -922,14 +920,9 @@ struct plimit *limp; { - LIM_LOCK(limp); KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow")); - if (--limp->pl_refcnt == 0) { - LIM_UNLOCK(limp); + if (refcount_release(&limp->pl_refcnt)) free((void *)limp, M_PLIMIT); - return; - } - LIM_UNLOCK(limp); } /* --- //depot/projects/smpng/sys/kern/uipc_mbuf.c 2005/09/15 18:47:58 +++ //depot/user/jhb/proc/kern/uipc_mbuf.c 2005/09/15 20:10:42 @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -203,7 +204,7 @@ mb->m_ext.ref_cnt = (ref_cnt == NULL) ? 
malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt; if (mb->m_ext.ref_cnt != NULL) { - *(mb->m_ext.ref_cnt) = 1; + refcount_init(mb->m_ext.ref_cnt, 1); mb->m_flags |= (M_EXT | flags); mb->m_ext.ext_buf = buf; mb->m_data = mb->m_ext.ext_buf; @@ -221,28 +222,13 @@ void mb_free_ext(struct mbuf *m) { - u_int cnt; int dofree; /* Account for lazy ref count assign. */ if (m->m_ext.ref_cnt == NULL) dofree = 1; else - dofree = 0; - - /* - * This is tricky. We need to make sure to decrement the - * refcount in a safe way but to also clean up if we're the - * last reference. This method seems to do it without race. - */ - while (dofree == 0) { - cnt = *(m->m_ext.ref_cnt); - if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) { - if (cnt == 1) - dofree = 1; - break; - } - } + dofree = refcount_release(m->m_ext.ref_cnt); if (dofree) { /* --- //depot/projects/smpng/sys/kern/vfs_export.c 2005/05/27 14:58:46 +++ //depot/user/jhb/proc/kern/vfs_export.c 2005/05/27 19:03:47 @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -115,7 +116,7 @@ np->netc_anon.cr_ngroups = argp->ex_anon.cr_ngroups; bcopy(argp->ex_anon.cr_groups, np->netc_anon.cr_groups, sizeof(np->netc_anon.cr_groups)); - np->netc_anon.cr_ref = 1; + refcount_init(&np->netc_anon.cr_ref, 1); mp->mnt_flag |= MNT_DEFEXPORTED; return (0); } @@ -174,7 +175,7 @@ np->netc_anon.cr_ngroups = argp->ex_anon.cr_ngroups; bcopy(argp->ex_anon.cr_groups, np->netc_anon.cr_groups, sizeof(np->netc_anon.cr_groups)); - np->netc_anon.cr_ref = 1; + refcount_init(&np->netc_anon.cr_ref, 1); return (0); out: free(np, M_NETADDR); --- //depot/projects/smpng/sys/nfsserver/nfs_srvsock.c 2005/01/31 22:15:49 +++ //depot/user/jhb/proc/nfsserver/nfs_srvsock.c 2005/05/03 21:35:05 @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -364,12 +365,11 @@ /* * XXX: This credential should be managed using crget(9) * and related calls. 
Right now, this tramples on any - * extensible data in the ucred, fails to initialize the - * mutex, and worse. This must be fixed before FreeBSD - * 5.3-RELEASE. + * extensible data in the ucred, and worse. This wasn't + * fixed before FreeBSD 5.3-RELEASE. */ bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred)); - nd->nd_cr.cr_ref = 1; + refcount_init(&nd->nd_cr.cr_ref, 1); nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++); nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++); len = fxdr_unsigned(int, *tl); --- //depot/projects/smpng/sys/nfsserver/nfs_srvsubs.c 2005/04/01 18:38:57 +++ //depot/user/jhb/proc/nfsserver/nfs_srvsubs.c 2005/05/03 21:35:05 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -1257,7 +1258,7 @@ NFSD_LOCK_DONTCARE(); bzero((caddr_t)outcred, sizeof (struct ucred)); - outcred->cr_ref = 1; + refcount_init(&outcred->cr_ref, 1); outcred->cr_uid = incred->cr_uid; outcred->cr_ngroups = incred->cr_ngroups; for (i = 0; i < incred->cr_ngroups; i++) --- //depot/projects/smpng/sys/powerpc/include/atomic.h 2005/09/15 19:40:43 +++ //depot/user/jhb/proc/powerpc/include/atomic.h 2005/09/20 21:16:26 @@ -444,4 +444,17 @@ #define atomic_cmpset_acq_ptr atomic_cmpset_acq_32 #define atomic_cmpset_rel_ptr atomic_cmpset_rel_32 +static __inline uint32_t +atomic_fetchadd_32(volatile uint32_t *p, uint32_t v) +{ + uint32_t value; + + do { + value = *p; + } while (!atomic_cmpset_32(p, value, value + v)); + return (value); +} + +#define atomic_fetchadd_int atomic_fetchadd_32 + #endif /* ! 
_MACHINE_ATOMIC_H_ */ --- //depot/projects/smpng/sys/sparc64/include/atomic.h 2005/07/18 15:43:37 +++ //depot/user/jhb/proc/sparc64/include/atomic.h 2005/08/12 19:36:56 @@ -277,6 +277,9 @@ ATOMIC_GEN(ptr, uintptr_t *, uintptr_t, uintptr_t, 64); +#define atomic_fetchadd_int atomic_add_int +#define atomic_fetchadd_32 atomic_add_32 + #undef ATOMIC_GEN #undef atomic_cas #undef atomic_cas_acq --- //depot/projects/smpng/sys/sys/mbuf.h 2005/09/15 18:47:58 +++ //depot/user/jhb/proc/sys/mbuf.h 2005/09/15 20:10:42 @@ -321,7 +321,7 @@ #define MEXT_REM_REF(m) do { \ KASSERT((m)->m_ext.ref_cnt != NULL, ("m_ext refcnt lazy NULL")); \ KASSERT(*((m)->m_ext.ref_cnt) > 0, ("m_ext refcnt < 0")); \ - atomic_subtract_int((m)->m_ext.ref_cnt, 1); \ + refcount_release((m)->m_ext.ref_cnt); \ } while(0) #define MEXT_ADD_REF(m) do { \ @@ -331,9 +331,9 @@ ("Unexpected mbuf type has lazy refcnt")); \ (m)->m_ext.ref_cnt = (u_int *)uma_find_refcnt( \ zone_clust, (m)->m_ext.ext_buf); \ - *((m)->m_ext.ref_cnt) = 2; \ + refcount_init((m)->m_ext.ref_cnt, 2); \ } else \ - atomic_add_int((m)->m_ext.ref_cnt, 1); \ + refcount_acquire((m)->m_ext.ref_cnt); \ } while (0) #ifdef WITNESS --- //depot/projects/smpng/sys/sys/proc.h 2005/09/15 19:40:43 +++ //depot/user/jhb/proc/sys/proc.h 2005/09/15 20:10:42 @@ -806,10 +806,6 @@ curthread->td_pflags &= ~TDP_NOSLEEPING; \ } while (0) -/* Lock and unlock process arguments. */ -#define PARGS_LOCK(p) mtx_lock(&pargs_ref_lock) -#define PARGS_UNLOCK(p) mtx_unlock(&pargs_ref_lock) - #define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; extern u_long pidhash; @@ -820,7 +816,6 @@ extern struct sx allproc_lock; extern struct sx proctree_lock; -extern struct mtx pargs_ref_lock; extern struct mtx ppeers_lock; extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0. */ extern struct proc proc0; /* Process slot for swapper. 
*/ --- //depot/projects/smpng/sys/sys/resourcevar.h 2005/01/07 18:05:05 +++ //depot/user/jhb/proc/sys/resourcevar.h 2005/03/02 19:28:03 @@ -78,13 +78,8 @@ struct plimit { struct rlimit pl_rlimit[RLIM_NLIMITS]; int pl_refcnt; /* number of references */ - struct mtx *pl_mtx; }; -#define LIM_LOCK(lim) mtx_lock((lim)->pl_mtx) -#define LIM_UNLOCK(lim) mtx_unlock((lim)->pl_mtx) -#define LIM_LOCK_ASSERT(lim, f) mtx_assert((lim)->pl_mtx, (f)) - /*- * Per uid resource consumption * --- //depot/projects/smpng/sys/sys/ucred.h 2005/06/29 18:47:53 +++ //depot/user/jhb/proc/sys/ucred.h 2005/06/29 18:55:40 @@ -55,7 +55,6 @@ struct prison *cr_prison; /* jail(2) */ #define cr_endcopy cr_label struct label *cr_label; /* MAC label */ - struct mtx *cr_mtxp; /* protect refcount */ }; #define NOCRED ((struct ucred *)0) /* no credential available */ #define FSCRED ((struct ucred *)-1) /* filesystem credential */ --- //depot/projects/smpng/sys/ufs/ufs/ufs_vnops.c 2005/09/15 18:47:58 +++ //depot/user/jhb/proc/ufs/ufs/ufs_vnops.c 2005/09/15 20:10:42 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -1364,7 +1365,7 @@ * XXX This seems to never be accessed out of * our context so a stack variable is ok. */ - ucred.cr_ref = 1; + refcount_init(&ucred.cr_ref, 1); ucred.cr_uid = ip->i_uid; ucred.cr_ngroups = 1; ucred.cr_groups[0] = dp->i_gid; @@ -2195,7 +2196,7 @@ * XXX This seems to never be accessed out of our * context so a stack variable is ok. */ - ucred.cr_ref = 1; + refcount_init(&ucred.cr_ref, 1); ucred.cr_uid = ip->i_uid; ucred.cr_ngroups = 1; ucred.cr_groups[0] = pdir->i_gid;