last update Tue Sep 12 16:51:33 PDT 2000 Index: alpha/alpha/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/machdep.c,v retrieving revision 1.92 diff -u -c -r1.92 machdep.c *** alpha/alpha/machdep.c 2000/09/07 01:32:38 1.92 --- alpha/alpha/machdep.c 2000/09/12 06:34:14 *************** *** 185,192 **** static void cpu_startup __P((void *)); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) - static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); - struct msgbuf *msgbufp=0; int bootverbose = 0, Maxmem = 0; --- 185,190 ---- *************** *** 373,390 **** (16*(ARG_MAX+(PAGE_SIZE*3)))); /* ! * Finally, allocate mbuf pool. */ ! { ! vm_offset_t mb_map_size; ! ! mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + ! (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt); ! mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); ! mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, ! &maxaddr, mb_map_size); ! mb_map->system_map = 1; ! } /* * Initialize callouts --- 371,382 ---- (16*(ARG_MAX+(PAGE_SIZE*3)))); /* ! * Initialize mbuf system. ! * Doing this early on (as opposed to through SYSINIT) is good as ! * we want to make sure that the mutex locks are setup prior to ! * network device drivers doing their stuff. */ ! mbinit(); /* * Initialize callouts Index: i386/i386/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/machdep.c,v retrieving revision 1.407 diff -u -c -r1.407 machdep.c *** i386/i386/machdep.c 2000/09/07 20:12:12 1.407 --- i386/i386/machdep.c 2000/09/12 06:34:14 *************** *** 139,146 **** static void cpu_startup __P((void *)); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) - static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); - int _udatasel, _ucodesel; u_int atdevbase; --- 139,144 ---- *************** *** 399,416 **** (16*(ARG_MAX+(PAGE_SIZE*3)))); /* ! * Finally, allocate mbuf pool. */ ! { ! vm_offset_t mb_map_size; ! ! mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + ! (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt); ! mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); ! mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, ! &maxaddr, mb_map_size); ! mb_map->system_map = 1; ! } /* * Initialize callouts --- 397,408 ---- (16*(ARG_MAX+(PAGE_SIZE*3)))); /* ! * Initialize mbuf system. ! * Doing this early on (as opposed to through SYSINIT) is good ! * as we want to make sure that the mutex locks are setup prior to ! * network device drivers doing their stuff. */ ! mbinit(); /* * Initialize callouts Index: i386/include/in_cksum.h =================================================================== RCS file: /home/ncvs/src/sys/i386/include/in_cksum.h,v retrieving revision 1.9 diff -u -c -r1.9 in_cksum.h *** i386/include/in_cksum.h 2000/05/06 18:18:32 1.9 --- i386/include/in_cksum.h 2000/09/09 10:48:29 *************** *** 39,44 **** --- 39,48 ---- #ifndef _MACHINE_IN_CKSUM_H_ #define _MACHINE_IN_CKSUM_H_ 1 + /* + * MP safe (alfred) + */ + #include #define in_cksum(m, len) in_cksum_skip(m, len, 0) Index: kern/kern_prot.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_prot.c,v retrieving revision 1.63 diff -u -c -r1.63 kern_prot.c *** kern/kern_prot.c 2000/09/05 22:10:22 1.63 --- kern/kern_prot.c 2000/09/10 02:35:00 *************** *** 1150,1156 **** crfree(cr) struct ucred *cr; { ! 
if (--cr->cr_ref == 0) { /* * Some callers of crget(), such as nfs_statfs(), * allocate a temporary credential, but don't --- 1150,1156 ---- crfree(cr) struct ucred *cr; { ! if (atomic_subtract_short(&cr->cr_ref, 1) == 0) { /* * Some callers of crget(), such as nfs_statfs(), * allocate a temporary credential, but don't Index: kern/uipc_domain.c =================================================================== RCS file: /home/ncvs/src/sys/kern/uipc_domain.c,v retrieving revision 1.22 diff -u -c -r1.22 uipc_domain.c *** kern/uipc_domain.c 1999/08/28 00:46:21 1.22 --- kern/uipc_domain.c 2000/09/08 22:07:19 *************** *** 64,69 **** --- 64,70 ---- static void pffasttimo __P((void *)); static void pfslowtimo __P((void *)); + mtx_t domain_mtx; struct domain *domains; /* *************** *** 77,83 **** register struct protosw *pr; int s; - s = splnet(); if (dp->dom_init) (*dp->dom_init)(); for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++){ --- 78,83 ---- *************** *** 93,99 **** */ max_hdr = max_linkhdr + max_protohdr; max_datalen = MHLEN - max_hdr; - splx(s); } /* --- 93,98 ---- *************** *** 108,118 **** struct domain *dp; dp = (struct domain *)data; ! s = splnet(); dp->dom_next = domains; domains = dp; ! splx(s); ! net_init_domain(dp); } /* ARGSUSED*/ --- 107,117 ---- struct domain *dp; dp = (struct domain *)data; ! mtx_enter(&domain_mtx, MTX_DEF); ! net_init_domain(dp); dp->dom_next = domains; domains = dp; ! mtx_exit(&domain_mtx, MTX_DEF); } /* ARGSUSED*/ *************** *** 133,138 **** --- 132,140 ---- */ socket_zone = zinit("socket", sizeof(struct socket), maxsockets, ZONE_INTERRUPT, 0); + + mtx_init(&socket_alloc_lock, "socket_zone, so_gencnt", MTX_DEF); + mtx_init(&domain_mtx, "protect adding new domains", MTX_DEF); if (max_linkhdr < 16) /* XXX */ max_linkhdr = 16; Index: kern/uipc_mbuf.c =================================================================== RCS file: /home/ncvs/src/sys/kern/uipc_mbuf.c,v retrieving revision 1.56 diff -u -c -r1.56 uipc_mbuf.c *** kern/uipc_mbuf.c 2000/08/25 22:28:08 1.56 --- kern/uipc_mbuf.c 2000/09/12 06:34:14 *************** *** 48,66 **** #include #include #ifdef INVARIANTS #include #endif - static void mbinit __P((void *)); - SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL) - struct mbuf *mbutl; struct mbstat mbstat; u_long mbtypes[MT_NTYPES]; - struct mbuf *mmbfree; - union mcluster *mclfree; - union mext_refcnt *mext_refcnt_free; int max_linkhdr; int max_protohdr; int max_hdr; --- 48,66 ---- #include #include + #include + #include + #include + #include + #include + #ifdef INVARIANTS #include #endif struct mbuf *mbutl; struct mbstat mbstat; u_long mbtypes[MT_NTYPES]; int max_linkhdr; int max_protohdr; int max_hdr; *************** *** 70,75 **** --- 70,86 ---- u_int m_mballoc_wid = 0; u_int m_clalloc_wid = 0; + /* + * freelist header structures... + * mbffree_lst, mclfree_lst, mcntfree_lst + */ + struct mbffree_lst mbffree_lst_hdr, *mmbfree; + struct mclfree_lst mclfree_lst_hdr, *mclfree; + struct mcntfree_lst mcntfree_lst_hdr, *mcntfree; + + /* + * sysctl(8) exported objects + */ SYSCTL_DECL(_kern_ipc); SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW, &max_linkhdr, 0, ""); *************** *** 95,135 **** static void m_reclaim __P((void)); #define NCL_INIT 2 #define NMB_INIT 16 #define REF_INIT (NMBCLUSTERS * 2) ! /* ARGSUSED*/ ! static void ! mbinit(dummy) ! void *dummy; { ! int s; ! mmbfree = NULL; ! mclfree = NULL; ! 
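A minimal sketch of how the locked free-list headers declared above (mmbfree and friends, with the m_head/m_mtx fields this patch introduces) are meant to be used. The two helpers below are hypothetical illustrations, not code from the patch:

	static __inline void
	mb_freelist_push(struct mbffree_lst *fl, struct mbuf *m)
	{
		mtx_enter(&fl->m_mtx, MTX_DEF);	/* serialize list access */
		m->m_next = fl->m_head;		/* link in at the head */
		fl->m_head = m;
		mtx_exit(&fl->m_mtx, MTX_DEF);
	}

	static __inline struct mbuf *
	mb_freelist_pop(struct mbffree_lst *fl)
	{
		struct mbuf *m;

		mtx_enter(&fl->m_mtx, MTX_DEF);
		if ((m = fl->m_head) != NULL)	/* unlink the head, if any */
			fl->m_head = m->m_next;
		mtx_exit(&fl->m_mtx, MTX_DEF);
		return (m);
	}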
mext_refcnt_free = NULL; mbstat.m_msize = MSIZE; mbstat.m_mclbytes = MCLBYTES; mbstat.m_minclsize = MINCLSIZE; mbstat.m_mlen = MLEN; mbstat.m_mhlen = MHLEN; ! s = splimp(); if (m_alloc_ref(REF_INIT) == 0) goto bad; if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0) goto bad; #if MCLBYTES <= PAGE_SIZE if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0) goto bad; #else /* It's OK to call contigmalloc in this context. */ if (m_clalloc(16, M_WAIT) == 0) goto bad; #endif ! splx(s); return; bad: panic("mbinit: failed to initialize mbuf subsystem!"); --- 106,195 ---- static void m_reclaim __P((void)); + /* Initial allocation numbers */ #define NCL_INIT 2 #define NMB_INIT 16 #define REF_INIT (NMBCLUSTERS * 2) ! /* ! * Full mbuf subsystem initialization done here. ! * ! * XXX: If ever we have system specific map setups to do, then move them to ! * machdep.c - for now, there is no reason for this stuff to go there. ! * We just call this explicitly, as most of the stuff that needs to get ! * done here should be done early on (i.e. from cpu_startup) anyway. ! */ ! void ! mbinit(void) { ! vm_offset_t maxaddr, mb_map_size; ! /* ! * Set up the mb_map, allocate requested VM space. ! */ ! mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + (nmbclusters + ! nmbufs / 4) * sizeof(union mext_refcnt); ! mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); ! mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr, ! mb_map_size); ! /* XXX: mb_map->system_map = 1; */ + /* + * Initialize the free list headers, and set up locks for the lists. + */ + mmbfree = (struct mbffree_lst *)&mbffree_lst_hdr; + mclfree = (struct mclfree_lst *)&mclfree_lst_hdr; + mcntfree = (struct mcntfree_lst *)&mcntfree_lst_hdr; + mmbfree->m_head = NULL; + mclfree->m_head = NULL; + mcntfree->m_head = NULL; + mtx_init(&mmbfree->m_mtx, "mbuf free list lock", MTX_DEF); + mtx_init(&mclfree->m_mtx, "mcluster free list lock", MTX_DEF); + mtx_init(&mcntfree->m_mtx, "m_ext counter free list lock", MTX_DEF); + + /* + * Initialize mbuf subsystem (sysctl exported) statistics structure. + */ mbstat.m_msize = MSIZE; mbstat.m_mclbytes = MCLBYTES; mbstat.m_minclsize = MINCLSIZE; mbstat.m_mlen = MLEN; mbstat.m_mhlen = MHLEN; ! /* ! * Perform some initial allocations. ! * If allocations are successful, locks are obtained in the allocation ! * routines, so we must release them if it works. ! * ! * XXX: We try to allocate as many reference counters as we'll ! * most need throughout the system's lifespan. ! * XXXXXX: Make sure we check whether the MCLBYTES > PAGE_SIZE ! * is still useful before bringing this in. ! */ if (m_alloc_ref(REF_INIT) == 0) goto bad; + else + mtx_exit(&mcntfree->m_mtx, MTX_DEF); + if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0) goto bad; + else + mtx_exit(&mmbfree->m_mtx, MTX_DEF); + #if MCLBYTES <= PAGE_SIZE if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0) goto bad; + else + mtx_exit(&mclfree->m_mtx, MTX_DEF); #else + /* XXXXXX */ /* It's OK to call contigmalloc in this context. */ if (m_clalloc(16, M_WAIT) == 0) goto bad; + else + mtx_exit(&mclfree->m_mtx, MTX_DEF); #endif ! return; bad: panic("mbinit: failed to initialize mbuf subsystem!"); *************** *** 138,144 **** /* * Allocate at least nmb reference count structs and place them * on the ref cnt free list. - * Must be called at splimp. */ int m_alloc_ref(nmb) --- 198,203 ---- *************** *** 157,174 **** * and if we are, we're probably not out of the woods anyway, * so leave this way for now. */ - if (mb_map_full) return (0); nbytes = round_page(nmb * sizeof(union mext_refcnt)); !
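The allocation routines called from mbinit() follow an unusual convention: on success they return with the corresponding free-list mutex still held, and the caller must release it; on failure the mutex was never taken. A sketch of a caller under that convention, using names from the patch:

	if (m_mballoc(NMB_INIT, M_DONTWAIT) != 0) {
		/* Success: m_mballoc() left mmbfree->m_mtx held. */
		struct mbuf *m;

		if ((m = mmbfree->m_head) != NULL)	/* consume under the lock */
			mmbfree->m_head = m->m_next;
		mtx_exit(&mmbfree->m_mtx, MTX_DEF);
	}
	/* On failure there is nothing to release. */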
if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT)) == NULL) return (0); nmb = nbytes / sizeof(union mext_refcnt); for (i = 0; i < nmb; i++) { ! ((union mext_refcnt *)p)->next_ref = mext_refcnt_free; ! mext_refcnt_free = (union mext_refcnt *)p; p += sizeof(union mext_refcnt); mbstat.m_refree++; } --- 216,243 ---- * and if we are, we're probably not out of the woods anyway, * so leave this way for now. */ if (mb_map_full) return (0); nbytes = round_page(nmb * sizeof(union mext_refcnt)); ! mtx_enter(&Giant, MTX_DEF); ! if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT)) == NULL) { ! mtx_exit(&Giant, MTX_DEF); return (0); + } + mtx_exit(&Giant, MTX_DEF); nmb = nbytes / sizeof(union mext_refcnt); + /* + * We don't let go of the mutex in order to avoid a race. + * It is up to the caller to let go of the mutex if the call + * was successful or just do nothing if it failed, because in that + * case, we wouldn't have grabbed the mutex at all. + */ + mtx_enter(&mcntfree->m_mtx, MTX_DEF); for (i = 0; i < nmb; i++) { ! ((union mext_refcnt *)p)->next_ref = mcntfree->m_head; ! mcntfree->m_head = (union mext_refcnt *)p; p += sizeof(union mext_refcnt); mbstat.m_refree++; } *************** *** 179,187 **** /* * Allocate at least nmb mbufs and place on mbuf free list. - * Must be called at splimp. */ - /* ARGSUSED */ int m_mballoc(nmb, how) register int nmb; --- 248,254 ---- *************** *** 191,223 **** register int i; int nbytes; - /* - * If we've hit the mbuf limit, stop allocating from mb_map, - * (or trying to) in order to avoid dipping into the section of - * mb_map which we've "reserved" for clusters. - */ - if ((nmb + mbstat.m_mbufs) > nmbufs) - return (0); - /* ! * Once we run out of map space, it will be impossible to get ! * any more (nothing is ever freed back to the map) ! * -- however you are not dead as m_reclaim might ! * still be able to free a substantial amount of space. ! * ! * XXX Furthermore, we can also work with "recycled" mbufs (when ! * we're calling with M_WAIT the sleep procedure will be woken ! * up when an mbuf is freed. See m_mballoc_wait()). */ ! if (mb_map_full) return (0); nbytes = round_page(nmb * MSIZE); p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT); if (p == 0 && how == M_WAIT) { mbstat.m_wait++; p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK); } /* * Either the map is now full, or `how' is M_NOWAIT and there --- 258,281 ---- register int i; int nbytes; /* ! * If we've hit the mbuf limit, stop allocating from mb_map. ! * Also, once we run out of map space, it will be impossible to ! * get any more (nothing is ever freed back to the map). */ ! if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) { ! mbstat.m_drops++; return (0); + } nbytes = round_page(nmb * MSIZE); + mtx_enter(&Giant, MTX_DEF); p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT); if (p == 0 && how == M_WAIT) { mbstat.m_wait++; p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK); } + mtx_exit(&Giant, MTX_DEF); /* * Either the map is now full, or `how' is M_NOWAIT and there *************** *** 227,235 **** return (0); nmb = nbytes / MSIZE; for (i = 0; i < nmb; i++) { ! ((struct mbuf *)p)->m_next = mmbfree; ! mmbfree = (struct mbuf *)p; p += MSIZE; } mbstat.m_mbufs += nmb; --- 285,301 ---- return (0); nmb = nbytes / MSIZE; + + /* + * We don't let go of the mutex in order to avoid a race. + * It is up to the caller to let go of the mutex if the call + * was successful or just do nothing if it failed, because in that + * case, we wouldn't have grabbed the mutex at all. 
+ */ + mtx_enter(&mmbfree->m_mtx, MTX_DEF); for (i = 0; i < nmb; i++) { ! ((struct mbuf *)p)->m_next = mmbfree->m_head; ! mmbfree->m_head = (struct mbuf *)p; p += MSIZE; } mbstat.m_mbufs += nmb; *************** *** 242,266 **** * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a * designated (mbuf_wait) time. */ struct mbuf * m_mballoc_wait(int caller, int type) { struct mbuf *p; - int s; ! s = splimp(); m_mballoc_wid++; ! if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK) m_mballoc_wid--; - splx(s); /* * Now that we (think) that we've got something, we will redo an * MGET, but avoid getting into another instance of m_mballoc_wait() * XXX: We retry to fetch _even_ if the sleep timed out. This is left * this way, purposely, in the [unlikely] case that an mbuf was ! * freed but the sleep was not awakened in time. */ p = NULL; switch (caller) { --- 308,343 ---- * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a * designated (mbuf_wait) time. + * + * Must be called with no held mutexes... may block. */ struct mbuf * m_mballoc_wait(int caller, int type) { struct mbuf *p; ! /* ! * This avoids a potential race. What we do is first place ourselves ! * in the queue (with asleep) and then later do an actual await() so ! * that if we happen to get a wakeup() in between, that the await() ! * does effectively nothing. Otherwise, what could happen is that ! * we increment m_mballoc_wid, at which point it will be decremented ! * by a (racing) MMBWAKEUP(), yet we will sleep on it nonetheless and ! * risk never being woken up (i.e. sleep on a m_mballoc_wid of 0)! ! */ ! asleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait); m_mballoc_wid++; ! if (await(PVM, mbuf_wait) == EWOULDBLOCK) m_mballoc_wid--; /* * Now that we (think) that we've got something, we will redo an * MGET, but avoid getting into another instance of m_mballoc_wait() + * * XXX: We retry to fetch _even_ if the sleep timed out. This is left * this way, purposely, in the [unlikely] case that an mbuf was ! * freed but the sleep was not awakened in time; then we are ! * willing to race for it. */ p = NULL; switch (caller) { --- 351,375 ---- panic("m_mballoc_wait: invalid caller (%d)", caller); } ! /* If we waited and got something... */ ! if (p != NULL) { ! /* ! * We don't need to grab a mutex here, since even if ! * mmbfree->m_head changes to NULL (and it wasn't NULL right ! * after our allocation) right here, then so let it be, ! * we just won't wakeup() anybody. ! * ! * XXX: In other words, we're willing to race here for now. ! */ mbstat.m_wait++; ! if (mmbfree->m_head != NULL) MMBWAKEUP(); } ! return (p); } + /* XXXXXX */ #if MCLBYTES > PAGE_SIZE static int i_want_my_mcl; --- 397,408 ---- SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start, &mclalloc_kp); #endif + /* XXXXXX */ /* * Allocate some number of mbuf clusters * and place on cluster free list. */ int m_clalloc(ncl, how) register int ncl; *************** *** 329,354 **** int npg; /* * If we've hit the mcluster number limit, stop allocating from ! * mb_map, (or trying to) in order to avoid dipping into the section ! * of mb_map which we've "reserved" for mbufs. ! */ ! if ((ncl + mbstat.m_clusters) > nmbclusters) { ! mbstat.m_drops++; ! return (0); ! } ! ! /* ! * Once we run out of map space, it will be impossible ! * to get any more (nothing is ever freed back to the ! * map). From this point on, we solely rely on freed ! * mclusters. */ ! if (mb_map_full) { mbstat.m_drops++; return (0); } #if MCLBYTES > PAGE_SIZE if (how != M_WAIT) { i_want_my_mcl += ncl; --- 413,428 ---- int npg; /* + * If the map is now full, stop; nothing will ever be freed back to it. * If we've hit the mcluster number limit, stop allocating from ! * mb_map. */ ! if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) { mbstat.m_drops++; return (0); } + /* XXXXXX */ #if MCLBYTES > PAGE_SIZE if (how != M_WAIT) { i_want_my_mcl += ncl; *************** *** 359,368 **** p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul, ~0ul, PAGE_SIZE, 0, mb_map); } ! #else npg = ncl; p = (caddr_t)kmem_malloc(mb_map, ctob(npg), how != M_WAIT ? M_NOWAIT : M_WAITOK); ncl = ncl * PAGE_SIZE / MCLBYTES; #endif /* --- 433,444 ---- p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul, ~0ul, PAGE_SIZE, 0, mb_map); } ! #else /* XXXXXX */ npg = ncl; + mtx_enter(&Giant, MTX_DEF); p = (caddr_t)kmem_malloc(mb_map, ctob(npg), how != M_WAIT ? M_NOWAIT : M_WAITOK); + mtx_exit(&Giant, MTX_DEF); ncl = ncl * PAGE_SIZE / MCLBYTES; #endif /* *************** *** 374,382 **** return (0); } for (i = 0; i < ncl; i++) { ! ((union mcluster *)p)->mcl_next = mclfree; ! mclfree = (union mcluster *)p; p += MCLBYTES; mbstat.m_clfree++; } --- 450,465 ---- return (0); } + /* + * We don't let go of the mutex in order to avoid a race. + * It is up to the caller to let go of the mutex if the call + * was successful or just do nothing if it failed, because in that + * case, we wouldn't have grabbed the mutex at all. + */ + mtx_enter(&mclfree->m_mtx, MTX_DEF); for (i = 0; i < ncl; i++) { ! ((union mcluster *)p)->mcl_next = mclfree->m_head; ! mclfree->m_head = (union mcluster *)p; p += MCLBYTES; mbstat.m_clfree++; } *************** *** 389,409 **** * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will * sleep for a designated amount of time (mbuf_wait) or until we're woken up * due to sudden mcluster availability. */ caddr_t m_clalloc_wait(void) { caddr_t p; - int s; #ifdef __i386__ /* If in interrupt context, and INVARIANTS, maintain sanity and die. */ KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT")); #endif ! /* Sleep until something's available or until we expire. */ m_clalloc_wid++; ! if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK) m_clalloc_wid--; /* --- 472,502 ---- * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will * sleep for a designated amount of time (mbuf_wait) or until we're woken up * due to sudden mcluster availability. + * + * Must be called with no held mutexes... may block. */ caddr_t m_clalloc_wait(void) { caddr_t p; #ifdef __i386__ /* If in interrupt context, and INVARIANTS, maintain sanity and die. */ KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT")); #endif ! /* ! * This avoids a potential race. What we do is first place ourselves ! * in the queue (with asleep) and then later do an actual await() so ! * that if we happen to get a wakeup() in between, that the await() ! * does effectively nothing. Otherwise, what could happen is that ! * we increment m_clalloc_wid, at which point it will be decremented ! * by a (racing) MCLWAKEUP(), yet we will sleep on it nonetheless and ! * risk never being woken up (i.e. sleep on a m_clalloc_wid of 0)! ! */ ! asleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait); m_clalloc_wid++; ! if (await(PVM, mbuf_wait) == EWOULDBLOCK) m_clalloc_wid--; /* *************** *** 413,427 **** p = NULL; _MCLALLOC(p, M_DONTWAIT); ! s = splimp(); ! if (p != NULL) { /* We waited and got something... */ mbstat.m_wait++; ! /* Wake up another if we have more free. */ ! if (mclfree != NULL) MCLWAKEUP(); } - splx(s); return (p); } --- 506,526 ---- p = NULL; _MCLALLOC(p, M_DONTWAIT); ! /* If we waited and got something ... */ ! if (p != NULL) { ! /* ! * We don't need to grab a mutex here, since even if ! * mclfree->m_head changes to NULL (and it wasn't NULL right ! * after our allocation) right here, then so let it be, ! * we just won't wakeup() anybody. ! * ! * XXX: In other words, we're willing to race here for now. ! */ mbstat.m_wait++; ! if (mclfree->m_head != NULL) MCLWAKEUP(); } return (p); } *************** *** 504,516 **** { register struct domain *dp; register struct protosw *pr; - int s = splimp(); for (dp = domains; dp; dp = dp->dom_next) for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) if (pr->pr_drain) (*pr->pr_drain)(); - splx(s); mbstat.m_drain++; } --- 603,617 ---- { register struct domain *dp; register struct protosw *pr; + /* + * XXX: MAKE SURE DRAINERS DONT DIRECTLY TOUCH LISTS UNLESS THEY GRAB + * APPROPRIATE MUTEX!!!
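The asleep()/await() conversion in m_mballoc_wait() and m_clalloc_wait() closes a lost-wakeup window. The idiom, with explanatory comments added (the calls mirror the patch; asleep() only registers on the sleep queue, await() does the actual sleep):

	asleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait);	/* queue ourselves first */
	m_clalloc_wid++;					/* now advertise the waiter */
	if (await(PVM, mbuf_wait) == EWOULDBLOCK)		/* finally sleep */
		m_clalloc_wid--;				/* timed out: retract */

Had the counter been incremented before a plain tsleep(), a racing MCLWAKEUP() could decrement it and issue its wakeup() before the sleep began, leaving the thread asleep with nobody left to wake it.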
+ */ for (dp = domains; dp; dp = dp->dom_next) for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) if (pr->pr_drain) (*pr->pr_drain)(); mbstat.m_drain++; } Index: kern/uipc_socket.c =================================================================== RCS file: /home/ncvs/src/sys/kern/uipc_socket.c,v retrieving revision 1.82 diff -u -c -r1.82 uipc_socket.c *** kern/uipc_socket.c 2000/09/05 22:10:24 1.82 --- kern/uipc_socket.c 2000/09/11 07:52:46 *************** *** 76,81 **** --- 76,82 ---- { 1, filt_sowattach, filt_sowdetach, filt_sowrite }, }; + mtx_t socket_alloc_lock; /* protect socket_zone and so_gencnt */ struct vm_zone *socket_zone; so_gen_t so_gencnt; /* generation count for sockets */ *************** *** 109,122 **** { struct socket *so; so = zalloci(socket_zone); if (so) { - /* XXX race condition for reentrant kernel */ bzero(so, sizeof *so); so->so_gencnt = ++so_gencnt; so->so_zone = socket_zone; TAILQ_INIT(&so->so_aiojobq); } return so; } --- 110,124 ---- { struct socket *so; + mtx_enter(&socket_alloc_lock, MTX_DEF); so = zalloci(socket_zone); if (so) { bzero(so, sizeof *so); so->so_gencnt = ++so_gencnt; so->so_zone = socket_zone; TAILQ_INIT(&so->so_aiojobq); } + mtx_exit(&socket_alloc_lock, MTX_DEF); return so; } *************** *** 153,158 **** --- 155,162 ---- if (so == 0) return (ENOBUFS); + mtx_init(&so->so_mtx, "socket lock", MTX_DEF); + TAILQ_INIT(&so->so_incomp); TAILQ_INIT(&so->so_comp); so->so_type = type; *************** *** 175,185 **** struct sockaddr *nam; struct proc *p; { - int s = splnet(); int error; error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p); ! splx(s); return (error); } --- 179,189 ---- struct sockaddr *nam; struct proc *p; { int error; + mtx_enter(&so->so_mtx, MTX_DEF); error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p); ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } *************** *** 188,193 **** --- 192,198 ---- struct socket *so; { + mtx_enter(&socket_alloc_lock, MTX_DEF); so->so_gencnt = ++so_gencnt; if (so->so_rcv.sb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, *************** *** 206,211 **** --- 211,217 ---- } crfree(so->so_cred); zfreei(so->so_zone, so); + mtx_exit(&socket_alloc_lock, MTX_DEF); } int *************** *** 214,225 **** int backlog; struct proc *p; { ! int s, error; ! s = splnet(); error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p); if (error) { ! splx(s); return (error); } if (TAILQ_EMPTY(&so->so_comp)) --- 220,231 ---- int backlog; struct proc *p; { ! int error; ! mtx_enter(&so->so_mtx, MTX_DEF); error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p); if (error) { ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } if (TAILQ_EMPTY(&so->so_comp)) *************** *** 227,233 **** if (backlog < 0 || backlog > somaxconn) backlog = somaxconn; so->so_qlimit = backlog; ! splx(s); return (0); } --- 233,239 ---- if (backlog < 0 || backlog > somaxconn) backlog = somaxconn; so->so_qlimit = backlog; ! mtx_exit(&so->so_mtx, MTX_DEF); return (0); } *************** *** 272,280 **** soclose(so) register struct socket *so; { - int s = splnet(); /* conservative */ int error = 0; funsetown(so->so_sigio); if (so->so_options & SO_ACCEPTCONN) { struct socket *sp, *sonext; --- 278,286 ---- soclose(so) register struct socket *so; { int error = 0; + mtx_enter(&so->so_mtx, MTX_DEF); funsetown(so->so_sigio); if (so->so_options & SO_ACCEPTCONN) { struct socket *sp, *sonext; *************** *** 325,336 **** panic("soclose: NOFDREF"); so->so_state |= SS_NOFDREF; sofree(so); ! splx(s); return (error); } /* ! 
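The uipc_socket.c conversions in this patch all take the same shape: the splnet()/splx() bracket around each protocol-user request becomes an enter/exit pair on the new per-socket mutex. A sketch of the pattern (the function name here is hypothetical; so_mtx is the field the patch adds to struct socket):

	int
	so_locked_op(struct socket *so, struct sockaddr *nam, struct proc *p)
	{
		int error;

		mtx_enter(&so->so_mtx, MTX_DEF);	/* was: s = splnet(); */
		error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
		mtx_exit(&so->so_mtx, MTX_DEF);		/* was: splx(s); */
		return (error);
	}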
* Must be called at splnet... */ int soabort(so) --- 331,342 ---- panic("soclose: NOFDREF"); so->so_state |= SS_NOFDREF; sofree(so); ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } /* ! * Must be called with the socket locked */ int soabort(so) *************** *** 338,343 **** --- 344,350 ---- { int error; + mtx_assert(&so->so_mtx, MA_OWNED); error = (*so->so_proto->pr_usrreqs->pru_abort)(so); if (error) { sofree(so); *************** *** 351,359 **** register struct socket *so; struct sockaddr **nam; { - int s = splnet(); int error; if ((so->so_state & SS_NOFDREF) == 0) panic("soaccept: !NOFDREF"); so->so_state &= ~SS_NOFDREF; --- 358,366 ---- register struct socket *so; struct sockaddr **nam; { int error; + mtx_enter(&so->so_mtx, MTX_DEF); if ((so->so_state & SS_NOFDREF) == 0) panic("soaccept: !NOFDREF"); so->so_state &= ~SS_NOFDREF; *************** *** 364,370 **** *nam = 0; error = 0; } ! splx(s); return (error); } --- 371,377 ---- *nam = 0; error = 0; } ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } *************** *** 374,385 **** struct sockaddr *nam; struct proc *p; { - int s; int error; if (so->so_options & SO_ACCEPTCONN) return (EOPNOTSUPP); ! s = splnet(); /* * If protocol is connection-based, can only connect once. * Otherwise, if connected, try to disconnect first. --- 381,391 ---- struct sockaddr *nam; struct proc *p; { int error; if (so->so_options & SO_ACCEPTCONN) return (EOPNOTSUPP); ! mtx_enter(&so->so_mtx, MTX_DEF); /* * If protocol is connection-based, can only connect once. * Otherwise, if connected, try to disconnect first. *************** *** 392,398 **** error = EISCONN; else error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p); ! splx(s); return (error); } --- 398,404 ---- error = EISCONN; else error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p); ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } *************** *** 401,411 **** register struct socket *so1; struct socket *so2; { - int s = splnet(); int error; error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); ! splx(s); return (error); } --- 407,417 ---- register struct socket *so1; struct socket *so2; { int error; + mtx_enter(&so1->so_mtx, MTX_DEF); error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); ! mtx_exit(&so1->so_mtx, MTX_DEF); return (error); } *************** *** 413,421 **** sodisconnect(so) register struct socket *so; { - int s = splnet(); int error; if ((so->so_state & SS_ISCONNECTED) == 0) { error = ENOTCONN; goto bad; --- 419,427 ---- sodisconnect(so) register struct socket *so; { int error; + mtx_enter(&so->so_mtx, MTX_DEF); if ((so->so_state & SS_ISCONNECTED) == 0) { error = ENOTCONN; goto bad; *************** *** 426,432 **** } error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); bad: ! splx(s); return (error); } --- 432,438 ---- } error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); bad: ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } *************** *** 490,509 **** p->p_stats->p_ru.ru_msgsnd++; if (control) clen = control->m_len; ! #define snderr(errno) { error = errno; splx(s); goto release; } restart: ! error = sblock(&so->so_snd, SBLOCKWAIT(flags)); if (error) goto out; do { - s = splnet(); if (so->so_state & SS_CANTSENDMORE) snderr(EPIPE); if (so->so_error) { error = so->so_error; so->so_error = 0; - splx(s); goto release; } if ((so->so_state & SS_ISCONNECTED) == 0) { --- 496,518 ---- p->p_stats->p_ru.ru_msgsnd++; if (control) clen = control->m_len; ! #define snderr(errno) \ ! do { \ ! error = errno; \ ! goto release; \ !
} while(0) + mtx_enter(&so->so_mtx, MTX_DEF); restart: ! error = sblock(so, &so->so_snd, SBLOCKWAIT(flags)); if (error) goto out; do { if (so->so_state & SS_CANTSENDMORE) snderr(EPIPE); if (so->so_error) { error = so->so_error; so->so_error = 0; goto release; } if ((so->so_state & SS_ISCONNECTED) == 0) { *************** *** 533,545 **** if (so->so_state & SS_NBIO) snderr(EWOULDBLOCK); sbunlock(&so->so_snd); ! error = sbwait(&so->so_snd); ! splx(s); if (error) goto out; goto restart; } ! splx(s); mp = ⊤ space -= clen; do { --- 542,553 ---- if (so->so_state & SS_NBIO) snderr(EWOULDBLOCK); sbunlock(&so->so_snd); ! error = sbwait(so, &so->so_snd); if (error) goto out; goto restart; } ! mtx_exit(&so->so_mtx, MTX_DEF); mp = ⊤ space -= clen; do { *************** *** 555,560 **** --- 563,569 ---- MGETHDR(m, M_WAIT, MT_DATA); if (m == NULL) { error = ENOBUFS; + mtx_enter(&so->so_mtx, MTX_DEF); goto release; } mlen = MHLEN; *************** *** 564,569 **** --- 573,579 ---- MGET(m, M_WAIT, MT_DATA); if (m == NULL) { error = ENOBUFS; + mtx_enter(&so->so_mtx, MTX_DEF); goto release; } mlen = MLEN; *************** *** 590,597 **** m->m_len = len; *mp = m; top->m_pkthdr.len += len; ! if (error) goto release; mp = &m->m_next; if (resid <= 0) { if (flags & MSG_EOR) --- 600,609 ---- m->m_len = len; *mp = m; top->m_pkthdr.len += len; ! if (error) { ! mtx_enter(&so->so_mtx, MTX_DEF); goto release; + } mp = &m->m_next; if (resid <= 0) { if (flags & MSG_EOR) *************** *** 601,607 **** } while (space > 0 && atomic); if (dontroute) so->so_options |= SO_DONTROUTE; ! s = splnet(); /* XXX */ /* * XXX all the SS_CANTSENDMORE checks previously * done could be out of date. We could have recieved --- 613,619 ---- } while (space > 0 && atomic); if (dontroute) so->so_options |= SO_DONTROUTE; ! mtx_enter(&so->so_mtx, MTX_DEF); /* XXX */ /* * XXX all the SS_CANTSENDMORE checks previously * done could be out of date. We could have recieved *************** *** 625,631 **** /* If there is more to send set PRUS_MORETOCOME */ (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, top, addr, control, p); - splx(s); if (dontroute) so->so_options &= ~SO_DONTROUTE; clen = 0; --- 637,642 ---- *************** *** 640,645 **** --- 651,657 ---- release: sbunlock(&so->so_snd); out: + mtx_exit(&so->so_mtx, MTX_DEF); if (top) m_freem(top); if (control) *************** *** 711,720 **** (*pr->pr_usrreqs->pru_rcvd)(so, 0); restart: ! error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); ! if (error) return (error); ! s = splnet(); m = so->so_rcv.sb_mb; /* --- 723,734 ---- (*pr->pr_usrreqs->pru_rcvd)(so, 0); restart: ! mtx_enter(&so->so_mtx, MTX_DEF); ! error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags)); ! if (error) { ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); ! } m = so->so_rcv.sb_mb; /* *************** *** 765,772 **** goto release; } sbunlock(&so->so_rcv); ! error = sbwait(&so->so_rcv); ! splx(s); if (error) return (error); goto restart; --- 779,786 ---- goto release; } sbunlock(&so->so_rcv); ! error = sbwait(so, &so->so_rcv); ! mtx_exit(&so->so_mtx, MTX_DEF); if (error) return (error); goto restart; *************** *** 848,856 **** * block interrupts again. */ if (mp == 0) { ! splx(s); error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); ! s = splnet(); if (error) goto release; } else --- 862,870 ---- * block interrupts again. */ if (mp == 0) { ! mtx_exit(&so->so_mtx, MTX_DEF); error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); ! 
mtx_enter(&so->so_mtx, MTX_DEF); if (error) goto release; } else *************** *** 880,887 **** if (flags & MSG_PEEK) moff += len; else { ! if (mp) *mp = m_copym(m, 0, len, M_WAIT); m->m_data += len; m->m_len -= len; so->so_rcv.sb_cc -= len; --- 894,914 ---- if (flags & MSG_PEEK) moff += len; else { ! if (mp) { ! /* ! * XXX: can block so i need to drop ! * my socket mutex ! * solutions: ! * pass a mutex to m_copym to msleep ! * on ! * try M_NOWAIT first, then M_WAIT ! * ignore the problem and don't ! * unlock ! */ ! mtx_exit(&so->so_mtx, MTX_DEF); *mp = m_copym(m, 0, len, M_WAIT); + mtx_enter(&so->so_mtx, MTX_DEF); + } m->m_data += len; m->m_len -= len; so->so_rcv.sb_cc -= len; *************** *** 913,922 **** !sosendallatonce(so) && !nextrecord) { if (so->so_error || so->so_state & SS_CANTRCVMORE) break; ! error = sbwait(&so->so_rcv); if (error) { sbunlock(&so->so_rcv); ! splx(s); return (0); } m = so->so_rcv.sb_mb; --- 940,949 ---- !sosendallatonce(so) && !nextrecord) { if (so->so_error || so->so_state & SS_CANTRCVMORE) break; ! error = sbwait(so, &so->so_rcv); if (error) { sbunlock(&so->so_rcv); ! mtx_exit(&so->so_mtx, MTX_DEF); return (0); } m = so->so_rcv.sb_mb; *************** *** 939,945 **** if (orig_resid == uio->uio_resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { sbunlock(&so->so_rcv); ! splx(s); goto restart; } --- 966,972 ---- if (orig_resid == uio->uio_resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { sbunlock(&so->so_rcv); ! mtx_exit(&so->so_mtx, MTX_DEF); goto restart; } *************** *** 947,953 **** *flagsp |= flags; release: sbunlock(&so->so_rcv); ! splx(s); return (error); } --- 974,980 ---- *flagsp |= flags; release: sbunlock(&so->so_rcv); ! mtx_exit(&so->so_mtx, MTX_DEF); return (error); } *************** *** 975,988 **** register int s; struct sockbuf asb; sb->sb_flags |= SB_NOINTR; ! (void) sblock(sb, M_WAITOK); ! s = splimp(); socantrcvmore(so); sbunlock(sb); asb = *sb; bzero((caddr_t)sb, sizeof (*sb)); ! splx(s); if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) (*pr->pr_domain->dom_dispose)(asb.sb_mb); sbrelease(&asb, so); --- 1002,1015 ---- register int s; struct sockbuf asb; + mtx_enter(&so->so_mtx, MTX_DEF); sb->sb_flags |= SB_NOINTR; ! (void) sblock(so, sb, M_WAITOK); socantrcvmore(so); sbunlock(sb); asb = *sb; bzero((caddr_t)sb, sizeof (*sb)); ! mtx_exit(&so->so_mtx, MTX_DEF); if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) (*pr->pr_domain->dom_dispose)(asb.sb_mb); sbrelease(&asb, so); *************** *** 1496,1503 **** sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p) { int revents = 0; - int s = splnet(); if (events & (POLLIN | POLLRDNORM)) if (soreadable(so)) revents |= events & (POLLIN | POLLRDNORM); --- 1523,1530 ---- sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p) { int revents = 0; + mtx_enter(&so->so_mtx, MTX_DEF); if (events & (POLLIN | POLLRDNORM)) if (soreadable(so)) revents |= events & (POLLIN | POLLRDNORM); *************** *** 1522,1528 **** } } ! splx(s); return (revents); } --- 1549,1555 ---- } } ! mtx_exit(&so->so_mtx, MTX_DEF); return (revents); } *************** *** 1530,1542 **** filt_sorattach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; - int s = splnet(); if (so->so_options & SO_ACCEPTCONN) kn->kn_fop = &solisten_filtops; SLIST_INSERT_HEAD(&so->so_rcv.sb_sel.si_note, kn, kn_selnext); so->so_rcv.sb_flags |= SB_KNOTE; ! 
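Note the pattern in soreceive() above: operations that may sleep, such as uiomove() and m_copym(..., M_WAIT), are bracketed by dropping and re-taking so_mtx, since sleeping with the mutex held would stall every other thread working on the same socket. The cost is that any socket-buffer state examined before the drop must be treated as stale afterwards. The shape, as an excerpt from the code above:

	mtx_exit(&so->so_mtx, MTX_DEF);		/* about to block in uiomove() */
	error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
	mtx_enter(&so->so_mtx, MTX_DEF);	/* re-taken; re-validate sb state */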
splx(s); return (0); } --- 1557,1569 ---- filt_sorattach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; + mtx_enter(&so->so_mtx, MTX_DEF); if (so->so_options & SO_ACCEPTCONN) kn->kn_fop = &solisten_filtops; SLIST_INSERT_HEAD(&so->so_rcv.sb_sel.si_note, kn, kn_selnext); so->so_rcv.sb_flags |= SB_KNOTE; ! mtx_exit(&so->so_mtx, MTX_DEF); return (0); } *************** *** 1544,1555 **** filt_sordetach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; - int s = splnet(); SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext); if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note)) so->so_rcv.sb_flags &= ~SB_KNOTE; ! splx(s); } /*ARGSUSED*/ --- 1571,1582 ---- filt_sordetach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; + mtx_enter(&so->so_mtx, MTX_DEF); SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext); if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note)) so->so_rcv.sb_flags &= ~SB_KNOTE; ! mtx_exit(&so->so_mtx, MTX_DEF); } /*ARGSUSED*/ *************** *** 1570,1580 **** filt_sowattach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; - int s = splnet(); SLIST_INSERT_HEAD(&so->so_snd.sb_sel.si_note, kn, kn_selnext); so->so_snd.sb_flags |= SB_KNOTE; ! splx(s); return (0); } --- 1597,1607 ---- filt_sowattach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; + mtx_enter(&so->so_mtx, MTX_DEF); SLIST_INSERT_HEAD(&so->so_snd.sb_sel.si_note, kn, kn_selnext); so->so_snd.sb_flags |= SB_KNOTE; ! mtx_exit(&so->so_mtx, MTX_DEF); return (0); } *************** *** 1582,1593 **** filt_sowdetach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; - int s = splnet(); SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext); if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note)) so->so_snd.sb_flags &= ~SB_KNOTE; ! splx(s); } /*ARGSUSED*/ --- 1609,1620 ---- filt_sowdetach(struct knote *kn) { struct socket *so = (struct socket *)kn->kn_fp->f_data; + mtx_enter(&so->so_mtx, MTX_DEF); SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext); if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note)) so->so_snd.sb_flags &= ~SB_KNOTE; ! mtx_exit(&so->so_mtx, MTX_DEF); } /*ARGSUSED*/ Index: kern/uipc_socket2.c =================================================================== RCS file: /home/ncvs/src/sys/kern/uipc_socket2.c,v retrieving revision 1.65 diff -u -c -r1.65 uipc_socket2.c *** kern/uipc_socket2.c 2000/09/05 22:10:24 1.65 --- kern/uipc_socket2.c 2000/09/11 06:27:47 *************** *** 295,306 **** * Wait for data to arrive at/drain from a socket buffer. */ int ! sbwait(sb) struct sockbuf *sb; { sb->sb_flags |= SB_WAIT; ! return (tsleep((caddr_t)&sb->sb_cc, (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait", sb->sb_timeo)); } --- 295,307 ---- * Wait for data to arrive at/drain from a socket buffer. */ int ! sbwait(so, sb) ! struct socket *so; struct sockbuf *sb; { sb->sb_flags |= SB_WAIT; ! return (msleep((caddr_t)&sb->sb_cc, &so->so_mtx, (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait", sb->sb_timeo)); } *************** *** 310,323 **** * return any error returned from sleep (EINTR). */ int ! sb_lock(sb) ! register struct sockbuf *sb; { int error; while (sb->sb_flags & SB_LOCK) { sb->sb_flags |= SB_WANT; ! error = tsleep((caddr_t)&sb->sb_flags, (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH, "sblock", 0); if (error) --- 311,325 ---- * return any error returned from sleep (EINTR). */ int ! sb_lock(so, sb) ! struct socket *so; ! struct sockbuf *sb; { int error; while (sb->sb_flags & SB_LOCK) { sb->sb_flags |= SB_WANT; ! error = msleep((caddr_t)&sb->sb_flags, &so->so_mtx, (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH, "sblock", 0); if (error) Index: libkern/inet_ntoa.c =================================================================== RCS file: /home/ncvs/src/sys/libkern/inet_ntoa.c,v retrieving revision 1.3 diff -u -c -r1.3 inet_ntoa.c *** libkern/inet_ntoa.c 2000/05/01 20:06:36 1.3 --- libkern/inet_ntoa.c 2000/09/09 23:57:26 *************** *** 48,50 **** --- 48,64 ---- return buf; } + char * + inet_ntoa_r(struct in_addr ina, char *buf) + { + unsigned char *ucp = (unsigned char *)&ina; + + sprintf(buf, "%d.%d.%d.%d", + ucp[0] & 0xff, + ucp[1] & 0xff, + ucp[2] & 0xff, + ucp[3] & 0xff); + return buf; + } + + Index: net/if_ethersubr.c =================================================================== RCS file: /home/ncvs/src/sys/net/if_ethersubr.c,v retrieving revision 1.81 diff -u -c -r1.81 if_ethersubr.c *** net/if_ethersubr.c 2000/07/29 02:00:12 1.81 --- net/if_ethersubr.c 2000/09/11 16:45:03 *************** *** 436,442 **** return; } ! #ifdef BRIDGE /* Check for bridging mode */ if (do_bridge) { struct ifnet *bif; --- 436,442 ---- return; } ! #ifdef BRIDGE /* MPsafe? */ /* Check for bridging mode */ if (do_bridge) { struct ifnet *bif; Index: net/if_var.h =================================================================== RCS file: /home/ncvs/src/sys/net/if_var.h,v retrieving revision 1.26 diff -u -c -r1.26 if_var.h *** net/if_var.h 2000/08/15 00:48:38 1.26 --- net/if_var.h 2000/09/12 23:47:29 *************** *** 89,94 **** --- 89,95 ---- int ifq_len; int ifq_maxlen; int ifq_drops; + mtx_t ifq_mtx; }; /* *************** *** 183,191 **** * (defined above). Entries are added to and deleted from these structures * by these macros, which should be called with ipl raised to splimp(). */ #define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) ! #define IF_DROP(ifq) ((ifq)->ifq_drops++) ! #define IF_ENQUEUE(ifq, m) { \ (m)->m_nextpkt = 0; \ if ((ifq)->ifq_tail == 0) \ (ifq)->ifq_head = m; \ --- 184,199 ---- * (defined above). Entries are added to and deleted from these structures * by these macros, which should be called with ipl raised to splimp(). */ + #define IF_QLOCK_INIT(ifq, n) mtx_init(&(ifq)->ifq_mtx, n, MTX_DEF) + #define IF_QLOCK_UNINITED(ifq) ((ifq)->ifq_mtx.mtx_description == NULL) + #define IF_QLOCK(ifq) mtx_enter(&(ifq)->ifq_mtx, MTX_DEF) + #define IF_QUNLOCK(ifq) mtx_exit(&(ifq)->ifq_mtx, MTX_DEF) + #define IF_QLOCK_DESTROY(ifq) mtx_destroy(&(ifq)->ifq_mtx) + #define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) ! #define IF_DROP(ifq) (atomic_add_int(&((ifq)->ifq_drops), 1)) ! ! #define IF_ENQUEUE_NOUNLOCK(ifq, m) do { \ (m)->m_nextpkt = 0; \ if ((ifq)->ifq_tail == 0) \ (ifq)->ifq_head = m; \ *************** *** 193,207 **** (ifq)->ifq_tail->m_nextpkt = m; \ (ifq)->ifq_tail = m; \ (ifq)->ifq_len++; \ ! } ! #define IF_PREPEND(ifq, m) { \ (m)->m_nextpkt = (ifq)->ifq_head; \ if ((ifq)->ifq_tail == 0) \ (ifq)->ifq_tail = (m); \ (ifq)->ifq_head = (m); \ (ifq)->ifq_len++; \ ! } ! #define IF_DEQUEUE(ifq, m) { \ (m) = (ifq)->ifq_head; \ if (m) { \ if (((ifq)->ifq_head = (m)->m_nextpkt) == 0) \ --- 201,224 ---- (ifq)->ifq_tail->m_nextpkt = m; \ (ifq)->ifq_tail = m; \ (ifq)->ifq_len++; \ ! } while(0) ! ! #define IF_ENQUEUE(ifq, m) do { \ ! IF_ENQUEUE_NOUNLOCK(ifq, m); \ ! IF_QUNLOCK(ifq); \ ! } while(0) ! ! #define IF_PREPEND(ifq, m) do { \ ! IF_QLOCK(ifq); \ (m)->m_nextpkt = (ifq)->ifq_head; \ if ((ifq)->ifq_tail == 0) \ (ifq)->ifq_tail = (m); \ (ifq)->ifq_head = (m); \ (ifq)->ifq_len++; \ ! IF_QUNLOCK(ifq); \ ! } while (0) ! ! #define IF_DEQUEUE_NOLOCK(ifq, m) do { \ (m) = (ifq)->ifq_head; \ if (m) { \ if (((ifq)->ifq_head = (m)->m_nextpkt) == 0) \ *************** *** 209,215 **** (m)->m_nextpkt = 0; \ (ifq)->ifq_len--; \ } \ ! } #ifdef _KERNEL #define IF_ENQ_DROP(ifq, m) if_enq_drop(ifq, m) --- 226,239 ---- (m)->m_nextpkt = 0; \ (ifq)->ifq_len--; \ } \ ! } while (0) ! ! #define IF_DEQUEUE(ifq, m) do { \ ! IF_QLOCK(ifq); \ ! IF_DEQUEUE_NOLOCK(ifq, m); \ ! IF_QUNLOCK(ifq); \ ! } while(0) ! #ifdef _KERNEL #define IF_ENQ_DROP(ifq, m) if_enq_drop(ifq, m) Index: net/netisr.h =================================================================== RCS file: /home/ncvs/src/sys/net/netisr.h,v retrieving revision 1.21 diff -u -c -r1.21 netisr.h *** net/netisr.h 2000/02/13 03:31:56 1.21 --- net/netisr.h 2000/09/12 09:09:45 *************** *** 68,74 **** #ifdef _KERNEL extern volatile unsigned int netisr; /* scheduling bits for network */ ! #define schednetisr(anisr) { netisr |= 1 << (anisr); setsoftnet(); } typedef void netisr_t __P((void)); --- 68,78 ---- #ifdef _KERNEL extern volatile unsigned int netisr; /* scheduling bits for network */ ! #define schednetisr(anisr) \ ! do { \ ! atomic_set_int(&netisr, 1 << (anisr)); \ ! setsoftnet(); \ ! } while(0) typedef void netisr_t __P((void)); Index: netinet/in.h =================================================================== RCS file: /home/ncvs/src/sys/netinet/in.h,v retrieving revision 1.50 diff -u -c -r1.50 in.h *** netinet/in.h 2000/07/04 16:35:05 1.50 --- netinet/in.h 2000/09/09 23:57:24 *************** *** 483,488 **** --- 483,489 ---- int in_canforward __P((struct in_addr)); int in_localaddr __P((struct in_addr)); char *inet_ntoa __P((struct in_addr)); /* in libkern */ + char *inet_ntoa_r __P((struct in_addr, char *)); /* in libkern */ int prison_ip __P((struct proc *p, int flag, u_int32_t *ip)); void prison_remote_ip __P((struct proc *p, int flag, u_int32_t *ip)); Index: netinet/in_pcb.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/in_pcb.c,v retrieving revision 1.67 diff -u -c -r1.67 in_pcb.c *** netinet/in_pcb.c 2000/07/21 23:26:37 1.67 --- netinet/in_pcb.c 2000/09/10 02:53:59 *************** *** 145,150 **** --- 145,151 ---- { register struct inpcb *inp; + mtx_enter(&pcbinfo->ipi_mtx, MTX_DEF); inp = zalloci(pcbinfo->ipi_zone); ! if (inp == NULL) { ! mtx_exit(&pcbinfo->ipi_mtx, MTX_DEF); ! return (ENOBUFS); ! } *************** *** 161,166 **** --- 162,168 ---- LIST_INSERT_HEAD(pcbinfo->listhead, inp, inp_list); pcbinfo->ipi_count++; so->so_pcb = (caddr_t)inp; + mtx_exit(&pcbinfo->ipi_mtx, MTX_DEF); return (0); } *************** *** 188,194 **** sin = (struct sockaddr_in *)nam; if (nam->sa_len != sizeof (*sin)) return (EINVAL); ! #ifdef notdef /* * We should check the family, but old programs * incorrectly fail to initialize it. --- 190,196 ---- sin = (struct sockaddr_in *)nam; if (nam->sa_len != sizeof (*sin)) return (EINVAL); ! #if 0 /* * We should check the family, but old programs * incorrectly fail to initialize it. *************** *** 548,554 **** */ if ((rt->rt_flags & RTF_DELCLONE) && (rt->rt_flags & RTF_WASCLONED)) { ! if (--rt->rt_refcnt <= 0) { rt->rt_flags &= ~RTF_UP; rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), --- 550,556 ---- */ if ((rt->rt_flags & RTF_DELCLONE) && (rt->rt_flags & RTF_WASCLONED)) { ! if (atomic_subtract_long(&rt->rt_refcnt, 1) <= 0) { rt->rt_flags &= ~RTF_UP; rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), *************** *** 559,565 **** * more than one reference, bump it up * again. */ ! rt->rt_refcnt++; } else rtfree(rt); --- 561,567 ---- * more than one reference, bump it up * again. */ ! atomic_add_long(&rt->rt_refcnt, 1); } else rtfree(rt); *************** *** 961,966 **** --- 963,970 ---- pcbporthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(inp->inp_lport, pcbinfo->porthashmask)]; + mtx_enter(&pcbinfo->ipi_mtx, MTX_DEF); + /* * Go through port list and look for a head for this lport. */ *************** *** 983,988 **** --- 987,993 ---- inp->inp_phd = phd; LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist); LIST_INSERT_HEAD(pcbhash, inp, inp_hash); + mtx_exit(&pcbinfo->ipi_mtx, MTX_DEF); return (0); } *************** *** 1009,1016 **** --- 1014,1023 ---- head = &inp->inp_pcbinfo->hashbase[INP_PCBHASH(hashkey_faddr, inp->inp_lport, inp->inp_fport, inp->inp_pcbinfo->hashmask)]; + mtx_enter(&inp->inp_pcbinfo->ipi_mtx, MTX_DEF); LIST_REMOVE(inp, inp_hash); LIST_INSERT_HEAD(head, inp, inp_hash); + mtx_exit(&inp->inp_pcbinfo->ipi_mtx, MTX_DEF); } /* *************** *** 1020,1025 **** --- 1027,1034 ---- in_pcbremlists(inp) struct inpcb *inp; { + + mtx_enter(&inp->inp_pcbinfo->ipi_mtx, MTX_DEF); inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt; if (inp->inp_lport) { struct inpcbport *phd = inp->inp_phd; *************** *** 1033,1038 **** --- 1042,1048 ---- } LIST_REMOVE(inp, inp_list); inp->inp_pcbinfo->ipi_count--; + mtx_exit(&inp->inp_pcbinfo->ipi_mtx, MTX_DEF); } int Index: netinet/in_pcb.h =================================================================== RCS file: /home/ncvs/src/sys/netinet/in_pcb.h,v retrieving revision 1.34 diff -u -c -r1.34 in_pcb.h *** netinet/in_pcb.h 2000/05/26 02:05:45 1.34 --- netinet/in_pcb.h 2000/09/09 14:06:25 *************** *** 197,202 **** --- 197,203 ---- }; struct inpcbinfo { /* XXX documentation, prefixes */ + mtx_t ipi_mtx; struct inpcbhead *hashbase; u_long hashmask; struct inpcbporthead *porthashbase; Index: netinet/ip6.h =================================================================== RCS file: /home/ncvs/src/sys/netinet/ip6.h,v retrieving revision 1.4 diff -u -c -r1.4 ip6.h *** netinet/ip6.h 2000/07/12 16:39:13 1.4 --- netinet/ip6.h 2000/09/09 11:08:25 *************** *** 213,243 **** * mbufs(never into two or more internal mbufs). Thus, the third case is * supposed to never be matched but is prepared just in case. */ ! #define IP6_EXTHDR_CHECK(m, off, hlen, ret) \ do { \ if ((m)->m_next != NULL) { \ if (((m)->m_flags & M_LOOP) && \ ((m)->m_len < (off) + (hlen)) && \ (((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \ ! ip6stat.ip6s_exthdrtoolong++; \ return ret; \ } else if ((m)->m_flags & M_EXT) { \ if ((m)->m_len < (off) + (hlen)) { \ ! ip6stat.ip6s_exthdrtoolong++; \ m_freem(m); \ return ret; \ } \ } else { \ if ((m)->m_len < (off) + (hlen)) { \ ! ip6stat.ip6s_exthdrtoolong++; \ m_freem(m); \ return ret; \ } \ } \ } else { \ if ((m)->m_len < (off) + (hlen)) { \ ! ip6stat.ip6s_tooshort++; \ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \ m_freem(m); \ return ret; \ --- 213,245 ---- * mbufs(never into two or more internal mbufs). Thus, the third case is * supposed to never be matched but is prepared just in case. */ ! /* ! * MPsafe (alfred) !
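Note the asymmetry in the if_var.h macros earlier in this patch: IF_ENQUEUE() only releases ifq_mtx, so the caller is expected to take the lock first, normally so that the IF_QFULL() test and the enqueue happen atomically. A sketch of the intended driver-side usage (ifq and m stand for the usual ifqueue and mbuf variables):

	IF_QLOCK(ifq);
	if (IF_QFULL(ifq)) {
		IF_QUNLOCK(ifq);
		IF_DROP(ifq);		/* atomic counter bump, no lock needed */
		m_freem(m);
	} else {
		IF_ENQUEUE(ifq, m);	/* enqueues, then drops the lock for us */
	}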
*/ #define IP6_EXTHDR_CHECK(m, off, hlen, ret) \ do { \ if ((m)->m_next != NULL) { \ if (((m)->m_flags & M_LOOP) && \ ((m)->m_len < (off) + (hlen)) && \ (((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \ ! atomic_add_long(&ip6stat.ip6s_exthdrtoolong,1); \ return ret; \ } else if ((m)->m_flags & M_EXT) { \ if ((m)->m_len < (off) + (hlen)) { \ ! atomic_add_long(&ip6stat.ip6s_exthdrtoolong, 1); \ m_freem(m); \ return ret; \ } \ } else { \ if ((m)->m_len < (off) + (hlen)) { \ ! atomic_add_long(&ip6stat.ip6s_exthdrtoolong, 1); \ m_freem(m); \ return ret; \ } \ } \ } else { \ if ((m)->m_len < (off) + (hlen)) { \ ! atomic_add_long(&ip6stat.ip6s_tooshort, 1); \ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \ m_freem(m); \ return ret; \ Index: netinet/ip_input.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/ip_input.c,v retrieving revision 1.139 diff -u -c -r1.139 ip_input.c *** netinet/ip_input.c 2000/09/01 12:33:03 1.139 --- netinet/ip_input.c 2000/09/10 02:48:00 *************** *** 1415,1420 **** --- 1415,1422 ---- * Second argument is buffer to which options * will be moved, and return value is their length. * XXX should be deleted; last arg currently ignored. + * + * MP safe (alfred) */ void ip_stripoptions(m, mopt) Index: netinet/tcp_input.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/tcp_input.c,v retrieving revision 1.118 diff -u -c -r1.118 tcp_input.c *** netinet/tcp_input.c 2000/07/21 23:26:37 1.118 --- netinet/tcp_input.c 2000/09/11 07:58:36 *************** *** 185,192 **** tp->t_flags |= TF_ACKNOW; \ (tp)->rcv_nxt += *(tlenp); \ flags = (th)->th_flags & TH_FIN; \ ! tcpstat.tcps_rcvpack++;\ ! tcpstat.tcps_rcvbyte += *(tlenp);\ ND6_HINT(tp); \ sbappend(&(so)->so_rcv, (m)); \ sorwakeup(so); \ --- 185,192 ---- tp->t_flags |= TF_ACKNOW; \ (tp)->rcv_nxt += *(tlenp); \ flags = (th)->th_flags & TH_FIN; \ ! atomic_add_long(&tcpstat.tcps_rcvpack, 1);\ ! atomic_add_long(&tcpstat.tcps_rcvbyte, *(tlenp));\ ND6_HINT(tp); \ sbappend(&(so)->so_rcv, (m)); \ sorwakeup(so); \ *************** *** 221,227 **** MALLOC(te, struct tseg_qent *, sizeof (struct tseg_qent), M_TSEGQ, M_NOWAIT); if (te == NULL) { ! tcpstat.tcps_rcvmemdrop++; m_freem(m); return (0); } --- 221,227 ---- MALLOC(te, struct tseg_qent *, sizeof (struct tseg_qent), M_TSEGQ, M_NOWAIT); if (te == NULL) { ! atomic_add_long(&tcpstat.tcps_rcvmemdrop, 1); m_freem(m); return (0); } *************** *** 246,253 **** i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; if (i > 0) { if (i >= *tlenp) { ! tcpstat.tcps_rcvduppack++; ! tcpstat.tcps_rcvdupbyte += *tlenp; m_freem(m); FREE(te, M_TSEGQ); /* --- 246,253 ---- i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; if (i > 0) { if (i >= *tlenp) { ! atomic_add_long(&tcpstat.tcps_rcvduppack, 1); ! atomic_add_long(&tcpstat.tcps_rcvdupbyte, *tlenp); m_freem(m); FREE(te, M_TSEGQ); /* *************** *** 263,270 **** th->th_seq += i; } } ! tcpstat.tcps_rcvoopack++; ! tcpstat.tcps_rcvoobyte += *tlenp; /* * While we overlap succeeding segments trim them or, --- 263,270 ---- th->th_seq += i; } } ! atomic_add_long(&tcpstat.tcps_rcvoopack, 1); ! 
atomic_add_long(&tcpstat.tcps_rcvoobyte, *tlenp); /* * While we overlap succeeding segments trim them or, *************** *** 309,314 **** --- 309,316 ---- q = LIST_FIRST(&tp->t_segq); if (!q || q->tqe_th->th_seq != tp->rcv_nxt) return (0); + + mtx_enter(&so->so_mtx, MTX_DEF); do { tp->rcv_nxt += q->tqe_len; flags = q->tqe_th->th_flags & TH_FIN; *************** *** 323,328 **** --- 325,331 ---- } while (q && q->tqe_th->th_seq == tp->rcv_nxt); ND6_HINT(tp); sorwakeup(so); + mtx_exit(&so->so_mtx, MTX_DEF); return (flags); } *************** *** 398,404 **** #endif bzero((char *)&to, sizeof(to)); ! tcpstat.tcps_rcvtotal++; #ifdef INET6 if (isipv6) { --- 401,407 ---- #endif bzero((char *)&to, sizeof(to)); ! atomic_add_long(&tcpstat.tcps_rcvtotal, 1); #ifdef INET6 if (isipv6) { *************** *** 406,412 **** ip6 = mtod(m, struct ip6_hdr *); tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { ! tcpstat.tcps_rcvbadsum++; goto drop; } th = (struct tcphdr *)((caddr_t)ip6 + off0); --- 409,415 ---- ip6 = mtod(m, struct ip6_hdr *); tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { ! atomic_add_long(&tcpstat.tcps_rcvbadsum, 1); goto drop; } th = (struct tcphdr *)((caddr_t)ip6 + off0); *************** *** 418,429 **** * Note: IP leaves IP header in first mbuf. */ if (off0 > sizeof (struct ip)) { ! ip_stripoptions(m, (struct mbuf *)0); off0 = sizeof(struct ip); } if (m->m_len < sizeof (struct tcpiphdr)) { if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) { ! tcpstat.tcps_rcvshort++; return; } } --- 421,432 ---- * Note: IP leaves IP header in first mbuf. */ if (off0 > sizeof (struct ip)) { ! ip_stripoptions(m, NULL); off0 = sizeof(struct ip); } if (m->m_len < sizeof (struct tcpiphdr)) { if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) { ! atomic_add_long(&tcpstat.tcps_rcvshort, 1); return; } } *************** *** 451,457 **** th->th_sum = in_cksum(m, len); } if (th->th_sum) { ! tcpstat.tcps_rcvbadsum++; goto drop; } #ifdef INET6 --- 454,460 ---- th->th_sum = in_cksum(m, len); } if (th->th_sum) { ! atomic_add_long(&tcpstat.tcps_rcvbadsum, 1); goto drop; } #ifdef INET6 *************** *** 466,472 **** */ off = th->th_off << 2; if (off < sizeof (struct tcphdr) || off > tlen) { ! tcpstat.tcps_rcvbadoff++; goto drop; } tlen -= off; /* tlen is used instead of ti->ti_len */ --- 469,475 ---- */ off = th->th_off << 2; if (off < sizeof (struct tcphdr) || off > tlen) { ! atomic_add_long(&tcpstat.tcps_rcvbadoff, 1); goto drop; } tlen -= off; /* tlen is used instead of ti->ti_len */ *************** *** 481,487 **** { if (m->m_len < sizeof(struct ip) + off) { if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) { ! tcpstat.tcps_rcvshort++; return; } ip = mtod(m, struct ip *); --- 484,490 ---- { if (m->m_len < sizeof(struct ip) + off) { if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) { ! atomic_add_long(&tcpstat.tcps_rcvshort, 1); return; } ip = mtod(m, struct ip *); *************** *** 575,581 **** #ifdef INET6 if (isipv6) { if (inp != NULL && ipsec6_in_reject_so(m, inp->inp_socket)) { ! ipsec6stat.in_polvio++; goto drop; } } else --- 578,584 ---- #ifdef INET6 if (isipv6) { if (inp != NULL && ipsec6_in_reject_so(m, inp->inp_socket)) { ! atomic_add_long(&ipsec6stat.in_polvio, 1); goto drop; } } else *************** *** 607,614 **** } else #endif { ! strcpy(dbuf, inet_ntoa(ip->ip_dst)); ! strcpy(sbuf, inet_ntoa(ip->ip_src)); } switch (log_in_vain) { case 1: --- 610,617 ---- } else #endif { ! 
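The log_in_vain conversion that follows replaces inet_ntoa(), which formats into a single static buffer and so is unsafe once several CPUs can run this path concurrently, with the reentrant inet_ntoa_r() added in libkern above. A usage sketch (16 bytes covers "255.255.255.255" plus the terminating NUL):

	char dbuf[16], sbuf[16];

	(void) inet_ntoa_r(ip->ip_dst, dbuf);	/* caller-supplied buffers, */
	(void) inet_ntoa_r(ip->ip_src, sbuf);	/* so no shared static state */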
(void) inet_ntoa_r(ip->ip_dst, dbuf); ! (void) inet_ntoa_r(ip->ip_src, sbuf); } switch (log_in_vain) { case 1: *************** *** 704,710 **** * send a RST in response to a RST. */ if (thflags & TH_ACK) { ! tcpstat.tcps_badsyn++; goto maybedropwithreset; } goto drop; --- 707,713 ---- * send a RST in response to a RST. */ if (thflags & TH_ACK) { ! atomic_add_long(&tcpstat.tcps_badsyn, 1); goto maybedropwithreset; } goto drop; *************** *** 712,718 **** #endif so2 = sonewconn(so, 0); if (so2 == 0) { ! tcpstat.tcps_listendrop++; so2 = sodropablereq(so); if (so2) { if (tcp_lq_overflow) --- 715,721 ---- #endif so2 = sonewconn(so, 0); if (so2 == 0) { ! atomic_add_long(&tcpstat.tcps_listendrop, 1); so2 = sodropablereq(so); if (so2) { if (tcp_lq_overflow) *************** *** 784,790 **** * send a RST in response to a RST. */ if (thflags & TH_ACK) { ! tcpstat.tcps_badsyn++; goto maybedropwithreset; } goto drop; --- 787,793 ---- * send a RST in response to a RST. */ if (thflags & TH_ACK) { ! atomic_add_long(&tcpstat.tcps_badsyn, 1); goto maybedropwithreset; } goto drop; *************** *** 896,902 **** /* * this is a pure ack for outstanding data. */ ! ++tcpstat.tcps_predack; /* * "bad retransmit" recovery */ --- 899,905 ---- /* * this is a pure ack for outstanding data. */ ! atomic_add_long(&tcpstat.tcps_predack, 1); /* * "bad retransmit" recovery */ *************** *** 915,922 **** SEQ_GT(th->th_ack, tp->t_rtseq)) tcp_xmit_timer(tp, ticks - tp->t_rtttime); acked = th->th_ack - tp->snd_una; ! tcpstat.tcps_rcvackpack++; ! tcpstat.tcps_rcvackbyte += acked; sbdrop(&so->so_snd, acked); tp->snd_una = th->th_ack; m_freem(m); --- 918,925 ---- SEQ_GT(th->th_ack, tp->t_rtseq)) tcp_xmit_timer(tp, ticks - tp->t_rtttime); acked = th->th_ack - tp->snd_una; ! atomic_add_long(&tcpstat.tcps_rcvackpack, 1); ! atomic_add_long(&tcpstat.tcps_rcvackbyte, acked); sbdrop(&so->so_snd, acked); tp->snd_una = th->th_ack; m_freem(m); *************** *** 951,960 **** * with nothing on the reassembly queue and * we have enough buffer space to take it. */ ! ++tcpstat.tcps_preddat; tp->rcv_nxt += tlen; ! tcpstat.tcps_rcvpack++; ! tcpstat.tcps_rcvbyte += tlen; ND6_HINT(tp); /* some progress has been done */ /* * Add data to socket buffer. --- 954,963 ---- * with nothing on the reassembly queue and * we have enough buffer space to take it. */ ! atomic_add_long(&tcpstat.tcps_preddat, 1); tp->rcv_nxt += tlen; ! atomic_add_long(&tcpstat.tcps_rcvpack, 1); ! atomic_add_long(&tcpstat.tcps_rcvbyte, tlen); ND6_HINT(tp); /* some progress has been done */ /* * Add data to socket buffer. *************** *** 1170,1181 **** * become effective only later when our SYN is acked. */ tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN); ! tcpstat.tcps_connects++; soisconnected(so); callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); dropsocket = 0; /* committed to socket */ ! tcpstat.tcps_accepts++; goto trimthenstep6; } /* else do standard 3-way handshake */ --- 1173,1184 ---- * become effective only later when our SYN is acked. */ tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN); ! atomic_add_long(&tcpstat.tcps_connects, 1); soisconnected(so); callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); dropsocket = 0; /* committed to socket */ ! atomic_add_long(&tcpstat.tcps_accepts, 1); goto trimthenstep6; } /* else do standard 3-way handshake */ *************** *** 1194,1200 **** tp->t_state = TCPS_SYN_RECEIVED; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); dropsocket = 0; /* committed to socket */ ! 
tcpstat.tcps_accepts++; goto trimthenstep6; } --- 1197,1203 ---- tp->t_state = TCPS_SYN_RECEIVED; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); dropsocket = 0; /* committed to socket */ ! atomic_add_long(&tcpstat.tcps_accepts, 1); goto trimthenstep6; } *************** *** 1273,1279 **** } } else tp->t_flags &= ~TF_RCVD_CC; ! tcpstat.tcps_connects++; soisconnected(so); /* Do window scaling on this connection? */ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == --- 1276,1282 ---- } } else tp->t_flags &= ~TF_RCVD_CC; ! atomic_add_long(&tcpstat.tcps_connects, 1); soisconnected(so); /* Do window scaling on this connection? */ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == *************** *** 1366,1373 **** m_adj(m, -todrop); tlen = tp->rcv_wnd; thflags &= ~TH_FIN; ! tcpstat.tcps_rcvpackafterwin++; ! tcpstat.tcps_rcvbyteafterwin += todrop; } tp->snd_wl1 = th->th_seq - 1; tp->rcv_up = th->th_seq; --- 1369,1376 ---- m_adj(m, -todrop); tlen = tp->rcv_wnd; thflags &= ~TH_FIN; ! atomic_add_long(&tcpstat.tcps_rcvpackafterwin, 1); ! atomic_add_long(&tcpstat.tcps_rcvbyteafterwin, todrop); } tp->snd_wl1 = th->th_seq - 1; tp->rcv_up = th->th_seq; *************** *** 1485,1491 **** so->so_error = ECONNRESET; close: tp->t_state = TCPS_CLOSED; ! tcpstat.tcps_drops++; tp = tcp_close(tp); break; --- 1488,1494 ---- so->so_error = ECONNRESET; close: tp->t_state = TCPS_CLOSED; ! atomic_add_long(&tcpstat.tcps_drops, 1); tp = tcp_close(tp); break; *************** *** 1523,1531 **** */ tp->ts_recent = 0; } else { ! tcpstat.tcps_rcvduppack++; ! tcpstat.tcps_rcvdupbyte += tlen; ! tcpstat.tcps_pawsdrop++; goto dropafterack; } } --- 1526,1534 ---- */ tp->ts_recent = 0; } else { ! atomic_add_long(&tcpstat.tcps_rcvduppack, 1); ! atomic_add_long(&tcpstat.tcps_rcvdupbyte, tlen); ! atomic_add_long(&tcpstat.tcps_pawsdrop, 1); goto dropafterack; } } *************** *** 1579,1589 **** */ tp->t_flags |= TF_ACKNOW; todrop = tlen; ! tcpstat.tcps_rcvduppack++; ! tcpstat.tcps_rcvdupbyte += todrop; } else { ! tcpstat.tcps_rcvpartduppack++; ! tcpstat.tcps_rcvpartdupbyte += todrop; } drop_hdrlen += todrop; /* drop from the top afterwards */ th->th_seq += todrop; --- 1582,1592 ---- */ tp->t_flags |= TF_ACKNOW; todrop = tlen; ! atomic_add_long(&tcpstat.tcps_rcvduppack, 1); ! atomic_add_long(&tcpstat.tcps_rcvdupbyte, todrop); } else { ! atomic_add_long(&tcpstat.tcps_rcvpartduppack, 1); ! atomic_add_long(&tcpstat.tcps_rcvpartdupbyte, todrop); } drop_hdrlen += todrop; /* drop from the top afterwards */ th->th_seq += todrop; *************** *** 1603,1609 **** if ((so->so_state & SS_NOFDREF) && tp->t_state > TCPS_CLOSE_WAIT && tlen) { tp = tcp_close(tp); ! tcpstat.tcps_rcvafterclose++; goto dropwithreset; } --- 1606,1612 ---- if ((so->so_state & SS_NOFDREF) && tp->t_state > TCPS_CLOSE_WAIT && tlen) { tp = tcp_close(tp); ! atomic_add_long(&tcpstat.tcps_rcvafterclose, 1); goto dropwithreset; } *************** *** 1613,1621 **** */ todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd); if (todrop > 0) { ! tcpstat.tcps_rcvpackafterwin++; if (todrop >= tlen) { ! tcpstat.tcps_rcvbyteafterwin += tlen; /* * If a new connection request is received * while in TIME_WAIT, drop the old connection --- 1616,1624 ---- */ todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd); if (todrop > 0) { ! atomic_add_long(&tcpstat.tcps_rcvpackafterwin, 1); if (todrop >= tlen) { ! 
atomic_add_long(&tcpstat.tcps_rcvbyteafterwin, tlen); /* * If a new connection request is received * while in TIME_WAIT, drop the old connection *************** *** 1638,1648 **** */ if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { tp->t_flags |= TF_ACKNOW; ! tcpstat.tcps_rcvwinprobe++; } else goto dropafterack; } else ! tcpstat.tcps_rcvbyteafterwin += todrop; m_adj(m, -todrop); tlen -= todrop; thflags &= ~(TH_PUSH|TH_FIN); --- 1641,1651 ---- */ if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { tp->t_flags |= TF_ACKNOW; ! atomic_add_long(&tcpstat.tcps_rcvwinprobe, 1); } else goto dropafterack; } else ! atomic_add_long(&tcpstat.tcps_rcvbyteafterwin, todrop); m_adj(m, -todrop); tlen -= todrop; thflags &= ~(TH_PUSH|TH_FIN); *************** *** 1694,1700 **** */ case TCPS_SYN_RECEIVED: ! tcpstat.tcps_connects++; soisconnected(so); /* Do window scaling? */ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == --- 1697,1703 ---- */ case TCPS_SYN_RECEIVED: ! atomic_add_long(&tcpstat.tcps_connects, 1); soisconnected(so); /* Do window scaling? */ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == *************** *** 1753,1759 **** if (SEQ_LEQ(th->th_ack, tp->snd_una)) { if (tlen == 0 && tiwin == tp->snd_wnd) { ! tcpstat.tcps_rcvdupack++; /* * If we have outstanding data (other than * a window probe), this is a completely --- 1756,1762 ---- if (SEQ_LEQ(th->th_ack, tp->snd_una)) { if (tlen == 0 && tiwin == tp->snd_wnd) { ! atomic_add_long(&tcpstat.tcps_rcvdupack, 1); /* * If we have outstanding data (other than * a window probe), this is a completely *************** *** 1844,1850 **** tp->t_dupacks = 0; } if (SEQ_GT(th->th_ack, tp->snd_max)) { ! tcpstat.tcps_rcvacktoomuch++; goto dropafterack; } /* --- 1847,1853 ---- tp->t_dupacks = 0; } if (SEQ_GT(th->th_ack, tp->snd_max)) { ! atomic_add_long(&tcpstat.tcps_rcvacktoomuch, 1); goto dropafterack; } /* *************** *** 1871,1878 **** process_ACK: acked = th->th_ack - tp->snd_una; ! tcpstat.tcps_rcvackpack++; ! tcpstat.tcps_rcvackbyte += acked; /* * If we just performed our first retransmit, and the ACK --- 1874,1881 ---- process_ACK: acked = th->th_ack - tp->snd_una; ! atomic_add_long(&tcpstat.tcps_rcvackpack, 1); ! atomic_add_long(&tcpstat.tcps_rcvackbyte, acked); /* * If we just performed our first retransmit, and the ACK *************** *** 2038,2044 **** /* keep track of pure window updates */ if (tlen == 0 && tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) ! tcpstat.tcps_rcvwinupd++; tp->snd_wnd = tiwin; tp->snd_wl1 = th->th_seq; tp->snd_wl2 = th->th_ack; --- 2041,2047 ---- /* keep track of pure window updates */ if (tlen == 0 && tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) ! atomic_add_long(&tcpstat.tcps_rcvwinupd, 1); tp->snd_wnd = tiwin; tp->snd_wl1 = th->th_seq; tp->snd_wl2 = th->th_ack; *************** *** 2479,2485 **** { register int delta; ! tcpstat.tcps_rttupdated++; tp->t_rttupdated++; if (tp->t_srtt != 0) { /* --- 2482,2488 ---- { register int delta; ! atomic_add_long(&tcpstat.tcps_rttupdated, 1); tp->t_rttupdated++; if (tp->t_srtt != 0) { /* *************** *** 2657,2667 **** if (rt->rt_rmx.rmx_locks & RTV_RTT) tp->t_rttmin = rtt / (RTM_RTTUNIT / hz); tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); ! tcpstat.tcps_usedrtt++; if (rt->rt_rmx.rmx_rttvar) { tp->t_rttvar = rt->rt_rmx.rmx_rttvar / (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); ! 
tcpstat.tcps_usedrttvar++; } else { /* default variation is +- 1 rtt */ tp->t_rttvar = --- 2660,2670 ---- if (rt->rt_rmx.rmx_locks & RTV_RTT) tp->t_rttmin = rtt / (RTM_RTTUNIT / hz); tp->t_srtt = rtt / (RTM_RTTUNIT / (hz * TCP_RTT_SCALE)); ! atomic_add_long(&tcpstat.tcps_usedrtt, 1); if (rt->rt_rmx.rmx_rttvar) { tp->t_rttvar = rt->rt_rmx.rmx_rttvar / (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE)); ! atomic_add_long(&tcpstat.tcps_usedrttvar, 1); } else { /* default variation is +- 1 rtt */ tp->t_rttvar = *************** *** 2785,2791 **** * threshold to no less than 2*mss. */ tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); ! tcpstat.tcps_usedssthresh++; } } --- 2788,2794 ---- * threshold to no less than 2*mss. */ tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); ! atomic_add_long(&tcpstat.tcps_usedssthresh, 1); } } Index: netinet/tcp_output.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/tcp_output.c,v retrieving revision 1.46 diff -u -c -r1.46 tcp_output.c *** netinet/tcp_output.c 2000/08/03 23:23:36 1.46 --- netinet/tcp_output.c 2000/09/08 22:54:48 *************** *** 549,561 **** */ if (len) { if (tp->t_force && len == 1) ! tcpstat.tcps_sndprobe++; else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { ! tcpstat.tcps_sndrexmitpack++; ! tcpstat.tcps_sndrexmitbyte += len; } else { ! tcpstat.tcps_sndpack++; ! tcpstat.tcps_sndbyte += len; } #ifdef notyet if ((m = m_copypack(so->so_snd.sb_mb, off, --- 549,561 ---- */ if (len) { if (tp->t_force && len == 1) ! atomic_add_long(&tcpstat.tcps_sndprobe, 1); else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { ! atomic_add_long(&tcpstat.tcps_sndrexmitpack, 1); ! atomic_add_long(&tcpstat.tcps_sndrexmitbyte, len); } else { ! atomic_add_long(&tcpstat.tcps_sndpack, 1); ! atomic_add_long(&tcpstat.tcps_sndbyte, len); } #ifdef notyet if ((m = m_copypack(so->so_snd.sb_mb, off, *************** *** 609,621 **** flags |= TH_PUSH; } else { if (tp->t_flags & TF_ACKNOW) ! tcpstat.tcps_sndacks++; else if (flags & (TH_SYN|TH_FIN|TH_RST)) ! tcpstat.tcps_sndctrl++; else if (SEQ_GT(tp->snd_up, tp->snd_una)) ! tcpstat.tcps_sndurg++; else ! tcpstat.tcps_sndwinup++; MGETHDR(m, M_DONTWAIT, MT_HEADER); if (m == NULL) { --- 609,621 ---- flags |= TH_PUSH; } else { if (tp->t_flags & TF_ACKNOW) ! atomic_add_long(&tcpstat.tcps_sndacks, 1); else if (flags & (TH_SYN|TH_FIN|TH_RST)) ! atomic_add_long(&tcpstat.tcps_sndctrl, 1); else if (SEQ_GT(tp->snd_up, tp->snd_una)) ! atomic_add_long(&tcpstat.tcps_sndurg, 1); else ! atomic_add_long(&tcpstat.tcps_sndwinup, 1); MGETHDR(m, M_DONTWAIT, MT_HEADER); if (m == NULL) { *************** *** 765,771 **** if (tp->t_rtttime == 0) { tp->t_rtttime = ticks; tp->t_rtseq = startseq; ! tcpstat.tcps_segstimed++; } } --- 765,771 ---- if (tp->t_rtttime == 0) { tp->t_rtttime = ticks; tp->t_rtseq = startseq; ! atomic_add_long(&tcpstat.tcps_segstimed, 1); } } *************** *** 905,911 **** } return (error); } ! tcpstat.tcps_sndtotal++; /* * Data sent (as far as we can tell). --- 905,911 ---- } return (error); } ! atomic_add_long(&tcpstat.tcps_sndtotal, 1); /* * Data sent (as far as we can tell). Index: netinet/tcp_subr.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/tcp_subr.c,v retrieving revision 1.79 diff -u -c -r1.79 tcp_subr.c *** netinet/tcp_subr.c 2000/07/21 23:26:37 1.79 --- netinet/tcp_subr.c 2000/09/08 22:52:55 *************** *** 539,547 **** if (TCPS_HAVERCVDSYN(tp->t_state)) { tp->t_state = TCPS_CLOSED; (void) tcp_output(tp); ! 
tcpstat.tcps_drops++; ! } else ! tcpstat.tcps_conndrops++; if (errno == ETIMEDOUT && tp->t_softerror) errno = tp->t_softerror; so->so_error = errno; --- 539,548 ---- if (TCPS_HAVERCVDSYN(tp->t_state)) { tp->t_state = TCPS_CLOSED; (void) tcp_output(tp); ! atomic_add_long(&tcpstat.tcps_drops, 1); ! } else { ! atomic_add_long(&tcpstat.tcps_conndrops, 1); ! } if (errno == ETIMEDOUT && tp->t_softerror) errno = tp->t_softerror; so->so_error = errno; *************** *** 621,627 **** (rt->rt_rmx.rmx_rtt + i) / 2; else rt->rt_rmx.rmx_rtt = i; ! tcpstat.tcps_cachedrtt++; } if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) { i = tp->t_rttvar * --- 622,628 ---- (rt->rt_rmx.rmx_rtt + i) / 2; else rt->rt_rmx.rmx_rtt = i; ! atomic_add_long(&tcpstat.tcps_cachedrtt, 1); } if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) { i = tp->t_rttvar * *************** *** 631,637 **** (rt->rt_rmx.rmx_rttvar + i) / 2; else rt->rt_rmx.rmx_rttvar = i; ! tcpstat.tcps_cachedrttvar++; } /* * The old comment here said: --- 632,638 ---- (rt->rt_rmx.rmx_rttvar + i) / 2; else rt->rt_rmx.rmx_rttvar = i; ! atomic_add_long(&tcpstat.tcps_cachedrttvar, 1); } /* * The old comment here said: *************** *** 677,683 **** (rt->rt_rmx.rmx_ssthresh + i) / 2; else rt->rt_rmx.rmx_ssthresh = i; ! tcpstat.tcps_cachedssthresh++; } } rt = inp->inp_route.ro_rt; --- 678,684 ---- (rt->rt_rmx.rmx_ssthresh + i) / 2; else rt->rt_rmx.rmx_ssthresh = i; ! atomic_add_long(&tcpstat.tcps_cachedssthresh, 1); } } rt = inp->inp_route.ro_rt; *************** *** 709,715 **** else #endif /* INET6 */ in_pcbdetach(inp); ! tcpstat.tcps_closed++; return ((struct tcpcb *)0); } --- 710,716 ---- else #endif /* INET6 */ in_pcbdetach(inp); ! atomic_add_long(&tcpstat.tcps_closed, 1); return ((struct tcpcb *)0); } *************** *** 1159,1165 **** tp->t_maxseg = mss; ! tcpstat.tcps_mturesent++; tp->t_rtttime = 0; tp->snd_nxt = tp->snd_una; tcp_output(tp); --- 1160,1166 ---- tp->t_maxseg = mss; ! atomic_add_long(&tcpstat.tcps_mturesent, 1); tp->t_rtttime = 0; tp->snd_nxt = tp->snd_una; tcp_output(tp); Index: netinet/tcp_timer.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/tcp_timer.c,v retrieving revision 1.37 diff -u -c -r1.37 tcp_timer.c *** netinet/tcp_timer.c 2000/07/04 11:25:28 1.37 --- netinet/tcp_timer.c 2000/09/08 22:51:49 *************** *** 176,182 **** callout_deactivate(tp->tt_delack); tp->t_flags |= TF_ACKNOW; ! tcpstat.tcps_delack++; (void) tcp_output(tp); splx(s); } --- 176,182 ---- callout_deactivate(tp->tt_delack); tp->t_flags |= TF_ACKNOW; ! atomic_add_long(&tcpstat.tcps_delack, 1); (void) tcp_output(tp); splx(s); } *************** *** 240,246 **** * Keep-alive timer went off; send something * or drop connection if idle for too long. */ ! tcpstat.tcps_keeptimeo++; if (tp->t_state < TCPS_ESTABLISHED) goto dropit; if ((always_keepalive || --- 240,246 ---- * Keep-alive timer went off; send something * or drop connection if idle for too long. */ ! atomic_add_long(&tcpstat.tcps_keeptimeo, 1); if (tp->t_state < TCPS_ESTABLISHED) goto dropit; if ((always_keepalive || *************** *** 260,266 **** * by the protocol spec, this requires the * correspondent TCP to respond. */ ! tcpstat.tcps_keepprobe++; #ifdef TCP_COMPAT_42 /* * The keepalive packet must have nonzero length --- 260,266 ---- * by the protocol spec, this requires the * correspondent TCP to respond. */ ! 
atomic_add_long(&tcpstat.tcps_keepprobe, 1); #ifdef TCP_COMPAT_42 /* * The keepalive packet must have nonzero length *************** *** 287,293 **** return; dropit: ! tcpstat.tcps_keepdrops++; tp = tcp_drop(tp, ETIMEDOUT); #ifdef TCPDEBUG --- 287,293 ---- return; dropit: ! atomic_add_long(&tcpstat.tcps_keepdrops, 1); tp = tcp_drop(tp, ETIMEDOUT); #ifdef TCPDEBUG *************** *** 319,325 **** * Persistance timer into zero window. * Force a byte to be output, if possible. */ ! tcpstat.tcps_persisttimeo++; /* * Hack: if the peer is dead/unreachable, we do not * time out if the window is closed. After a full --- 319,325 ---- * Persistance timer into zero window. * Force a byte to be output, if possible. */ ! atomic_add_long(&tcpstat.tcps_persisttimeo, 1); /* * Hack: if the peer is dead/unreachable, we do not * time out if the window is closed. After a full *************** *** 330,336 **** if (tp->t_rxtshift == TCP_MAXRXTSHIFT && ((ticks - tp->t_rcvtime) >= tcp_maxpersistidle || (ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { ! tcpstat.tcps_persistdrop++; tp = tcp_drop(tp, ETIMEDOUT); goto out; } --- 330,336 ---- if (tp->t_rxtshift == TCP_MAXRXTSHIFT && ((ticks - tp->t_rcvtime) >= tcp_maxpersistidle || (ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { ! atomic_add_long(&tcpstat.tcps_persistdrop, 1); tp = tcp_drop(tp, ETIMEDOUT); goto out; } *************** *** 373,379 **** */ if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { tp->t_rxtshift = TCP_MAXRXTSHIFT; ! tcpstat.tcps_timeoutdrop++; tp = tcp_drop(tp, tp->t_softerror ? tp->t_softerror : ETIMEDOUT); goto out; --- 373,379 ---- */ if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { tp->t_rxtshift = TCP_MAXRXTSHIFT; ! atomic_add_long(&tcpstat.tcps_timeoutdrop, 1); tp = tcp_drop(tp, tp->t_softerror ? tp->t_softerror : ETIMEDOUT); goto out; *************** *** 392,398 **** tp->snd_ssthresh_prev = tp->snd_ssthresh; tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); } ! tcpstat.tcps_rexmttimeo++; rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX); --- 392,398 ---- tp->snd_ssthresh_prev = tp->snd_ssthresh; tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); } ! atomic_add_long(&tcpstat.tcps_rexmttimeo, 1); rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX); Index: netinet/tcp_usrreq.c =================================================================== RCS file: /home/ncvs/src/sys/netinet/tcp_usrreq.c,v retrieving revision 1.52 diff -u -c -r1.52 tcp_usrreq.c *** netinet/tcp_usrreq.c 2000/04/01 22:35:45 1.52 --- netinet/tcp_usrreq.c 2000/09/08 22:56:33 *************** *** 735,741 **** tp->request_r_scale++; soisconnecting(so); ! tcpstat.tcps_connattempt++; tp->t_state = TCPS_SYN_SENT; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2; --- 735,741 ---- tp->request_r_scale++; soisconnecting(so); ! atomic_add_long(&tcpstat.tcps_connattempt, 1); tp->t_state = TCPS_SYN_SENT; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2; *************** *** 827,833 **** tp->request_r_scale++; soisconnecting(so); ! tcpstat.tcps_connattempt++; tp->t_state = TCPS_SYN_SENT; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2; --- 827,833 ---- tp->request_r_scale++; soisconnecting(so); ! 
atomic_add_long(&tcpstat.tcps_connattempt, 1); tp->t_state = TCPS_SYN_SENT; callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp); tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2; Index: netinet6/in6_cksum.c =================================================================== RCS file: /home/ncvs/src/sys/netinet6/in6_cksum.c,v retrieving revision 1.3 diff -u -c -r1.3 in6_cksum.c *** netinet6/in6_cksum.c 2000/09/09 15:56:46 1.3 --- netinet6/in6_cksum.c 2000/09/11 00:29:36 *************** *** 89,95 **** * len is a total length of a transport segment. * (e.g. TCP header + TCP payload) */ ! int in6_cksum(m, nxt, off, len) register struct mbuf *m; --- 89,95 ---- * len is a total length of a transport segment. * (e.g. TCP header + TCP payload) */ ! /* MP safe (alfred) */ int in6_cksum(m, nxt, off, len) register struct mbuf *m; *************** *** 120,125 **** --- 120,134 ---- u_int16_t s[2]; u_int32_t l; } l_util; + union { + u_int16_t phs[4]; + struct { + u_int32_t ph_len; + u_int8_t ph_zero[3]; + u_int8_t ph_nxt; + } ph; + } uph; + /* sanity check */ if (m->m_pkthdr.len < off + len) { Index: netinet6/in6_var.h =================================================================== RCS file: /home/ncvs/src/sys/netinet6/in6_var.h,v retrieving revision 1.6 diff -u -c -r1.6 in6_var.h *** netinet6/in6_var.h 2000/07/04 16:35:09 1.6 --- netinet6/in6_var.h 2000/09/09 11:08:01 *************** *** 436,441 **** --- 436,445 ---- extern struct icmp6stat icmp6stat; extern struct icmp6_ifstat **icmp6_ifstat; extern size_t icmp6_ifstatmax; + /* + * MPsafe assuming that there is only one executing context per interrupt + * (alfred) + */ #define in6_ifstat_inc(ifp, tag) \ do { \ if ((ifp) && (ifp)->if_index <= if_index \ Index: pc98/i386/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/pc98/i386/machdep.c,v retrieving revision 1.175 diff -u -c -r1.175 machdep.c *** pc98/i386/machdep.c 2000/09/08 11:20:04 1.175 --- pc98/i386/machdep.c 2000/09/12 06:34:14 *************** *** 145,152 **** static void cpu_startup __P((void *)); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) - static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); - #ifdef PC98 int need_pre_dma_flush; /* If 1, use wbinvd befor DMA transfer. */ int need_post_dma_flush; /* If 1, use invd after DMA transfer. */ --- 145,150 ---- *************** *** 413,430 **** (16*(ARG_MAX+(PAGE_SIZE*3)))); /* ! * Finally, allocate mbuf pool. */ ! { ! vm_offset_t mb_map_size; ! ! mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + ! (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt); ! mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); ! mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, ! &maxaddr, mb_map_size); ! mb_map->system_map = 1; ! } /* * Initialize callouts --- 411,422 ---- (16*(ARG_MAX+(PAGE_SIZE*3)))); /* ! * Initialize mbuf system. ! * Doing this early on (as opposed to through SYSINIT) is good ! * as we want to make sure that the mutex locks are setup prior to ! * network device drivers doing their stuff. */ ! 
mbinit(); /* * Initialize callouts Index: sys/mbuf.h =================================================================== RCS file: /home/ncvs/src/sys/sys/mbuf.h,v retrieving revision 1.55 diff -u -c -r1.55 mbuf.h *** sys/mbuf.h 2000/08/20 00:02:48 1.55 --- sys/mbuf.h 2000/09/12 06:34:14 *************** *** 37,42 **** --- 37,47 ---- #ifndef _SYS_MBUF_H_ #define _SYS_MBUF_H_ + #ifdef _KERNEL + #include + #include + #endif + /* * Mbufs are of a single size, MSIZE (machine/param.h), which * includes overhead. An mbuf may add a single "mbuf cluster" of size *************** *** 203,210 **** #define M_DONTWAIT 1 #define M_WAIT 0 ! /* Freelists: ! * * Normal mbuf clusters are normally treated as character arrays * after allocation, but use the first word of the buffer as a free list * pointer while on the free list. --- 208,214 ---- #define M_DONTWAIT 1 #define M_WAIT 0 ! /* * Normal mbuf clusters are normally treated as character arrays * after allocation, but use the first word of the buffer as a free list * pointer while on the free list. *************** *** 232,239 **** }; /* * Wake up the next instance (if any) of m_mballoc_wait() which is ! * waiting for an mbuf to be freed. This should be called at splimp(). * * XXX: If there is another free mbuf, this routine will be called [again] * from the m_mballoc_wait routine in order to wake another sleep instance. --- 236,261 ---- }; /* + * free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst + */ + struct mbffree_lst { + struct mbuf *m_head; + mtx_t m_mtx; + }; + + struct mclfree_lst { + union mcluster *m_head; + mtx_t m_mtx; + }; + + struct mcntfree_lst { + union mext_refcnt *m_head; + mtx_t m_mtx; + }; + + /* * Wake up the next instance (if any) of m_mballoc_wait() which is ! * waiting for an mbuf to be freed. * * XXX: If there is another free mbuf, this routine will be called [again] * from the m_mballoc_wait routine in order to wake another sleep instance. *************** *** 256,275 **** } while (0) /* - * mbuf utility macros: - * - * MBUFLOCK(code) - * prevents a section of code from from being interrupted by network - * drivers. - */ - #define MBUFLOCK(code) do { \ - int _ms = splimp(); \ - \ - { code } \ - splx(_ms); \ - } while (0) - - /* * mbuf external reference count management macros: * * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing --- 278,283 ---- *************** *** 286,309 **** #define MEXT_ADD_REF(m) atomic_add_long(&((m)->m_ext.ref_cnt->refcnt), 1) ! #define _MEXT_ALLOC_CNT(m_cnt) MBUFLOCK( \ union mext_refcnt *__mcnt; \ \ ! if ((mext_refcnt_free == NULL) && (m_alloc_ref(1) == 0)) \ ! panic("mbuf subsystem: out of ref counts!"); \ ! __mcnt = mext_refcnt_free; \ ! mext_refcnt_free = __mcnt->next_ref; \ ! __mcnt->refcnt = 0; \ ! (m_cnt) = __mcnt; \ mbstat.m_refree--; \ ! ) #define _MEXT_DEALLOC_CNT(m_cnt) do { \ union mext_refcnt *__mcnt = (m_cnt); \ \ ! __mcnt->next_ref = mext_refcnt_free; \ ! mext_refcnt_free = __mcnt; \ mbstat.m_refree++; \ } while (0) #define MEXT_INIT_REF(m) do { \ --- 294,324 ---- #define MEXT_ADD_REF(m) atomic_add_long(&((m)->m_ext.ref_cnt->refcnt), 1) ! #define _MEXT_ALLOC_CNT(m_cnt) do { \ union mext_refcnt *__mcnt; \ \ ! mtx_enter(&mcntfree->m_mtx, MTX_DEF); \ ! if (mcntfree->m_head == NULL) { \ ! mtx_exit(&mcntfree->m_mtx, MTX_DEF); \ ! if (m_alloc_ref(1) == 0) \ ! panic("mbuf subsystem: out of ref counts!"); \ ! } \ ! __mcnt = mcntfree->m_head; \ ! mcntfree->m_head = __mcnt->next_ref; \ mbstat.m_refree--; \ ! mtx_exit(&mcntfree->m_mtx, MTX_DEF); \ ! 
__mcnt->refcnt = 0; \ ! (m_cnt) = __mcnt; \ ! } while (0) #define _MEXT_DEALLOC_CNT(m_cnt) do { \ union mext_refcnt *__mcnt = (m_cnt); \ \ ! mtx_enter(&mcntfree->m_mtx, MTX_DEF); \ ! __mcnt->next_ref = mcntfree->m_head; \ ! mcntfree->m_head = __mcnt; \ mbstat.m_refree++; \ + mtx_exit(&mcntfree->m_mtx, MTX_DEF); \ } while (0) #define MEXT_INIT_REF(m) do { \ *************** *** 327,349 **** struct mbuf *_mm; \ int _mhow = (how); \ int _mtype = (type); \ - int _ms = splimp(); \ \ ! if (mmbfree == NULL) \ ! (void)m_mballoc(1, _mhow); \ ! _mm = mmbfree; \ if (_mm != NULL) { \ ! mmbfree = _mm->m_next; \ mbtypes[MT_FREE]--; \ mbtypes[_mtype]++; \ ! splx(_ms); \ _mm->m_type = _mtype; \ _mm->m_next = NULL; \ _mm->m_nextpkt = NULL; \ _mm->m_data = _mm->m_dat; \ _mm->m_flags = 0; \ } else { \ ! splx(_ms); \ _mm = m_retry(_mhow, _mtype); \ if (_mm == NULL && _mhow == M_WAIT) \ _mm = m_mballoc_wait(MGET_C, _mtype); \ --- 342,367 ---- struct mbuf *_mm; \ int _mhow = (how); \ int _mtype = (type); \ \ ! mtx_enter(&mmbfree->m_mtx, MTX_DEF); \ ! if (mmbfree->m_head == NULL) { \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ ! if (m_mballoc(1, _mhow) == 0) \ ! mtx_enter(&mmbfree->m_mtx, MTX_DEF); \ ! } \ ! _mm = mmbfree->m_head; \ if (_mm != NULL) { \ ! mmbfree->m_head = _mm->m_next; \ mbtypes[MT_FREE]--; \ mbtypes[_mtype]++; \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ _mm->m_type = _mtype; \ _mm->m_next = NULL; \ _mm->m_nextpkt = NULL; \ _mm->m_data = _mm->m_dat; \ _mm->m_flags = 0; \ } else { \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ _mm = m_retry(_mhow, _mtype); \ if (_mm == NULL && _mhow == M_WAIT) \ _mm = m_mballoc_wait(MGET_C, _mtype); \ *************** *** 355,370 **** struct mbuf *_mm; \ int _mhow = (how); \ int _mtype = (type); \ - int _ms = splimp(); \ \ ! if (mmbfree == NULL) \ ! (void)m_mballoc(1, _mhow); \ ! _mm = mmbfree; \ if (_mm != NULL) { \ ! mmbfree = _mm->m_next; \ mbtypes[MT_FREE]--; \ mbtypes[_mtype]++; \ ! splx(_ms); \ _mm->m_type = _mtype; \ _mm->m_next = NULL; \ _mm->m_nextpkt = NULL; \ --- 373,391 ---- struct mbuf *_mm; \ int _mhow = (how); \ int _mtype = (type); \ \ ! mtx_enter(&mmbfree->m_mtx, MTX_DEF); \ ! if (mmbfree->m_head == NULL) { \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ ! if (m_mballoc(1, _mhow) == 0) \ ! mtx_enter(&mmbfree->m_mtx, MTX_DEF); \ ! } \ ! _mm = mmbfree->m_head; \ if (_mm != NULL) { \ ! mmbfree->m_head = _mm->m_next; \ mbtypes[MT_FREE]--; \ mbtypes[_mtype]++; \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ _mm->m_type = _mtype; \ _mm->m_next = NULL; \ _mm->m_nextpkt = NULL; \ *************** *** 374,380 **** _mm->m_pkthdr.csum_flags = 0; \ _mm->m_pkthdr.aux = NULL; \ } else { \ ! splx(_ms); \ _mm = m_retryhdr(_mhow, _mtype); \ if (_mm == NULL && _mhow == M_WAIT) \ _mm = m_mballoc_wait(MGETHDR_C, _mtype); \ --- 395,401 ---- _mm->m_pkthdr.csum_flags = 0; \ _mm->m_pkthdr.aux = NULL; \ } else { \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ _mm = m_retryhdr(_mhow, _mtype); \ if (_mm == NULL && _mhow == M_WAIT) \ _mm = m_mballoc_wait(MGETHDR_C, _mtype); \ *************** *** 393,409 **** #define _MCLALLOC(p, how) do { \ caddr_t _mp; \ int _mhow = (how); \ - int _ms = splimp(); \ \ ! if (mclfree == NULL) \ ! (void)m_clalloc(1, _mhow); \ ! _mp = (caddr_t)mclfree; \ if (_mp != NULL) { \ mbstat.m_clfree--; \ ! mclfree = ((union mcluster *)_mp)->mcl_next; \ ! splx(_ms); \ } else { \ ! splx(_ms); \ if (_mhow == M_WAIT) \ _mp = m_clalloc_wait(); \ } \ --- 414,433 ---- #define _MCLALLOC(p, how) do { \ caddr_t _mp; \ int _mhow = (how); \ \ ! mtx_enter(&mclfree->m_mtx, MTX_DEF); \ ! 
if (mclfree->m_head == NULL) { \ ! mtx_exit(&mclfree->m_mtx, MTX_DEF); \ ! if (m_clalloc(1, _mhow) == 0) \ ! mtx_enter(&mclfree->m_mtx, MTX_DEF); \ ! } \ ! _mp = (caddr_t)mclfree->m_head; \ if (_mp != NULL) { \ mbstat.m_clfree--; \ ! mclfree->m_head = ((union mcluster *)_mp)->mcl_next; \ ! mtx_exit(&mclfree->m_mtx, MTX_DEF); \ } else { \ ! mtx_exit(&mclfree->m_mtx, MTX_DEF); \ if (_mhow == M_WAIT) \ _mp = m_clalloc_wait(); \ } \ *************** *** 436,451 **** MEXT_INIT_REF(_mm); \ } while (0) ! #define _MCLFREE(p) MBUFLOCK( \ union mcluster *_mp = (union mcluster *)(p); \ \ ! _mp->mcl_next = mclfree; \ ! mclfree = _mp; \ mbstat.m_clfree++; \ MCLWAKEUP(); \ ! ) ! #define _MEXTFREE(m) do { \ struct mbuf *_mmm = (m); \ \ if (MEXT_IS_REF(_mmm)) \ --- 460,477 ---- MEXT_INIT_REF(_mm); \ } while (0) ! #define _MCLFREE(p) do { \ union mcluster *_mp = (union mcluster *)(p); \ \ ! mtx_enter(&mclfree->m_mtx, MTX_DEF); \ ! _mp->mcl_next = mclfree->m_head; \ ! mclfree->m_head = _mp; \ mbstat.m_clfree++; \ + mtx_exit(&mclfree->m_mtx, MTX_DEF); \ MCLWAKEUP(); \ ! } while (0) ! #define MEXTFREE(m) do { \ struct mbuf *_mmm = (m); \ \ if (MEXT_IS_REF(_mmm)) \ *************** *** 461,489 **** _mmm->m_flags &= ~M_EXT; \ } while (0) - #define MEXTFREE(m) MBUFLOCK( \ - _MEXTFREE(m); \ - ) - /* * MFREE(struct mbuf *m, struct mbuf *n) * Free a single mbuf and associated external storage. * Place the successor, if any, in n. */ ! #define MFREE(m, n) MBUFLOCK( \ struct mbuf *_mm = (m); \ \ KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \ if (_mm->m_flags & M_EXT) \ ! _MEXTFREE(_mm); \ mbtypes[_mm->m_type]--; \ _mm->m_type = MT_FREE; \ mbtypes[MT_FREE]++; \ (n) = _mm->m_next; \ ! _mm->m_next = mmbfree; \ ! mmbfree = _mm; \ MMBWAKEUP(); \ ! ) /* * Copy mbuf pkthdr from "from" to "to". --- 487,513 ---- _mmm->m_flags &= ~M_EXT; \ } while (0) /* * MFREE(struct mbuf *m, struct mbuf *n) * Free a single mbuf and associated external storage. * Place the successor, if any, in n. */ ! #define MFREE(m, n) do { \ struct mbuf *_mm = (m); \ \ KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \ if (_mm->m_flags & M_EXT) \ ! MEXTFREE(_mm); \ ! mtx_enter(&mmbfree->m_mtx, MTX_DEF); \ mbtypes[_mm->m_type]--; \ _mm->m_type = MT_FREE; \ mbtypes[MT_FREE]++; \ (n) = _mm->m_next; \ ! _mm->m_next = mmbfree->m_head; \ ! mmbfree->m_head = _mm; \ mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ MMBWAKEUP(); \ ! } while (0) /* * Copy mbuf pkthdr from "from" to "to". *************** *** 557,571 **** *_mmp = _mm; \ } while (0) ! /* change mbuf to new type */ #define MCHTYPE(m, t) do { \ struct mbuf *_mm = (m); \ int _mt = (t); \ - int _ms = splimp(); \ \ mbtypes[_mm->m_type]--; \ mbtypes[_mt]++; \ ! splx(_ms); \ _mm->m_type = (_mt); \ } while (0) --- 581,600 ---- *_mmp = _mm; \ } while (0) ! /* change mbuf to new type ! * ! * XXX: ! * mbtypes is protected by the same mutex that protects the mmbfree list, ! * since the type counters only change when an mbuf is allocated, freed ! * or retyped, and those paths already hold that lock. ! */ #define MCHTYPE(m, t) do { \ struct mbuf *_mm = (m); \ int _mt = (t); \ \ + mtx_enter(&mmbfree->m_mtx, MTX_DEF); \ mbtypes[_mm->m_type]--; \ mbtypes[_mt]++; \ ! mtx_exit(&mmbfree->m_mtx, MTX_DEF); \ _mm->m_type = (_mt); \ } while (0) *************** *** 594,608 **** extern u_long mbtypes[MT_NTYPES]; /* per-type mbuf allocations */ extern int mbuf_wait; /* mbuf sleep time */ extern struct mbuf *mbutl; /* virtual address of mclusters */ ! extern union mcluster *mclfree; ! extern struct mbuf *mmbfree; ! 
extern union mext_refcnt *mext_refcnt_free; extern int nmbclusters; extern int nmbufs; extern int nsfbufs; void m_adj __P((struct mbuf *, int)); int m_alloc_ref __P((u_int)); void m_cat __P((struct mbuf *,struct mbuf *)); int m_clalloc __P((int, int)); caddr_t m_clalloc_wait __P((void)); --- 623,638 ---- extern u_long mbtypes[MT_NTYPES]; /* per-type mbuf allocations */ extern int mbuf_wait; /* mbuf sleep time */ extern struct mbuf *mbutl; /* virtual address of mclusters */ ! extern struct mclfree_lst mclfree_lst_hdr, *mclfree; ! extern struct mbffree_lst mbffree_lst_hdr, *mmbfree; ! extern struct mcntfree_lst mcntfree_lst_hdr, *mcntfree; extern int nmbclusters; extern int nmbufs; extern int nsfbufs; void m_adj __P((struct mbuf *, int)); int m_alloc_ref __P((u_int)); + void mbinit __P((void)); void m_cat __P((struct mbuf *,struct mbuf *)); int m_clalloc __P((int, int)); caddr_t m_clalloc_wait __P((void)); Index: sys/socketvar.h =================================================================== RCS file: /home/ncvs/src/sys/sys/socketvar.h,v retrieving revision 1.52 diff -u -c -r1.52 socketvar.h *** sys/socketvar.h 2000/09/06 18:49:13 1.52 --- sys/socketvar.h 2000/09/11 06:29:05 *************** *** 119,124 **** --- 119,125 ---- void *so_accept_filter_arg; /* saved filter args */ char *so_accept_filter_str; /* saved user args */ } *so_accf; + mtx_t so_mtx; }; /* *************** *** 227,240 **** } /* ! * Set lock on sockbuf sb; sleep if lock is already held. * Unless SB_NOINTR is set on sockbuf, sleep is interruptible. * Returns error without lock if sleep is interrupted. */ ! #define sblock(sb, wf) ((sb)->sb_flags & SB_LOCK ? \ (((wf) == M_WAITOK) ? sb_lock(sb) : EWOULDBLOCK) : \ ((sb)->sb_flags |= SB_LOCK), 0) /* release lock on sockbuf sb */ #define sbunlock(sb) { \ (sb)->sb_flags &= ~SB_LOCK; \ --- 228,249 ---- } /* ! * Must be called with the socket's mutex held. ! * Set lock on sockbuf sb; sleep if lock is already held and M_WAITOK is set. * Unless SB_NOINTR is set on sockbuf, sleep is interruptible. * Returns error without lock if sleep is interrupted. */ ! #define sblock(so, sb, wf) ((sb)->sb_flags & SB_LOCK ? \ (((wf) == M_WAITOK) ? sb_lock(so, sb) : EWOULDBLOCK) : \ ((sb)->sb_flags |= SB_LOCK), 0) + /* \ + #define sblock(sb, wf) \ + ((mtx_try_enter((sb)->sb_mtx, MTX_DEF) == 0 && (wf) == M_WAITOK) ? \ + mtx_enter((sb)->sb_mtx,MTX_DEF| \ + ((sb->sb_flags & SB_NOINTR) ? 0 : MTX_CATCH)) : \ + EWOULDBLOCK) + */ /* release lock on sockbuf sb */ #define sbunlock(sb) { \ (sb)->sb_flags &= ~SB_LOCK; \ --- 252,263 ---- wakeup((caddr_t)&(sb)->sb_flags); \ } \ } ! /*#define sbunlock(sb) \ ! do { \ ! wakeup((caddr_t)&(sb)->sb_flags); \ ! mtx_exit(so->so_mtx, MTX_DEF); \ ! } while (0) ! */ #define sorwakeup(so) do { \ if (sb_notify(&(so)->so_rcv)) \ sowakeup((so), &(so)->so_rcv); \ *************** *** 297,302 **** --- 311,317 ---- extern u_long sb_max; extern struct vm_zone *socket_zone; extern so_gen_t so_gencnt; + extern mtx_t socket_alloc_lock; struct file; struct filedesc; *************** *** 345,352 **** int sbreserve __P((struct sockbuf *sb, u_long cc, struct socket *so, struct proc *p)); void sbtoxsockbuf __P((struct sockbuf *sb, struct xsockbuf *xsb)); ! int sbwait __P((struct sockbuf *sb)); ! 
int sb_lock __P((struct sockbuf *sb)); int soabort __P((struct socket *so)); int soaccept __P((struct socket *so, struct sockaddr **nam)); struct socket *soalloc __P((int waitok)); --- 360,367 ---- int sbreserve __P((struct sockbuf *sb, u_long cc, struct socket *so, struct proc *p)); void sbtoxsockbuf __P((struct sockbuf *sb, struct xsockbuf *xsb)); ! int sbwait __P((struct socket *so, struct sockbuf *sb)); ! int sb_lock __P((struct socket *so, struct sockbuf *sb)); int soabort __P((struct socket *so)); int soaccept __P((struct socket *so, struct sockaddr **nam)); struct socket *soalloc __P((int waitok)); Index: sys/ucred.h =================================================================== RCS file: /home/ncvs/src/sys/sys/ucred.h,v retrieving revision 1.15 diff -u -c -r1.15 ucred.h *** sys/ucred.h 2000/09/05 22:11:13 1.15 --- sys/ucred.h 2000/09/10 02:35:02 *************** *** 55,61 **** #define FSCRED ((struct ucred *)-1) /* filesystem credential */ #ifdef _KERNEL ! #define crhold(cr) (cr)->cr_ref++ void change_euid __P((struct proc *p, uid_t euid)); struct ucred *crcopy __P((struct ucred *cr)); --- 55,61 ---- #define FSCRED ((struct ucred *)-1) /* filesystem credential */ #ifdef _KERNEL ! #define crhold(cr) atomic_add_short(&((cr)->cr_ref), 1) void change_euid __P((struct proc *p, uid_t euid)); struct ucred *crcopy __P((struct ucred *cr));
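The statistics conversions above all follow one pattern: a plain `counter++` on a shared tcpstat/ip6stat field is a read-modify-write that can lose updates once two CPUs run the stack concurrently, so each bump becomes a single atomic_add_long(). A minimal standalone sketch of the pattern; the struct and function names here are made up for illustration, only atomic_add_long() itself is the real machine/atomic.h primitive:

#include <sys/param.h>
#include <machine/atomic.h>

/* Hypothetical statistics record standing in for struct tcpstat. */
struct demo_stat {
	u_long	ds_pkts;	/* packets seen */
	u_long	ds_bytes;	/* bytes seen */
};

static struct demo_stat demo_stat;

static void
demo_count(u_long len)
{
	/*
	 * Was "demo_stat.ds_pkts++;": a read-modify-write that can
	 * lose updates when two CPUs race without Giant or spl
	 * protection.  The atomic form is one locked bus operation.
	 */
	atomic_add_long(&demo_stat.ds_pkts, 1);
	atomic_add_long(&demo_stat.ds_bytes, len);
	/*
	 * The first argument must be the plain address of the counter;
	 * something like atomic_add_long(&demo_stat.ds_pkts++, 1) does
	 * not even compile, because "++" yields an rvalue.
	 */
}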
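The reworked mbuf macros (MGET, MGETHDR, _MCLALLOC, _MEXT_ALLOC_CNT) share a second pattern: every free list gets its own mutex in its header struct, and, judging from the way the macros re-enter the lock only when m_mballoc()/m_clalloc()/m_alloc_ref() return 0, the refill functions are expected to come back already holding the list mutex on success. A standalone sketch of that shape, under that assumption and with hypothetical names throughout; mtx_init/mtx_enter/mtx_exit, MTX_DEF and the MALLOC macro are the real APIs used elsewhere in this patch, and the machine/mutex.h include location is likewise an assumption:

#include <sys/param.h>
#include <sys/malloc.h>
#include <machine/mutex.h>

struct obj {
	struct obj *o_next;
};

struct obj_lst {
	struct obj *m_head;
	mtx_t	m_mtx;
};

static struct obj_lst objfree;

/* Called once, early, like the mbinit() call added to machdep.c. */
static void
obj_init(void)
{
	mtx_init(&objfree.m_mtx, "obj free list", MTX_DEF);
	objfree.m_head = NULL;
}

/*
 * Slow path: add one object to the free list.  Mirrors the convention
 * the macros rely on: returns 1 *holding* objfree.m_mtx on success,
 * 0 with the mutex released on failure.
 */
static int
obj_refill(void)
{
	struct obj *o;

	MALLOC(o, struct obj *, sizeof(*o), M_TEMP, M_NOWAIT);
	if (o == NULL)
		return (0);
	mtx_enter(&objfree.m_mtx, MTX_DEF);
	o->o_next = objfree.m_head;
	objfree.m_head = o;
	return (1);
}

static struct obj *
obj_alloc(void)
{
	struct obj *o;

	mtx_enter(&objfree.m_mtx, MTX_DEF);
	if (objfree.m_head == NULL) {
		mtx_exit(&objfree.m_mtx, MTX_DEF);
		if (obj_refill() == 0)
			mtx_enter(&objfree.m_mtx, MTX_DEF);
		/* on success, obj_refill() came back holding the lock */
	}
	if ((o = objfree.m_head) != NULL)
		objfree.m_head = o->o_next;
	mtx_exit(&objfree.m_mtx, MTX_DEF);
	return (o);
}

static void
obj_free(struct obj *o)
{
	mtx_enter(&objfree.m_mtx, MTX_DEF);
	o->o_next = objfree.m_head;
	objfree.m_head = o;
	mtx_exit(&objfree.m_mtx, MTX_DEF);
}

Having the refill path hand the lock back still held lets the fast path fall straight through to the list without a second acquire/release, at the price of an asymmetric convention that deserves a comment at every call site.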
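Finally, the ucred change turns the credential reference count into an atomic: crhold() becomes atomic_add_short(). The release side of this idiom depends on a decrement that updates the count and returns the new value in one indivisible step; a value-returning atomic_subtract_short() is being assumed here, since the stock primitive returns void. A sketch with hypothetical names:

#include <sys/param.h>
#include <sys/malloc.h>
#include <machine/atomic.h>

/* Hypothetical refcounted object standing in for struct ucred. */
struct refobj {
	u_short	ro_ref;		/* reference count */
};

#define	REFOBJ_HOLD(ro)	atomic_add_short(&(ro)->ro_ref, 1)

static void
refobj_rele(struct refobj *ro)
{
	/*
	 * Whichever thread drops the last reference sees the count
	 * reach zero and may free the object without a lock: every
	 * other thread still holds a reference, so none can race the
	 * free.  NOTE: this assumes a subtract-and-test variant of
	 * atomic_subtract_short() that returns the new count.
	 */
	if (atomic_subtract_short(&ro->ro_ref, 1) == 0)
		FREE(ro, M_TEMP);
}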