diff -ru /cvs/sys.old/kern/uipc_mbuf.c /usr/src/sys/kern/uipc_mbuf.c --- /cvs/sys.old/kern/uipc_mbuf.c Wed Feb 21 04:24:13 2001 +++ /usr/src/sys/kern/uipc_mbuf.c Thu Mar 15 23:02:21 2001 @@ -40,6 +40,7 @@ #include #include #include +#include <sys/condvar.h> #include #include #include @@ -61,8 +62,6 @@ int nmbclusters; int nmbufs; int nmbcnt; -u_long m_mballoc_wid = 0; -u_long m_clalloc_wid = 0; /* * freelist header structures... @@ -94,9 +93,11 @@ "Maximum number of mbufs available"); SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0, "Maximum number of ext_buf counters available"); + #ifndef NMBCLUSTERS #define NMBCLUSTERS (512 + MAXUSERS * 16) #endif + TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters); TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs); TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt); @@ -110,9 +111,6 @@ /* * Full mbuf subsystem initialization done here. - * - * XXX: If ever we have system specific map setups to do, then move them to - * machdep.c - for now, there is no reason for this stuff to go there. */ static void mbinit(void *dummy) @@ -138,6 +136,8 @@ mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF); mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF); mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF); + cv_init(&mmbfree.m_starved, "mbuf free list starved cv"); + cv_init(&mclfree.m_starved, "mbuf cluster free list starved cv"); /* * Initialize mbuf subsystem (sysctl exported) statistics structure. @@ -293,8 +293,8 @@ * rely solely on reclaimed mbufs. * * Here we request for the protocols to free up some resources and, if we - * still cannot get anything, then we wait for an mbuf to be freed for a - * designated (mbuf_wait) time. + * still cannot get anything, then we block on a cv waiting for an mbuf to be + * freed. We wait for a designated (mbuf_wait) time, at most. * * Must be called with the mmbfree mutex held. 
*/ @@ -320,30 +320,22 @@ _MGET(p, M_DONTWAIT); if (p == NULL) { - m_mballoc_wid++; - msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc", + int retval; + + retval = cv_timedwait(&mmbfree.m_starved, &mmbfree.m_mtx, mbuf_wait); - m_mballoc_wid--; /* - * Try again (one last time). - * - * We retry to fetch _even_ if the sleep timed out. This - * is left this way, purposely, in the [unlikely] case - * that an mbuf was freed but the sleep was not awoken - * in time. - * - * If the sleep didn't time out (i.e. we got woken up) then - * we have the lock so we just grab an mbuf, hopefully. + * If we got signaled (and didn't time out), allocate. */ - _MGET(p, M_DONTWAIT); + if (retval == 0) + _MGET(p, M_DONTWAIT); } - /* If we waited and got something... */ if (p != NULL) { atomic_add_long(&mbstat.m_wait, 1); if (mmbfree.m_head != NULL) - MBWAKEUP(m_mballoc_wid); + MBWAKEUP(&mmbfree.m_starved); } else atomic_add_long(&mbstat.m_drops, 1); @@ -405,8 +397,8 @@ /* * Once the mb_map submap has been exhausted and the allocation is called with * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will - * sleep for a designated amount of time (mbuf_wait) or until we're woken up - * due to sudden mcluster availability. + * block on a cv for a designated amount of time (mbuf_wait) or until we're + * woken up due to a sudden mcluster availability. * * Must be called with the mclfree lock held. */ @@ -414,21 +406,20 @@ m_clalloc_wait(void) { caddr_t p = NULL; + int retval; - m_clalloc_wid++; - msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait); - m_clalloc_wid--; + retval = cv_timedwait(&mclfree.m_starved, &mclfree.m_mtx, mbuf_wait); /* * Now that we (think) that we've got something, try again. */ - _MCLALLOC(p, M_DONTWAIT); + if (retval == 0) + _MCLALLOC(p, M_DONTWAIT); - /* If we waited and got something ... 
*/ if (p != NULL) { atomic_add_long(&mbstat.m_wait, 1); if (mclfree.m_head != NULL) - MBWAKEUP(m_clalloc_wid); + MBWAKEUP(&mclfree.m_starved); } else atomic_add_long(&mbstat.m_drops, 1); @@ -450,7 +441,7 @@ struct protosw *pr; #ifdef WITNESS - KASSERT(witness_list(CURPROC) == 0, + KASSERT(witness_list(curproc) == 0, ("m_reclaim called with locks held")); #endif diff -ru /cvs/sys.old/sys/mbuf.h /usr/src/sys/sys/mbuf.h --- /cvs/sys.old/sys/mbuf.h Sat Feb 17 00:35:56 2001 +++ /usr/src/sys/sys/mbuf.h Thu Mar 15 23:03:11 2001 @@ -37,7 +37,9 @@ #ifndef _SYS_MBUF_H_ #define _SYS_MBUF_H_ -#include <sys/mutex.h> /* XXX */ +/* XXX */ +#include <sys/mutex.h> +#include <sys/condvar.h> /* * Mbufs are of a single size, MSIZE (machine/param.h), which @@ -254,11 +256,13 @@ struct mbffree_lst { struct mbuf *m_head; struct mtx m_mtx; + struct cv m_starved; }; struct mclfree_lst { union mcluster *m_head; struct mtx m_mtx; + struct cv m_starved; }; struct mcntfree_lst { @@ -267,15 +271,14 @@ }; /* - * Wake up the next instance (if any) of a sleeping allocation - which is - * waiting for a {cluster, mbuf} to be freed. + * Signal a single instance (if any) blocked on a m_starved cv (i.e. an + * instance waiting for a {cluster, mbuf} to be freed to the global cache + * lists). * * Must be called with the appropriate mutex held. 
*/ -#define MBWAKEUP(m_wid) do { \ - if ((m_wid)) \ - wakeup_one(&(m_wid)); \ -} while (0) +#define MBWAKEUP(m_cv) \ + cv_signal((m_cv)); /* * mbuf external reference count management macros: @@ -478,7 +481,7 @@ _mp->mcl_next = mclfree.m_head; \ mclfree.m_head = _mp; \ mbstat.m_clfree++; \ - MBWAKEUP(m_clalloc_wid); \ + MBWAKEUP(&mclfree.m_starved); \ mtx_unlock(&mclfree.m_mtx); \ } while (0) @@ -521,7 +524,7 @@ (n) = _mm->m_next; \ _mm->m_next = mmbfree.m_head; \ mmbfree.m_head = _mm; \ - MBWAKEUP(m_mballoc_wid); \ + MBWAKEUP(&mmbfree.m_starved); \ mtx_unlock(&mmbfree.m_mtx); \ } while (0) @@ -633,8 +636,6 @@ }; #ifdef _KERNEL -extern u_long m_clalloc_wid; /* mbuf cluster wait count */ -extern u_long m_mballoc_wid; /* mbuf wait count */ extern int max_linkhdr; /* largest link-level header */ extern int max_protohdr; /* largest protocol header */ extern int max_hdr; /* largest link+protocol header */