diff -ru /cvs/sys_old/kern/uipc_mbuf.c /usr/src/sys/kern/uipc_mbuf.c
--- /cvs/sys_old/kern/uipc_mbuf.c	Tue Jan 9 18:58:56 2001
+++ /usr/src/sys/kern/uipc_mbuf.c	Mon Jan 15 18:59:00 2001
@@ -186,6 +186,9 @@
 	caddr_t p;
 	u_int nbytes;
 	int i;
+#ifdef WITNESS
+	struct proc *pr = CURPROC;
+#endif
 
 	/*
 	 * We don't cap the amount of memory that can be used
@@ -205,12 +208,22 @@
 
 	nbytes = round_page(nmb * sizeof(union mext_refcnt));
 	mtx_exit(&mcntfree.m_mtx, MTX_DEF);
+#ifdef WITNESS
+	/*
+	 * XXX: Make sure we don't create lock order problems.
+	 * XXX: We'll grab Giant, but for that to be OK, make sure
+	 * XXX: that either Giant is already held OR make sure that
+	 * XXX: no other locks are held coming in.
+	 * XXX: Revisit once most of the net stuff gets locks added.
+	 */
+	KASSERT(mtx_owned(&Giant) || witness_list(pr) == 0,
+	    ("m_alloc_ref: Giant must be owned or no locks held"));
+#endif
 	mtx_enter(&Giant, MTX_DEF);
 	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
 	    M_WAITOK : M_NOWAIT)) == NULL) {
 		mtx_exit(&Giant, MTX_DEF);
-		mtx_enter(&mcntfree.m_mtx, MTX_DEF);	/* XXX: We must be holding
-							   it going out. */
+		mtx_enter(&mcntfree.m_mtx, MTX_DEF);
 		return (0);
 	}
 	mtx_exit(&Giant, MTX_DEF);
@@ -245,6 +258,9 @@
 	register caddr_t p;
 	register int i;
 	int nbytes;
+#ifdef WITNESS
+	struct proc *pr = CURPROC;
+#endif
 
 	/*
 	 * If we've hit the mbuf limit, stop allocating from mb_map.
@@ -264,9 +280,18 @@
 
 	nbytes = round_page(nmb * MSIZE);
 
-	/* XXX: The letting go of the mmbfree lock here may eventually
-	   be moved to only be done for M_TRYWAIT calls to kmem_malloc() */
 	mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+#ifdef WITNESS
+	/*
+	 * XXX: Make sure we don't create lock order problems.
+	 * XXX: We'll grab Giant, but for that to be OK, make sure
+	 * XXX: that either Giant is already held OR make sure that
+	 * XXX: no other locks are held coming in.
+	 * XXX: Revisit once most of the net stuff gets locks added.
+	 */
+	KASSERT(mtx_owned(&Giant) || witness_list(pr) == 0,
+	    ("m_mballoc: Giant must be owned or no locks held"));
+#endif
 	mtx_enter(&Giant, MTX_DEF);
 	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
 	if (p == 0 && how == M_TRYWAIT) {
@@ -377,6 +402,9 @@
 	register caddr_t p;
 	register int i;
 	int npg;
+#ifdef WITNESS
+	struct proc *pr = CURPROC;
+#endif
 
 	/*
 	 * If the map is now full (nothing will ever be freed to it).
@@ -390,6 +418,17 @@
 
 	npg = ncl;
 	mtx_exit(&mclfree.m_mtx, MTX_DEF);
+#ifdef WITNESS
+	/*
+	 * XXX: Make sure we don't create lock order problems.
+	 * XXX: We'll grab Giant, but for that to be OK, make sure
+	 * XXX: that either Giant is already held OR make sure that
+	 * XXX: no other locks are held coming in.
+	 * XXX: Revisit once most of the net stuff gets locks added.
+	 */
+	KASSERT(mtx_owned(&Giant) || witness_list(pr) == 0,
+	    ("m_clalloc: Giant must be owned or no locks held"));
+#endif
 	mtx_enter(&Giant, MTX_DEF);
 	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
 	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
@@ -466,6 +505,12 @@
 {
 	register struct domain *dp;
 	register struct protosw *pr;
+#ifdef WITNESS
+	struct proc *prp = CURPROC;
+
+	KASSERT(witness_list(prp) == 0,
+	    ("m_reclaim called with locks held"));
+#endif
 
 	for (dp = domains; dp; dp = dp->dom_next)
 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
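
The same three-line WITNESS assertion now guards each kmem_malloc() call in
the three allocator paths (m_alloc_ref(), m_mballoc(), m_clalloc()). For
illustration only, the pattern could be factored into a single macro;
MB_GIANT_ASSERT is a hypothetical name and is not part of this patch or of
the tree:

#ifdef WITNESS
/*
 * Hypothetical helper (not in this patch): assert that it is safe to
 * grab Giant here, i.e. we either already own it or we hold no locks
 * at all, so that acquiring it cannot introduce a lock order reversal.
 */
#define	MB_GIANT_ASSERT(func)						\
	KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,	\
	    (func ": Giant must be owned or no locks held"))
#else
#define	MB_GIANT_ASSERT(func)
#endif

Each call site would then reduce to one line, e.g.
MB_GIANT_ASSERT("m_mballoc"); immediately before mtx_enter(&Giant, MTX_DEF),
and the whole check still compiles away in kernels built without
"options WITNESS".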