--- g_cache.c	Tue Jul 11 05:55:45 2006
+++ sys/geom/cache/g_cache.c	Tue Jul 11 06:47:28 2006
@@ -101,6 +101,33 @@
 #define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
 #define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)
 
+static int
+g_cache_in_bucket(struct g_cache_softc *sc, struct g_cache_desc *dp)
+{
+	struct g_cache_desc *dp2;
+	int i;
+
+	for (i = 0; i < G_CACHE_BUCKETS; i++) {
+		LIST_FOREACH(dp2, &sc->sc_desclist[i], d_next) {
+			if (dp2 == dp)
+				return (1);
+		}
+	}
+	return (0);
+}
+
+static int
+g_cache_used(struct g_cache_softc *sc, struct g_cache_desc *dp)
+{
+	struct g_cache_desc *dp2;
+
+	TAILQ_FOREACH(dp2, &sc->sc_usedlist, d_used) {
+		KASSERT((dp2->d_flags & D_FLAG_USED), ("%s: dp2=%p no used flag", __func__, dp2));
+		if (dp2 == dp)
+			return (1);
+	}
+	return (0);
+}
 
 static struct g_cache_desc *
 g_cache_alloc(struct g_cache_softc *sc)
@@ -111,9 +138,16 @@
 
 	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
 		dp = TAILQ_FIRST(&sc->sc_usedlist);
+		if (!g_cache_used(sc, dp))
+			panic("%s: dp=%p not on used list", __func__, dp);
+		KASSERT((dp->d_flags & D_FLAG_USED), ("%s: dp=%p no used flag", __func__, dp));
 		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
 		sc->sc_nused--;
+		KASSERT(sc->sc_nused >= 0, ("%s: sc_nused < 0", __func__));
+		KASSERT(dp->d_flags == D_FLAG_USED, ("%s: dp=%p more flags 0x%x", __func__, dp, dp->d_flags));
 		dp->d_flags = 0;
+		if (!g_cache_in_bucket(sc, dp))
+			panic("%s: dp=%p not in bucket", __func__, dp);
 		LIST_REMOVE(dp, d_next);
 		return (dp);
 	}
@@ -124,7 +158,7 @@
 	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
 	if (dp == NULL)
 		return (NULL);
-	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
+	dp->d_data = malloc(sc->sc_bsize, M_GCACHE, M_NOWAIT);
 	if (dp->d_data == NULL) {
 		free(dp, M_GCACHE);
 		return (NULL);
@@ -139,7 +173,11 @@
 
 	mtx_assert(&sc->sc_mtx, MA_OWNED);
 
-	uma_zfree(sc->sc_zone, dp->d_data);
+	if (g_cache_in_bucket(sc, dp))
+		panic("%s: dp=%p still in bucket", __func__, dp);
+	if (g_cache_used(sc, dp))
+		panic("%s: dp=%p still on used list", __func__, dp);
+	free(dp->d_data, M_GCACHE);
 	free(dp, M_GCACHE);
 	sc->sc_nent--;
 }
@@ -156,8 +194,14 @@
 	while (sc->sc_nused > n) {
 		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
 		dp = TAILQ_FIRST(&sc->sc_usedlist);
+		if (!g_cache_used(sc, dp))
+			panic("%s: dp=%p not on used list", __func__, dp);
+		KASSERT((dp->d_flags & D_FLAG_USED), ("%s: dp=%p no used flag", __func__, dp));
 		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
 		sc->sc_nused--;
+		KASSERT(sc->sc_nused >= 0, ("%s: sc_nused < 0", __func__));
+		if (!g_cache_in_bucket(sc, dp))
+			panic("%s: dp=%p not in bucket", __func__, dp);
 		LIST_REMOVE(dp, d_next);
 		g_cache_free(sc, dp);
 	}
@@ -194,8 +238,10 @@
 
 	if (dp->d_flags & D_FLAG_USED) {
 		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
+		KASSERT(!(dp->d_flags & D_FLAG_INVALID), ("%s: dp=%p has invalid flag", __func__, dp));
 		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
 	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
+		KASSERT(!(dp->d_flags & D_FLAG_INVALID), ("%s: dp=%p has invalid flag", __func__, dp));
 		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
 		sc->sc_nused++;
 		dp->d_flags |= D_FLAG_USED;
@@ -224,12 +270,18 @@
 	dp->d_biolist = NULL;
 	if (dp->d_flags & D_FLAG_INVALID) {
 		sc->sc_invalid--;
+		KASSERT(sc->sc_invalid >= 0, ("%s: sc_invalid < 0", __func__));
 		g_cache_free(sc, dp);
 	} else if (bp->bio_error) {
+		if (!g_cache_in_bucket(sc, dp))
+			panic("%s: dp=%p not in bucket", __func__, dp);
 		LIST_REMOVE(dp, d_next);
 		if (dp->d_flags & D_FLAG_USED) {
+			if (!g_cache_used(sc, dp))
+				panic("%s: dp=%p not on used list", __func__, dp);
 			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
 			sc->sc_nused--;
+			KASSERT(sc->sc_nused >= 0, ("%s: sc_nused < 0", __func__));
 		}
 		g_cache_free(sc, dp);
 	}
@@ -290,6 +342,8 @@
 	G_CACHE_NEXT_BIO1(bp) = sc;
 	G_CACHE_NEXT_BIO2(bp) = NULL;
 	dp->d_biolist = bp;
+	if (g_cache_in_bucket(sc, dp))
+		panic("%s: dp=%p already in bucket", __func__, dp);
 	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
 	    dp, d_next);
 	mtx_unlock(&sc->sc_mtx);
@@ -315,10 +369,15 @@
 	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
 	do {
 		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
+			if (!g_cache_in_bucket(sc, dp))
+				panic("%s: dp=%p not in bucket", __func__, dp);
 			LIST_REMOVE(dp, d_next);
 			if (dp->d_flags & D_FLAG_USED) {
+				if (!g_cache_used(sc, dp))
+					panic("%s: dp=%p not on used list", __func__, dp);
 				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
 				sc->sc_nused--;
+				KASSERT(sc->sc_nused >= 0, ("%s: sc_nused < 0", __func__));
 			}
 			if (dp->d_biolist == NULL)
 				g_cache_free(sc, dp);
@@ -411,6 +470,9 @@
 			    dp->d_biolist != NULL ||
 			    time_uptime - dp->d_atime < g_cache_idletime)
 				continue;
+			if (g_cache_used(sc, dp))
+				panic("%s: dp=%p already on used list", __func__, dp);
+			KASSERT(!(dp->d_flags & D_FLAG_INVALID), ("%s: dp=%p has invalid flag", __func__, dp));
 			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
 			sc->sc_nused++;
 			dp->d_flags |= D_FLAG_USED;
@@ -509,8 +571,6 @@
 	sc->sc_type = type;
 	sc->sc_bshift = bshift;
 	sc->sc_bsize = 1 << bshift;
-	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, 0);
 	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
 	for (i = 0; i < G_CACHE_BUCKETS; i++)
 		LIST_INIT(&sc->sc_desclist[i]);
@@ -572,7 +632,7 @@
 {
 	struct g_geom *gp;
 	struct g_provider *pp;
-	struct g_cache_desc *dp, *dp2;
+	struct g_cache_desc *dp;
 	int i;
 
 	g_topology_assert();
@@ -595,16 +655,13 @@
 	callout_drain(&sc->sc_callout);
 	mtx_lock(&sc->sc_mtx);
 	for (i = 0; i < G_CACHE_BUCKETS; i++) {
-		dp = LIST_FIRST(&sc->sc_desclist[i]);
-		while (dp != NULL) {
-			dp2 = LIST_NEXT(dp, d_next);
+		while ((dp = LIST_FIRST(&sc->sc_desclist[i])) != NULL) {
+			LIST_REMOVE(dp, d_next);
 			g_cache_free(sc, dp);
-			dp = dp2;
 		}
 	}
 	mtx_unlock(&sc->sc_mtx);
 	mtx_destroy(&sc->sc_mtx);
-	uma_zdestroy(sc->sc_zone);
 	g_free(sc);
 	gp->softc = NULL;
 	g_wither_geom(gp, ENXIO);