? softdep.patch
Index: ffs_softdep.c
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ffs/ffs_softdep.c,v
retrieving revision 1.101
diff -u -r1.101 ffs_softdep.c
--- ffs_softdep.c	2001/09/27 21:04:13	1.101
+++ ffs_softdep.c	2001/09/28 00:51:07
@@ -61,6 +61,10 @@
 #include
 #include
 #include
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 500023)
+#include
+#include
+#endif /* __FreeBSD__ && (__FreeBSD_version >= 500023) */
 #include
 #include
 #include
@@ -229,21 +233,49 @@
  * of the lock calls. Since the uniprocessor sleep already interlocks
  * the spl, there is nothing that really needs to be done.
  */
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 500023)
+static struct mtx lk;
+static int sema_timo;
+
+#define INIT_LOCK(lk)		mtx_init((lk), "softdep", MTX_DEF)
+#define ACQUIRE_LOCK(lk)	mtx_lock((lk))
+#define FREE_LOCK(lk)		mtx_unlock((lk))
+#define LOCK_ASSERT_FREE(lk)	mtx_assert((lk), MA_NOTOWNED)
+#define LOCK_ASSERT_HELD(lk)	mtx_assert((lk), MA_OWNED)
+#define LOCK_HELD(lk)		mtx_owned((lk))
+/* XXX */
+#define ACQUIRE_LOCK_INTERLOCKED	ACQUIRE_LOCK
+#define FREE_LOCK_INTERLOCKED		FREE_LOCK
+
+#define sema_init(sema, name, prio, timo) do {				\
+	sema_timo = (timo);						\
+	sema_init((sema), 0, (name));					\
+} while (0)
+#define sema_get(sema, interlock) ({					\
+	mtx_unlock((interlock));					\
+	sema_timedwait((sema), sema_timo);				\
+})
+#define sema_release(sema)	sema_post((sema))
+#else
 #ifndef /* NOT */ DEBUG
 static struct lockit {
 	int	lkt_spl;
 } lk = { 0 };
+#define INIT_LOCK(lk)
 #define ACQUIRE_LOCK(lk)		(lk)->lkt_spl = splbio()
 #define FREE_LOCK(lk)			splx((lk)->lkt_spl)
 #define ACQUIRE_LOCK_INTERLOCKED(lk)
 #define FREE_LOCK_INTERLOCKED(lk)
+#define LOCK_ASSERT_FREE(lk)
+#define LOCK_ASSERT_HELD(lk)
+#define LOCK_HELD(lk)	/* XXX: broken */
 #else /* DEBUG */
-#define NOHOLDER	((struct thread *)-1)
-#define SPECIAL_FLAG	((struct thread *)-2)
+#define NOHOLDER	-1
+#define SPECIAL_FLAG	-2
 static struct lockit {
 	int	lkt_spl;
-	struct thread *lkt_held;
+	pid_t	lkt_held;
 } lk = { 0, NOHOLDER };
 static int lockcnt;
@@ -252,27 +284,37 @@
 static	void acquire_lock_interlocked __P((struct lockit *));
 static	void free_lock_interlocked __P((struct lockit *));
+#define INIT_LOCK(lk)
 #define ACQUIRE_LOCK(lk)		acquire_lock(lk)
 #define FREE_LOCK(lk)			free_lock(lk)
 #define ACQUIRE_LOCK_INTERLOCKED(lk)	acquire_lock_interlocked(lk)
 #define FREE_LOCK_INTERLOCKED(lk)	free_lock_interlocked(lk)
+#define LOCK_ASSERT_FREE(lk) do {					\
+	if ((lk)->lkt_held != NOHOLDER)					\
+		panic(__func__ ": lock held");				\
+} while (0)
+#define LOCK_ASSERT_HELD(lk) do {					\
+	if ((lk)->lkt_held != curproc->p_pid)				\
+		panic(__func__ ": lock not held");			\
+} while (0)
+#define LOCK_HELD(lk)	((lk)->lkt_held != NOHOLDER)
 static void
 acquire_lock(lk)
 	struct lockit *lk;
 {
-	struct thread *holder;
+	pid_t holder;
 	if (lk->lkt_held != NOHOLDER) {
 		holder = lk->lkt_held;
 		FREE_LOCK(lk);
-		if (holder == curthread)
+		if (holder == curproc->p_pid)
 			panic("softdep_lock: locking against myself");
 		else
-			panic("softdep_lock: lock held by %p", holder);
+			panic("softdep_lock: lock held by %d", holder);
 	}
 	lk->lkt_spl = splbio();
-	lk->lkt_held = curthread;
+	lk->lkt_held = curproc->p_pid;
 	lockcnt++;
 }
@@ -291,18 +333,18 @@
 acquire_lock_interlocked(lk)
 	struct lockit *lk;
 {
-	struct thread *holder;
+	pid_t holder;
 	if (lk->lkt_held != NOHOLDER) {
 		holder = lk->lkt_held;
 		FREE_LOCK(lk);
-		if (holder == curthread)
+		if (holder == curproc->p_pid)
 			panic("softdep_lock_interlocked: locking against self");
 		else
-			panic("softdep_lock_interlocked: lock held by %p",
panic("softdep_lock_interlocked: lock held by %p", + panic("softdep_lock_interlocked: lock held by %d", holder); } - lk->lkt_held = curthread; + lk->lkt_held = curproc->p_pid; lockcnt++; } @@ -317,12 +359,21 @@ } #endif /* DEBUG */ +#define msleep(ident, lock, priority, wmesg, timo) ({ \ + int error; \ + \ + FREE_LOCK_INTERLOCKED((lock)); \ + error = tsleep((ident), (priority), (wmesg), (timo)); \ + ACQUIRE_LOCK_INTERLOCKED((lock)); \ + error; \ +}) + /* * Place holder for real semaphores. */ struct sema { int value; - struct thread *holder; + pid_t holder; char *name; int prio; int timo; @@ -361,7 +412,7 @@ } return (0); } - semap->holder = curthread; + semap->holder = curproc->p_pid; if (interlock != NULL) FREE_LOCK(interlock); return (1); @@ -372,7 +423,7 @@ struct sema *semap; { - if (semap->value <= 0 || semap->holder != curthread) { + if (semap->value <= 0 || semap->holder != curproc->p_pid) { if (lk.lkt_held != NOHOLDER) FREE_LOCK(&lk); panic("sema_release: not held"); @@ -383,6 +434,7 @@ } semap->holder = NOHOLDER; } +#endif /* __FreeBSD__ && (__FreeBSD_version >= 500023) */ /* * Worklist queue management. @@ -414,8 +466,7 @@ struct worklist *item; { - if (lk.lkt_held == NOHOLDER) - panic("worklist_insert: lock not held"); + LOCK_ASSERT_HELD(&lk); if (item->wk_state & ONWORKLIST) { FREE_LOCK(&lk); panic("worklist_insert: already on list"); @@ -429,8 +480,7 @@ struct worklist *item; { - if (lk.lkt_held == NOHOLDER) - panic("worklist_remove: lock not held"); + LOCK_ASSERT_HELD(&lk); if ((item->wk_state & ONWORKLIST) == 0) { FREE_LOCK(&lk); panic("worklist_remove: not on list"); @@ -446,12 +496,12 @@ { if (item->wk_state & ONWORKLIST) { - if (lk.lkt_held != NOHOLDER) + if (LOCK_HELD(&lk)) FREE_LOCK(&lk); panic("workitem_free: still on list"); } if (item->wk_type != type) { - if (lk.lkt_held != NOHOLDER) + if (LOCK_HELD(&lk)) FREE_LOCK(&lk); panic("workitem_free: type mismatch"); } @@ -520,7 +570,7 @@ static struct worklist *worklist_tail; if (wk->wk_state & ONWORKLIST) { - if (lk.lkt_held != NOHOLDER) + if (LOCK_HELD(&lk)) FREE_LOCK(&lk); panic("add_to_worklist: already on list"); } @@ -886,10 +936,7 @@ struct mount *mp; int i; -#ifdef DEBUG - if (lk.lkt_held == NOHOLDER) - panic("pagedep_lookup: lock not held"); -#endif + LOCK_ASSERT_HELD(&lk); mp = ITOV(ip)->v_mount; pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn); top: @@ -954,10 +1001,7 @@ struct inodedep_hashhead *inodedephd; int firsttry; -#ifdef DEBUG - if (lk.lkt_held == NOHOLDER) - panic("inodedep_lookup: lock not held"); -#endif + LOCK_ASSERT_HELD(&lk); firsttry = 1; inodedephd = INODEDEP_HASH(fs, inum); top: @@ -1227,10 +1271,7 @@ struct bmsafemap *bmsafemap; struct worklist *wk; -#ifdef DEBUG - if (lk.lkt_held == NOHOLDER) - panic("bmsafemap_lookup: lock not held"); -#endif + LOCK_ASSERT_HELD(&lk); LIST_FOREACH(wk, &bp->b_dep, wk_list) if (wk->wk_type == D_BMSAFEMAP) return (WK_BMSAFEMAP(wk)); @@ -1399,10 +1440,7 @@ struct freefrag *freefrag; struct newdirblk *newdirblk; -#ifdef DEBUG - if (lk.lkt_held == NOHOLDER) - panic("allocdirect_merge: lock not held"); -#endif + LOCK_ASSERT_HELD(&lk); if (newadp->ad_oldblkno != oldadp->ad_newblkno || newadp->ad_oldsize != oldadp->ad_newsize || newadp->ad_lbn >= NDADDR) { @@ -1987,10 +2025,7 @@ struct newdirblk *newdirblk; struct worklist *wk; -#ifdef DEBUG - if (lk.lkt_held == NOHOLDER) - panic("free_allocdirect: lock not held"); -#endif + LOCK_ASSERT_HELD(&lk); if ((adp->ad_state & DEPCOMPLETE) == 0) LIST_REMOVE(adp, ad_deps); TAILQ_REMOVE(adphead, adp, ad_next); @@ -2029,10 
 	struct diradd *dap;
 	int i;
-#ifdef DEBUG
-	if (lk.lkt_held == NOHOLDER)
-		panic("free_newdirblk: lock not held");
-#endif
+	LOCK_ASSERT_HELD(&lk);
 	/*
 	 * If the pagedep is still linked onto the directory buffer
 	 * dependency chain, then some of the entries on the
@@ -2352,10 +2384,7 @@
 {
 	struct freefrag *freefrag;
-#ifdef DEBUG
-	if (lk.lkt_held == NOHOLDER)
-		panic("free_allocindir: lock not held");
-#endif
+	LOCK_ASSERT_HELD(&lk);
 	if ((aip->ai_state & DEPCOMPLETE) == 0)
 		LIST_REMOVE(aip, ai_deps);
 	if (aip->ai_state & ONWORKLIST)
@@ -2610,10 +2639,7 @@
 	struct inodedep *inodedep;
 	struct mkdir *mkdir, *nextmd;
-#ifdef DEBUG
-	if (lk.lkt_held == NOHOLDER)
-		panic("free_diradd: lock not held");
-#endif
+	LOCK_ASSERT_HELD(&lk);
 	WORKLIST_REMOVE(&dap->da_list);
 	LIST_REMOVE(dap, da_pdlist);
 	if ((dap->da_state & DIRCHG) == 0) {
@@ -3423,9 +3449,8 @@
 	struct inodedep *inodedep;
 	struct bmsafemap *bmsafemap;
+	LOCK_ASSERT_FREE(&lk);
 #ifdef DEBUG
-	if (lk.lkt_held != NOHOLDER)
-		panic("softdep_disk_write_complete: lock is held");
 	lk.lkt_held = SPECIAL_FLAG;
 #endif
 	LIST_INIT(&reattach);
@@ -4862,9 +4887,7 @@
 	proc_waiting += 1;
 	if (handle.callout == NULL)
 		handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2);
-	FREE_LOCK_INTERLOCKED(&lk);
-	(void) tsleep((caddr_t)&proc_waiting, PPAUSE, "softupdate", 0);
-	ACQUIRE_LOCK_INTERLOCKED(&lk);
+	(void) msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
 	proc_waiting -= 1;
 	if (islocked == 0)
 		FREE_LOCK(&lk);
@@ -5114,9 +5137,7 @@
 		if (waitfor != MNT_WAIT)
 			return (0);
 		bp->b_xflags |= BX_BKGRDWAIT;
-		FREE_LOCK_INTERLOCKED(&lk);
-		tsleep(&bp->b_xflags, PRIBIO, "getbuf", 0);
-		ACQUIRE_LOCK_INTERLOCKED(&lk);
+		msleep(&bp->b_xflags, &lk, PRIBIO, "getbuf", 0);
 		continue;
 	}
 	if (waitfor != MNT_WAIT)
@@ -5148,9 +5169,8 @@
 	ACQUIRE_LOCK(&lk);
 	while (vp->v_numoutput) {
 		vp->v_flag |= VBWAIT;
-		FREE_LOCK_INTERLOCKED(&lk);
-		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "drainvp", 0);
-		ACQUIRE_LOCK_INTERLOCKED(&lk);
+		msleep((caddr_t)&vp->v_numoutput, &lk, PRIBIO + 1, "drainvp",
+		    0);
 	}
 	if (!islocked)
 		FREE_LOCK(&lk);
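
Not part of the patch, just an illustration of the pattern it moves to: the
msleep() compatibility macro above (and real msleep(9) on 5.x) exists because,
once the softdep lock is a mutex rather than an spl level, dropping the lock
and going to sleep has to happen as one step with respect to the wakeup,
instead of the old FREE_LOCK_INTERLOCKED / tsleep / ACQUIRE_LOCK_INTERLOCKED
dance that only works because the uniprocessor spl interlocks the sleep.  A
minimal userland sketch of that "unlock, sleep, relock" shape using POSIX
threads follows; the names (proc_waiting_cv, resources_available, waiter,
waker) are invented for the example and do not appear in ffs_softdep.c.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-ins for the softdep mutex and wait channel; all names invented. */
static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t proc_waiting_cv = PTHREAD_COND_INITIALIZER;
static int resources_available;
static int proc_waiting;

/*
 * Rough analogue of the patched request_cleanup() path: the caller holds
 * lk, and pthread_cond_wait() plays the role msleep() plays in the patch,
 * releasing lk, sleeping, and reacquiring lk before returning.
 */
static void *
waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lk);		/* ACQUIRE_LOCK(&lk) */
	proc_waiting += 1;
	while (!resources_available)
		pthread_cond_wait(&proc_waiting_cv, &lk);
	proc_waiting -= 1;
	pthread_mutex_unlock(&lk);		/* FREE_LOCK(&lk) */
	printf("waiter: woken with lock reacquired\n");
	return (NULL);
}

/* Rough analogue of pause_timer() doing wakeup(&proc_waiting). */
static void *
waker(void *arg)
{
	(void)arg;
	sleep(1);
	pthread_mutex_lock(&lk);
	resources_available = 1;
	pthread_cond_broadcast(&proc_waiting_cv);
	pthread_mutex_unlock(&lk);
	return (NULL);
}

int
main(void)
{
	pthread_t wt, kt;

	pthread_create(&wt, NULL, waiter, NULL);
	pthread_create(&kt, NULL, waker, NULL);
	pthread_join(wt, NULL);
	pthread_join(kt, NULL);
	return (0);
}

Build with "cc -pthread sleep_demo.c" if you want to run it; the point is only
that the unlock-and-sleep is a single primitive, which is what the msleep()
call sites in the last three hunks rely on.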