? compile/STELLA
? i386/conf/BUNKO.hints
? i386/conf/BUNKO
? i386/conf/STELLA
? kern/.nfsA7cad4.4
? kern/.nfsA76124.4
? kern/.nfsB76124.4
? kern/.nfsC76124.4
? kern/.nfsA55024.4
? kern/.nfsA27df4.4
? sys/.nfsA27df4.4
? ufs/ffs/.nfsA55024.4
? vm/.nfsA76124.4
? vm/.nfsAffffdafe4.4
Index: ufs/ffs/ffs_softdep.c
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ffs/ffs_softdep.c,v
retrieving revision 1.80
diff -u -r1.80 ffs_softdep.c
--- ufs/ffs/ffs_softdep.c	2000/12/13 08:30:30	1.80
+++ ufs/ffs/ffs_softdep.c	2000/12/14 06:30:51
@@ -608,8 +608,10 @@
 		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
 			break;
 		dirrem = WK_DIRREM(wk);
+		ufs_ihashlock();
 		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
 		    dirrem->dm_oldinum);
+		ufs_ihashunlock();
 		if (vp == NULL || !VOP_ISLOCKED(vp, CURPROC))
			break;
 	}
Index: ufs/ffs/ffs_vfsops.c
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ffs/ffs_vfsops.c,v
retrieving revision 1.134
diff -u -r1.134 ffs_vfsops.c
--- ufs/ffs/ffs_vfsops.c	2000/12/13 10:03:52	1.134
+++ ufs/ffs/ffs_vfsops.c	2000/12/14 06:30:51
@@ -1011,23 +1011,6 @@
  * return the inode locked. Detection and handling of mount points must be
  * done by the calling routine.
  */
-static int ffs_inode_hash_lock;
-/*
- * ffs_inode_hash_lock is a variable to manage mutual exclusion
- * of vnode allocation and intertion to the hash, especially to
- * avoid holding more than one vnodes for the same inode in the
- * hash table. ffs_inode_hash_lock must hence be tested-and-set
- * or cleared atomically, accomplished by ffs_inode_hash_mtx.
- *
- * As vnode allocation may block during MALLOC() and zone
- * allocation, we should also do msleep() to give away the CPU
- * if anyone else is allocating a vnode. lockmgr is not suitable
- * here because someone else may insert to the hash table the
- * vnode we are trying to allocate during our sleep, in which
- * case the hash table needs to be examined once again after
- * waking up.
- */
-static struct mtx ffs_inode_hash_mtx;
 
 int
 ffs_vget(mp, ino, vpp)
@@ -1041,31 +1024,21 @@
 	struct buf *bp;
 	struct vnode *vp;
 	dev_t dev;
-	int error, want_wakeup;
+	int error;
 
 	ump = VFSTOUFS(mp);
 	dev = ump->um_dev;
-restart:
-	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
-		return (0);
-	}
 
 	/*
-	 * Lock out the creation of new entries in the FFS hash table in
-	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
-	 * may occur!
-	 */
-	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
-	if (ffs_inode_hash_lock) {
-		while (ffs_inode_hash_lock) {
-			ffs_inode_hash_lock = -1;
-			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
-		}
-		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
-		goto restart;
+	 * We have to look-up-and-possibly-insert a vnode
+	 * atomically.
+	 */
+	ufs_ihashlock();
+
+	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
+		ufs_ihashunlock();
+		return (0);
 	}
-	ffs_inode_hash_lock = 1;
-	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
 
 	/*
 	 * If this MALLOC() is performed after the getnewvnode()
@@ -1080,17 +1053,7 @@
 	/* Allocate a new vnode/inode. */
 	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
 	if (error) {
-		/*
-		 * Do not wake up processes while holding the mutex,
-		 * otherwise the processes waken up immediately hit
-		 * themselves into the mutex.
-		 */
-		mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
-		want_wakeup = ffs_inode_hash_lock < 0;
-		ffs_inode_hash_lock = 0;
-		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
-		if (want_wakeup)
-			wakeup(&ffs_inode_hash_lock);
+		ufs_ihashunlock();
 		*vpp = NULL;
 		FREE(ip, ump->um_malloctype);
 		return (error);
 	}
@@ -1121,17 +1084,7 @@
 	 */
 	ufs_ihashins(ip);
 
-	/*
-	 * Do not wake up processes while holding the mutex,
-	 * otherwise the processes waken up immediately hit
-	 * themselves into the mutex.
-	 */
-	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
-	want_wakeup = ffs_inode_hash_lock < 0;
-	ffs_inode_hash_lock = 0;
-	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
-	if (want_wakeup)
-		wakeup(&ffs_inode_hash_lock);
+	ufs_ihashunlock();
 
 	/* Read in the disk contents for the inode, copy into the inode. */
 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
@@ -1248,7 +1201,6 @@
 {
 
 	softdep_initialize();
-	mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
 	return (ufs_init(vfsp));
 }
 
Index: ufs/ifs/ifs_vfsops.c
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ifs/ifs_vfsops.c,v
retrieving revision 1.3
diff -u -r1.3 ifs_vfsops.c
--- ufs/ifs/ifs_vfsops.c	2000/12/13 10:04:01	1.3
+++ ufs/ifs/ifs_vfsops.c	2000/12/14 06:30:51
@@ -101,7 +101,6 @@
 ifs_init(vfsp)
 	struct vfsconf *vfsp;
 {
-	mtx_init(&ifs_inode_hash_mtx, "ifsvgt", MTX_DEF);
 	return (ufs_init(vfsp));
 }
 
@@ -132,23 +131,6 @@
  * return the inode locked. Detection and handling of mount points must be
  * done by the calling routine.
  */
-static int ifs_inode_hash_lock;
-/*
- * ifs_inode_hash_lock is a variable to manage mutual exclusion
- * of vnode allocation and intertion to the hash, especially to
- * avoid holding more than one vnodes for the same inode in the
- * hash table. ifs_inode_hash_lock must hence be tested-and-set
- * or cleared atomically, accomplished by ifs_inode_hash_mtx.
- *
- * As vnode allocation may block during MALLOC() and zone
- * allocation, we should also do msleep() to give away the CPU
- * if anyone else is allocating a vnode. lockmgr is not suitable
- * here because someone else may insert to the hash table the
- * vnode we are trying to allocate during our sleep, in which
- * case the hash table needs to be examined once again after
- * waking up.
- */
-static struct mtx ifs_inode_hash_mtx;
 
 int
 ifs_vget(mp, ino, vpp)
@@ -162,31 +144,21 @@
 	struct buf *bp;
 	struct vnode *vp;
 	dev_t dev;
-	int error, want_wakeup;
+	int error;
 
 	ump = VFSTOUFS(mp);
 	dev = ump->um_dev;
-restart:
-	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
-		return (0);
-	}
 
 	/*
-	 * Lock out the creation of new entries in the FFS hash table in
-	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
-	 * may occur!
+	 * We have to look-up-and-possibly-insert a vnode
+	 * atomically.
 	 */
-	mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
-	if (ifs_inode_hash_lock) {
-		while (ifs_inode_hash_lock) {
-			ifs_inode_hash_lock = -1;
-			msleep(&ifs_inode_hash_lock, &ifs_inode_hash_mtx, PVM, "ifsvgt", 0);
-		}
-		mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
-		goto restart;
+	ufs_ihashlock();
+
+	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
+		ufs_ihashunlock();
+		return (0);
 	}
-	ifs_inode_hash_lock = 1;
-	mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
 
 	/*
 	 * If this MALLOC() is performed after the getnewvnode()
@@ -201,17 +173,7 @@
 	/* Allocate a new vnode/inode. */
 	error = getnewvnode(VT_UFS, mp, ifs_vnodeop_p, &vp);
 	if (error) {
-		/*
-		 * Do not wake up processes while holding the mutex,
-		 * otherwise the processes waken up immediately hit
-		 * themselves into the mutex.
-		 */
-		mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
-		want_wakeup = ifs_inode_hash_lock < 0;
-		ifs_inode_hash_lock = 0;
-		mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
-		if (want_wakeup)
-			wakeup(&ifs_inode_hash_lock);
+		ufs_ihashunlock();
 		*vpp = NULL;
 		FREE(ip, ump->um_malloctype);
 		return (error);
 	}
@@ -242,17 +204,7 @@
 	 */
 	ufs_ihashins(ip);
 
-	/*
-	 * Do not wake up processes while holding the mutex,
-	 * otherwise the processes waken up immediately hit
-	 * themselves into the mutex.
-	 */
-	mtx_enter(&ifs_inode_hash_mtx, MTX_DEF);
-	want_wakeup = ffs_inode_hash_lock < 0;
-	ifs_inode_hash_lock = 0;
-	mtx_exit(&ifs_inode_hash_mtx, MTX_DEF);
-	if (want_wakeup)
-		wakeup(&ifs_inode_hash_lock);
+	ufs_ihashunlock();
 
 	/* Read in the disk contents for the inode, copy into the inode. */
 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
Index: ufs/ufs/ufs_extern.h
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ufs/ufs_extern.h,v
retrieving revision 1.29
diff -u -r1.29 ufs_extern.h
--- ufs/ufs/ufs_extern.h	2000/12/13 08:30:35	1.29
+++ ufs/ufs/ufs_extern.h	2000/12/14 06:30:51
@@ -79,9 +79,11 @@
 	 ufs_ihashget __P((dev_t, ino_t));
 void	 ufs_ihashinit __P((void));
 void	 ufs_ihashins __P((struct inode *));
+void	 ufs_ihashlock __P((void));
 struct vnode *
 	 ufs_ihashlookup __P((dev_t, ino_t));
 void	 ufs_ihashrem __P((struct inode *));
+void	 ufs_ihashunlock __P((void));
 int	 ufs_inactive __P((struct vop_inactive_args *));
 int	 ufs_init __P((struct vfsconf *));
 void	 ufs_itimes __P((struct vnode *vp));
Index: ufs/ufs/ufs_ihash.c
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ufs/ufs_ihash.c,v
retrieving revision 1.25
diff -u -r1.25 ufs_ihash.c
--- ufs/ufs/ufs_ihash.c	2000/10/29 13:57:19	1.25
+++ ufs/ufs/ufs_ihash.c	2000/12/14 06:30:51
@@ -40,9 +40,8 @@
 #include
 #include
 #include
+#include
 
-#include
-
 #include
 #include
 #include
@@ -54,9 +53,15 @@
 static LIST_HEAD(ihashhead, inode) *ihashtbl;
 static u_long	ihash;		/* size of hash table - 1 */
 #define	INOHASH(device, inum)	(&ihashtbl[(minor(device) + (inum)) & ihash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock ufs_ihash_slock;
-#endif
+
+/*
+ * This is a mutex to accomplish atomic hash operation. Since atomic
+ * lookup-and-insertion is required to allocate a new vnode, a caller
+ * of a hash operation function is responsible for entering to and
+ * exiting from the mutex by calling ufs_ihashlock or ufs_ihashunlock.
+ * (except for calling ufs_ihashinit, of course)
+ */
+static struct mtx ufs_ihash_mtx;
 
 /*
  * Initialize inode hash table.
@@ -66,7 +71,7 @@
 {
 
 	ihashtbl = hashinit(desiredvnodes, M_UFSIHASH, &ihash);
-	simple_lock_init(&ufs_ihash_slock);
+	mtx_init(&ufs_ihash_mtx, "ufsih", MTX_DEF);
 }
 
 /*
@@ -80,11 +85,9 @@
 {
 	struct inode *ip;
 
-	simple_lock(&ufs_ihash_slock);
 	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
-	simple_unlock(&ufs_ihash_slock);
 
 	if (ip)
 		return (ITOV(ip));
@@ -105,18 +108,15 @@
 	struct vnode *vp;
 
 loop:
-	simple_lock(&ufs_ihash_slock);
 	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
 		if (inum == ip->i_number && dev == ip->i_dev) {
 			vp = ITOV(ip);
 			mtx_enter(&vp->v_interlock, MTX_DEF);
-			simple_unlock(&ufs_ihash_slock);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (vp);
 		}
 	}
-	simple_unlock(&ufs_ihash_slock);
 	return (NULL);
 }
 
@@ -133,11 +133,9 @@
 	/* lock the inode, then put it on the appropriate hash list */
 	lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
 
-	simple_lock(&ufs_ihash_slock);
 	ipp = INOHASH(ip->i_dev, ip->i_number);
 	LIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
-	simple_unlock(&ufs_ihash_slock);
 }
 
 /*
@@ -147,7 +145,6 @@
 ufs_ihashrem(ip)
 	struct inode *ip;
 {
-	simple_lock(&ufs_ihash_slock);
 	if (ip->i_flag & IN_HASHED) {
 		ip->i_flag &= ~IN_HASHED;
 		LIST_REMOVE(ip, i_hash);
@@ -156,5 +153,22 @@
 		ip->i_hash.le_prev = NULL;
 #endif
 	}
-	simple_unlock(&ufs_ihash_slock);
+}
+
+/*
+ * Lock the hash table.
+ */
+void
+ufs_ihashlock()
+{
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
+}
+
+/*
+ * Unlock the hash table.
+ */
+void
+ufs_ihashunlock()
+{
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 }
Index: ufs/ufs/ufs_inode.c
===================================================================
RCS file: /home/ncvs/src/sys/ufs/ufs/ufs_inode.c,v
retrieving revision 1.30
diff -u -r1.30 ufs_inode.c
--- ufs/ufs/ufs_inode.c	2000/10/04 01:29:16	1.30
+++ ufs/ufs/ufs_inode.c	2000/12/14 06:30:51
@@ -138,7 +138,9 @@
 	/*
 	 * Remove the inode from its hash chain.
 	 */
+	ufs_ihashlock();
 	ufs_ihashrem(ip);
+	ufs_ihashunlock();
 	/*
 	 * Purge old data structures associated with the inode.
 	 */
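
For reviewers, the protocol the patch establishes is: the caller takes ufs_ihash_mtx with ufs_ihashlock(), does the lookup, and keeps holding the mutex until it has either found an existing vnode or inserted the newly allocated one with ufs_ihashins(), so two threads can no longer race through getnewvnode()/MALLOC() and end up with duplicate vnodes for the same inode. That is also why ufs_ihashlookup(), ufs_ihashget(), ufs_ihashins() and ufs_ihashrem() lose their internal simplelock: callers such as the dirrem check in ffs_softdep.c and the reclaim path in ufs_inode.c now bracket the calls themselves. The sketch below is a minimal userland analogue of that look-up-and-possibly-insert discipline; the table, node type and helper names are invented for the example and a pthread mutex stands in for ufs_ihash_mtx, so treat it only as an illustration of the pattern, not as code from the tree.

/*
 * Illustration only: userland analogue of the locking discipline above.
 * None of these names exist in the patch; pthread_mutex_lock/unlock
 * play the role of ufs_ihashlock()/ufs_ihashunlock().
 */
#include <pthread.h>
#include <stdlib.h>

#define NHASH	64

struct node {
	unsigned long	 key;		/* plays the role of the inode number */
	struct node	*next;
};

static struct node *hashtbl[NHASH];
static pthread_mutex_t hash_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold hash_mtx; compare with ufs_ihashlookup()/ufs_ihashget(). */
static struct node *
lookup_locked(unsigned long key)
{
	struct node *n;

	for (n = hashtbl[key % NHASH]; n != NULL; n = n->next)
		if (n->key == key)
			return (n);
	return (NULL);
}

/*
 * Atomic lookup-or-insert, mirroring the patched ffs_vget()/ifs_vget():
 * the mutex is held across the lookup, the allocation and the insertion,
 * so a second caller asking for the same key either finds the entry or
 * blocks until it has been inserted.
 */
struct node *
get_node(unsigned long key)
{
	struct node *n;

	pthread_mutex_lock(&hash_mtx);			/* ufs_ihashlock() */
	if ((n = lookup_locked(key)) != NULL) {
		pthread_mutex_unlock(&hash_mtx);	/* ufs_ihashunlock() */
		return (n);
	}
	if ((n = malloc(sizeof(*n))) == NULL) {		/* allocation, as getnewvnode()/MALLOC() */
		pthread_mutex_unlock(&hash_mtx);
		return (NULL);
	}
	n->key = key;
	n->next = hashtbl[key % NHASH];			/* insertion, as ufs_ihashins() */
	hashtbl[key % NHASH] = n;
	pthread_mutex_unlock(&hash_mtx);		/* ufs_ihashunlock() */
	return (n);
}

Note that, as in the patch, the allocation happens while the mutex is held; holding one mutex across the whole look-up-and-insert is what removes the need for the old ffs_inode_hash_lock/ifs_inode_hash_lock flags, the msleep() loop and the restart label.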