Index: nfsclient/nfs_node.c
===================================================================
--- nfsclient/nfs_node.c	(revision 211527)
+++ nfsclient/nfs_node.c	(working copy)
@@ -150,6 +150,7 @@
 	/*
 	 * NFS supports recursive and shared locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	VN_LOCK_AREC(vp);
 	VN_LOCK_ASHARE(vp);
 	if (fhsize > NFS_SMALLFH) {
@@ -158,7 +159,6 @@
 		np->n_fhp = &np->n_fh;
 	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
 	np->n_fhsize = fhsize;
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	error = insmntque(vp, mntp);
 	if (error != 0) {
 		*npp = NULL;
Index: gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c
===================================================================
--- gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c	(revision 211527)
+++ gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c	(working copy)
@@ -389,8 +389,8 @@
 		return (error);
 	}
 
+	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	VN_LOCK_AREC(vp);
-	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
 	if (error != 0) {
 		kmem_free(vdata, sizeof(*vdata));
Index: ufs/ffs/ffs_vfsops.c
===================================================================
--- ufs/ffs/ffs_vfsops.c	(revision 211527)
+++ ufs/ffs/ffs_vfsops.c	(working copy)
@@ -1501,6 +1501,7 @@
 	/*
 	 * FFS supports recursive locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
 	VN_LOCK_AREC(vp);
 	vp->v_data = ip;
 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
@@ -1518,7 +1519,6 @@
 	}
 #endif
 
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
 	if (ffs_flags & FFSV_FORCEINSMQ)
 		vp->v_vflag |= VV_FORCEINSMQ;
 	error = insmntque(vp, mp);
Index: ufs/ffs/ffs_softdep.c
===================================================================
--- ufs/ffs/ffs_softdep.c	(revision 211527)
+++ ufs/ffs/ffs_softdep.c	(working copy)
@@ -904,8 +904,8 @@
 #define ACQUIRE_LOCK(lk)	mtx_lock(lk)
 #define FREE_LOCK(lk)		mtx_unlock(lk)
 
-#define	BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
-#define	BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)
+#define	BUF_AREC(bp)	lockallowrecurse(&(bp)->b_lock)
+#define	BUF_NOREC(bp)	lockdisablerecurse(&(bp)->b_lock)
 
 /*
  * Worklist queue management.
Index: kern/vfs_lookup.c
===================================================================
--- kern/vfs_lookup.c	(revision 211527)
+++ kern/vfs_lookup.c	(working copy)
@@ -91,7 +91,9 @@
 	error = getnewvnode("crossmp", NULL, &dead_vnodeops, &vp_crossmp);
 	if (error != 0)
 		panic("nameiinit: getnewvnode");
+	vn_lock(vp_crossmp, LK_EXCLUSIVE);
 	VN_LOCK_ASHARE(vp_crossmp);
+	VOP_UNLOCK(vp_crossmp, 0);
 }
 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nameiinit, NULL);
 
Index: kern/kern_lock.c
===================================================================
--- kern/kern_lock.c	(revision 211527)
+++ kern/kern_lock.c	(working copy)
@@ -396,7 +396,35 @@
 	STACK_ZERO(lk);
 }
 
+/*
+ * XXX: Gross hacks to manipulate external lock flags after
+ * initialization. Used for certain vnode and buf locks.
+ */
 void
+lockallowshare(struct lock *lk)
+{
+
+	lockmgr_assert(lk, KA_XLOCKED);
+	lk->lock_object.lo_flags &= ~LK_NOSHARE;
+}
+
+void
+lockallowrecurse(struct lock *lk)
+{
+
+	lockmgr_assert(lk, KA_XLOCKED);
+	lk->lock_object.lo_flags |= LO_RECURSABLE;
+}
+
+void
+lockdisablerecurse(struct lock *lk)
+{
+
+	lockmgr_assert(lk, KA_XLOCKED);
+	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
+}
+
+void
 lockdestroy(struct lock *lk)
 {
 
Index: fs/nfsclient/nfs_clport.c
===================================================================
--- fs/nfsclient/nfs_clport.c	(revision 211527)
+++ fs/nfsclient/nfs_clport.c	(working copy)
@@ -230,9 +230,9 @@
 	/*
 	 * NFS supports recursive and shared locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	VN_LOCK_AREC(vp);
 	VN_LOCK_ASHARE(vp);
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	error = insmntque(vp, mntp);
 	if (error != 0) {
 		*npp = NULL;
Index: fs/nfsclient/nfs_clnode.c
===================================================================
--- fs/nfsclient/nfs_clnode.c	(revision 211527)
+++ fs/nfsclient/nfs_clnode.c	(working copy)
@@ -140,6 +140,7 @@
 	/*
 	 * NFS supports recursive and shared locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	VN_LOCK_AREC(vp);
 	VN_LOCK_ASHARE(vp);
 	/*
@@ -157,7 +158,6 @@
 	    M_NFSFH, M_WAITOK);
 	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
 	np->n_fhp->nfh_len = fhsize;
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	error = insmntque(vp, mntp);
 	if (error != 0) {
 		*npp = NULL;
Index: fs/pseudofs/pseudofs_vncache.c
===================================================================
--- fs/pseudofs/pseudofs_vncache.c	(revision 211527)
+++ fs/pseudofs/pseudofs_vncache.c	(working copy)
@@ -189,8 +189,8 @@
 	if ((pn->pn_flags & PFS_PROCDEP) != 0)
 		(*vpp)->v_vflag |= VV_PROCDEP;
 	pvd->pvd_vnode = *vpp;
+	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
 	VN_LOCK_AREC(*vpp);
-	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
 	error = insmntque(*vpp, mp);
 	if (error != 0) {
 		free(pvd, M_PFSVNCACHE);
Index: fs/devfs/devfs_vnops.c
===================================================================
--- fs/devfs/devfs_vnops.c	(revision 211527)
+++ fs/devfs/devfs_vnops.c	(working copy)
@@ -412,8 +412,8 @@
 	} else {
 		vp->v_type = VBAD;
 	}
+	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
 	VN_LOCK_ASHARE(vp);
-	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
 	mtx_lock(&devfs_de_interlock);
 	vp->v_data = de;
 	de->de_vnode = vp;
Index: fs/smbfs/smbfs_node.c
===================================================================
--- fs/smbfs/smbfs_node.c	(revision 211527)
+++ fs/smbfs/smbfs_node.c	(working copy)
@@ -253,8 +253,8 @@
 	} else if (vp->v_type == VREG)
 		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
 
+	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	VN_LOCK_AREC(vp);
-	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 
 	smbfs_hash_lock(smp);
 	LIST_FOREACH(np2, nhpp, n_hash) {
Index: fs/nwfs/nwfs_node.c
===================================================================
--- fs/nwfs/nwfs_node.c	(revision 211527)
+++ fs/nwfs/nwfs_node.c	(working copy)
@@ -185,7 +185,6 @@
 	if (dvp) {
 		np->n_parent = VTONW(dvp)->n_fid;
 	}
-	VN_LOCK_AREC(vp);
 	sx_xlock(&nwhashlock);
 	/*
 	 * Another process can create vnode while we blocked in malloc() or
@@ -202,6 +201,7 @@
 	nhpp = NWNOHASH(fid);
 	LIST_INSERT_HEAD(nhpp, np, n_hash);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+	VN_LOCK_AREC(vp);
 	sx_xunlock(&nwhashlock);
 
 	ASSERT_VOP_LOCKED(dvp, "nwfs_allocvp");
Index: sys/lockmgr.h
===================================================================
--- sys/lockmgr.h	(revision 211527)
+++ sys/lockmgr.h	(working copy)
@@ -73,7 +73,10 @@
 #endif
 void	 _lockmgr_disown(struct lock *lk, const char *file, int line);
 
+void	 lockallowrecurse(struct lock *lk);
+void	 lockallowshare(struct lock *lk);
 void	 lockdestroy(struct lock *lk);
+void	 lockdisablerecurse(struct lock *lk);
 void	 lockinit(struct lock *lk, int prio, const char *wmesg, int timo,
 	    int flags);
 #ifdef DDB
Index: sys/vnode.h
===================================================================
--- sys/vnode.h	(revision 211527)
+++ sys/vnode.h	(working copy)
@@ -419,10 +419,8 @@
 #define	VI_UNLOCK(vp)	mtx_unlock(&(vp)->v_interlock)
 #define	VI_MTX(vp)	(&(vp)->v_interlock)
 
-#define	VN_LOCK_AREC(vp)					\
-	((vp)->v_vnlock->lock_object.lo_flags |= LO_RECURSABLE)
-#define	VN_LOCK_ASHARE(vp)					\
-	((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
+#define	VN_LOCK_AREC(vp)	lockallowrecurse((vp)->v_vnlock)
+#define	VN_LOCK_ASHARE(vp)	lockallowshare((vp)->v_vnlock)
 
 #endif /* _KERNEL */
 
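
Every hunk above follows the same pattern: VN_LOCK_AREC()/VN_LOCK_ASHARE() (and BUF_AREC()/BUF_NOREC()) now route through lockallowrecurse(), lockallowshare(), and lockdisablerecurse(), which assert KA_XLOCKED, so each call site is reordered to acquire the lock exclusively before toggling the flags. The sketch below is not part of the patch; it only illustrates the required ordering in a hypothetical filesystem vget routine ("myfs" and myfs_vget_example() are made-up names, while getnewvnode(), vn_lock(), VN_LOCK_AREC(), VN_LOCK_ASHARE(), and insmntque() are the real kernel interfaces).

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mount.h>
#include <sys/vnode.h>

static int
myfs_vget_example(struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode("myfs", mp, vops, &vp);
	if (error != 0)
		return (error);
	/*
	 * Lock first, then toggle the flags: the helpers assert
	 * KA_XLOCKED, so calling VN_LOCK_AREC()/VN_LOCK_ASHARE() on an
	 * unlocked vnode would now trip the assertion under INVARIANTS.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(vp);		/* allow recursive locking */
	VN_LOCK_ASHARE(vp);		/* allow shared locking */
	error = insmntque(vp, mp);	/* releases the vnode on failure */
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	*vpp = vp;
	return (0);
}

This matches the reordered call sites in the patch: the lock is taken before the flag helpers run and is still held when insmntque() is called.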