diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 0b72249..fa8180c 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -46,9 +46,6 @@
 
 #include <fs/nullfs/null.h>
 
-#define LOG2_SIZEVNODE 8		/* log2(sizeof struct vnode) */
-#define NNULLNODECACHE 16
-
 /*
  * Null layer cache:
  * Each cache entry holds a reference to the lower vnode
@@ -57,11 +54,9 @@
  * alias is removed the lower vnode is vrele'd.
  */
 
-#define	NULL_NHASH(vp) \
-	(&null_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & null_node_hash])
+#define	NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp)])
 
 static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
-static u_long null_node_hash;
 struct mtx null_hashmtx;
 static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
 
@@ -76,9 +71,11 @@ int
 nullfs_init(vfsp)
 	struct vfsconf *vfsp;
 {
+	u_long null_node_hash;
 
-	NULLFSDEBUG("nullfs_init\n");		/* printed during system boot */
-	null_node_hashtbl = hashinit(NNULLNODECACHE, M_NULLFSHASH, &null_node_hash);
+	null_node_hashtbl = hashinit(desiredvnodes, M_NULLFSHASH,
+	    &null_node_hash);
+	MPASS(null_node_hash == vfs_hash_mask);
 	mtx_init(&null_hashmtx, "nullhs", NULL, MTX_DEF);
 	return (0);
 }
@@ -89,7 +86,7 @@ nullfs_uninit(vfsp)
 {
 
 	mtx_destroy(&null_hashmtx);
-	hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_node_hash);
+	hashdestroy(null_node_hashtbl, M_NULLFSHASH, vfs_hash_mask);
 	return (0);
 }
 
diff --git a/sys/kern/vfs_hash.c b/sys/kern/vfs_hash.c
index aad22e0..8e536a6 100644
--- a/sys/kern/vfs_hash.c
+++ b/sys/kern/vfs_hash.c
@@ -39,7 +39,7 @@ static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");
 
 static LIST_HEAD(vfs_hash_head, vnode)	*vfs_hash_tbl;
 static LIST_HEAD(,vnode)		vfs_hash_side;
-static u_long				vfs_hash_mask;
+u_long					vfs_hash_mask;
 static struct mtx			vfs_hash_mtx;
 
 static void
@@ -54,11 +54,18 @@ vfs_hashinit(void *dummy __unused)
 /* Must be SI_ORDER_SECOND so desiredvnodes is available */
 SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);
 
+u_int
+vfs_hash_index(struct vnode *vp)
+{
+
+	return ((vp->v_hash + vp->v_mount->mnt_hashseed) & vfs_hash_mask);
+}
+
 static struct vfs_hash_head *
-vfs_hash_index(const struct mount *mp, u_int hash)
+vfs_hash_bucket(const struct mount *mp, u_int hash)
 {
 
-	return(&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
+	return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
 }
 
 int
@@ -69,7 +76,7 @@ vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td, s
 
 	while (1) {
 		mtx_lock(&vfs_hash_mtx);
-		LIST_FOREACH(vp, vfs_hash_index(mp, hash), v_hashlist) {
+		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
 			if (vp->v_hash != hash)
 				continue;
 			if (vp->v_mount != mp)
@@ -113,7 +120,7 @@ vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td, stru
 	while (1) {
 		mtx_lock(&vfs_hash_mtx);
 		LIST_FOREACH(vp2,
-		    vfs_hash_index(vp->v_mount, hash), v_hashlist) {
+		    vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
 			if (vp2->v_hash != hash)
 				continue;
 			if (vp2->v_mount != vp->v_mount)
@@ -138,7 +145,7 @@ vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td, stru
 		return (error);
 	}
 	vp->v_hash = hash;
-	LIST_INSERT_HEAD(vfs_hash_index(vp->v_mount, hash), vp, v_hashlist);
+	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
 	mtx_unlock(&vfs_hash_mtx);
 	return (0);
 }
@@ -149,7 +156,7 @@ vfs_hash_rehash(struct vnode *vp, u_int hash)
 
 	mtx_lock(&vfs_hash_mtx);
 	LIST_REMOVE(vp, v_hashlist);
-	LIST_INSERT_HEAD(vfs_hash_index(vp->v_mount, hash), vp, v_hashlist);
+	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
 	vp->v_hash = hash;
 	mtx_unlock(&vfs_hash_mtx);
 }
diff --git a/sys/sys/bufobj.h b/sys/sys/bufobj.h
index 3934553..79bef84 100644
--- a/sys/sys/bufobj.h
+++ b/sys/sys/bufobj.h
@@ -89,12 +89,7 @@ struct buf_ops {
 
 struct bufobj {
 	struct mtx	bo_mtx;		/* Mutex which protects "i" things */
-	struct bufv	bo_clean;	/* i Clean buffers */
-	struct bufv	bo_dirty;	/* i Dirty buffers */
-	long		bo_numoutput;	/* i Writes in progress */
-	u_int		bo_flag;	/* i Flags */
 	struct buf_ops	*bo_ops;	/* - Buffer operations */
-	int		bo_bsize;	/* - Block size for i/o */
 	struct vm_object *bo_object;	/* v Place to store VM object */
 	LIST_ENTRY(bufobj) bo_synclist;	/* S dirty vnode list */
 	void		*bo_private;	/* private pointer */
@@ -103,6 +98,11 @@ struct bufobj {
 					 * XXX: only to keep the syncer working
 					 * XXX: for now.
 					 */
+	struct bufv	bo_clean;	/* i Clean buffers */
+	struct bufv	bo_dirty;	/* i Dirty buffers */
+	long		bo_numoutput;	/* i Writes in progress */
+	u_int		bo_flag;	/* i Flags */
+	int		bo_bsize;	/* - Block size for i/o */
 };
 
 /*
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index f841f82..e373f6e 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -99,7 +99,6 @@ struct vnode {
	 * Fields which define the identity of the vnode. These fields are
	 * owned by the filesystem (XXX: and vgone() ?)
	 */
-	enum	vtype v_type;		/* u vnode type */
 	const char *v_tag;		/* u type of underlying data */
 	struct	vop_vector *v_op;	/* u vnode operations vector */
 	void	*v_data;		/* u private data for fs */
@@ -122,10 +121,10 @@ struct vnode {
 	} v_un;
 
 	/*
-	 * vfs_hash: (mount + inode) -> vnode hash.
+	 * vfs_hash: (mount + inode) -> vnode hash. The hash value
+	 * itself is grouped with other int fields, to avoid padding.
 	 */
 	LIST_ENTRY(vnode)	v_hashlist;
-	u_int	v_hash;
 
 	/*
 	 * VFS_namecache stuff
@@ -135,24 +134,11 @@ struct vnode {
 	struct namecache *v_cache_dd;	/* c Cache entry for .. vnode */
 
 	/*
-	 * clustering stuff
-	 */
-	daddr_t	v_cstart;		/* v start block of cluster */
-	daddr_t	v_lasta;		/* v last allocation */
-	daddr_t	v_lastw;		/* v last write */
-	int	v_clen;			/* v length of cur. cluster */
-
-	/*
 	 * Locking
 	 */
 	struct	lock v_lock;		/* u (if fs don't have one) */
 	struct	mtx v_interlock;	/* lock for "i" things */
 	struct	lock *v_vnlock;		/* u pointer to vnode lock */
-	int	v_holdcnt;		/* i prevents recycling. */
-	int	v_usecount;		/* i ref count of users */
-	u_int	v_iflag;		/* i vnode flags (see below) */
-	u_int	v_vflag;		/* v vnode flags */
-	int	v_writecount;		/* v ref count of writers */
 
 	/*
 	 * The machinery of being a vnode
@@ -167,6 +153,22 @@ struct vnode {
 	struct label *v_label;		/* MAC label for vnode */
 	struct lockf *v_lockf;		/* Byte-level advisory lock list */
 	struct rangelock v_rl;		/* Byte-range lock */
+
+	/*
+	 * clustering stuff
+	 */
+	daddr_t	v_cstart;		/* v start block of cluster */
+	daddr_t	v_lasta;		/* v last allocation */
+	daddr_t	v_lastw;		/* v last write */
+	int	v_clen;			/* v length of cur. cluster */
+
+	int	v_holdcnt;		/* i prevents recycling. */
+	int	v_usecount;		/* i ref count of users */
+	u_int	v_iflag;		/* i vnode flags (see below) */
+	u_int	v_vflag;		/* v vnode flags */
+	int	v_writecount;		/* v ref count of writers */
+	u_int	v_hash;
+	enum	vtype v_type;		/* u vnode type */
 };
 
 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
@@ -810,8 +812,10 @@ int	fifo_printinfo(struct vnode *);
 
 /* vfs_hash.c */
 typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);
+extern u_long vfs_hash_mask;
 
 int vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
+u_int vfs_hash_index(struct vnode *vp);
 int vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
 void vfs_hash_rehash(struct vnode *vp, u_int hash);
 void vfs_hash_remove(struct vnode *vp);
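
Usage note (not part of the patch): a minimal sketch of how a stacked filesystem would consume the new interface, mirroring the nullfs hunks above. The myfs_* names and M_MYFSHASH are hypothetical placeholders; vfs_hash_index(), vfs_hash_mask, hashinit(), desiredvnodes and MPASS() are the existing kernel interfaces the patch relies on, with the two extern declarations coming from the sys/sys/vnode.h hunk.

/*
 * Sketch only, assuming the vfs_hash_index()/vfs_hash_mask declarations
 * added to sys/vnode.h by this patch.  "myfs" stands in for any stacked
 * filesystem that keeps a private per-fs vnode hash table.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/vnode.h>

static MALLOC_DEFINE(M_MYFSHASH, "myfs_hash", "myfs hash table");

static LIST_HEAD(myfs_node_hashhead, myfs_node) *myfs_node_hashtbl;
static struct mtx myfs_hashmtx;

/*
 * Index the private table with vfs_hash_index(), so every bucket lookup
 * uses the same (v_hash + mnt_hashseed) & vfs_hash_mask scheme as the
 * global vfs_hash table.
 */
#define	MYFS_NHASH(vp)	(&myfs_node_hashtbl[vfs_hash_index(vp)])

static int
myfs_init(struct vfsconf *vfsp __unused)
{
	u_long mask;

	/*
	 * Size the table from desiredvnodes, exactly as vfs_hashinit()
	 * does; hashinit() then hands back the same power-of-two mask,
	 * which is what the MPASS() in the nullfs hunk asserts.
	 */
	myfs_node_hashtbl = hashinit(desiredvnodes, M_MYFSHASH, &mask);
	MPASS(mask == vfs_hash_mask);
	mtx_init(&myfs_hashmtx, "myfshs", NULL, MTX_DEF);
	return (0);
}

Sizing every per-fs table from desiredvnodes keeps it congruent with the global table, which is what lets the bare vfs_hash_index() value double as a private bucket index.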