diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
index bea0d3ecc7c8..752c4b9f59f7 100644
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -142,6 +142,26 @@ tmpfs_node_fini(void *mem, int size)
 	mtx_destroy(&node->tn_interlock);
 }
 
+static bool
+tmpfs_mtime_filter(struct vnode *vp, void *arg)
+{
+	struct vm_object *obj;
+	bool lazy = *(bool *)arg;
+
+	if (vp->v_type != VREG)
+		return (false);
+	obj = *(struct vm_object * const volatile *)&(vp->v_object);
+	/*
+	 * We may race against VOP_RECLAIM which will NULLify the object.
+	 * It is safe to access regardless thanks to being type-stable.
+	 */
+	if (obj == NULL)
+		return (false);
+	if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0)
+		return (true);
+	return (false);
+}
+
 /*
  * Handle updates of time from writes to mmaped regions. Use
  * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
@@ -156,7 +176,7 @@ tmpfs_update_mtime(struct mount *mp, bool lazy)
 	struct vnode *vp, *mvp;
 	struct vm_object *obj;
 
-	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+	MNT_VNODE_FOREACH_ALL_FILTER(vp, mp, mvp, tmpfs_mtime_filter, &lazy) {
 		if (vp->v_type != VREG) {
 			VI_UNLOCK(vp);
 			continue;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 288a1ff905fa..fafd1f105403 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -5434,7 +5434,8 @@ vfs_unixify_accmode(accmode_t *accmode)
 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
 
 struct vnode *
-__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
+__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp,
+    bool (*cb)(struct vnode *, void *), void *cbarg)
 {
 	struct vnode *vp;
 
@@ -5447,6 +5448,9 @@ __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
 		/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
 		if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
 			continue;
+		/* XXXMJG relocking based on should_yield() may be prudent */
+		if (cb != NULL && !cb(vp, cbarg))
+			continue;
 		VI_LOCK(vp);
 		if ((vp->v_iflag & VI_DOOMED) != 0) {
 			VI_UNLOCK(vp);
@@ -5467,7 +5471,8 @@ __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
 }
 
 struct vnode *
-__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
+__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp,
+    bool (*cb)(struct vnode *, void *), void *cbarg)
 {
 	struct vnode *vp;
 
@@ -5481,6 +5486,9 @@ __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
 		/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
 		if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
 			continue;
+		/* XXXMJG relocking based on should_yield() may be prudent */
+		if (cb != NULL && !cb(vp, cbarg))
+			continue;
 		VI_LOCK(vp);
 		if ((vp->v_iflag & VI_DOOMED) != 0) {
 			VI_UNLOCK(vp);
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index 0bd6d9928074..10dbba4a72c0 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -231,13 +231,19 @@ struct mount {
 /*
  * Definitions for MNT_VNODE_FOREACH_ALL.
  */
-struct vnode *__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp);
-struct vnode *__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp);
+struct vnode *__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp,
+    bool (*cb)(struct vnode *, void *), void *);
+struct vnode *__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp,
+    bool (*cb)(struct vnode *, void *), void *);
 void          __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp);
 
-#define MNT_VNODE_FOREACH_ALL(vp, mp, mvp) \
-	for (vp = __mnt_vnode_first_all(&(mvp), (mp)); \
-		(vp) != NULL; vp = __mnt_vnode_next_all(&(mvp), (mp)))
+#define MNT_VNODE_FOREACH_ALL_FILTER(vp, mp, mvp, cb, cbarg) \
+	for (vp = __mnt_vnode_first_all(&(mvp), (mp), (cb), (cbarg)); \
+	    (vp) != NULL; vp = __mnt_vnode_next_all(&(mvp), (mp), (cb),\
+	    (cbarg)))
+
+#define MNT_VNODE_FOREACH_ALL(vp, mp, mvp) \
+	MNT_VNODE_FOREACH_ALL_FILTER(vp, mp, mvp, NULL, NULL)
 
 #define MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp)				\
 	do {								\
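For context, a minimal consumer of the new MNT_VNODE_FOREACH_ALL_FILTER macro could look like the sketch below. This is an illustrative example only (example_only_vreg and example_scan are hypothetical names, not part of the patch): the filter callback runs without the vnode interlock and may race with reclamation, so it can only serve as an advisory skip; the loop body still receives each surviving vnode with its interlock held and must re-check it and VI_UNLOCK it, exactly as with MNT_VNODE_FOREACH_ALL.

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/* Advisory filter: skip non-regular vnodes without taking any locks. */
static bool
example_only_vreg(struct vnode *vp, void *arg __unused)
{

	return (vp->v_type == VREG);
}

static void
example_scan(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_ALL_FILTER(vp, mp, mvp, example_only_vreg, NULL) {
		/* The filter was racy; re-check under the vnode interlock. */
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		/* Per-vnode work goes here, with the interlock held. */
		VI_UNLOCK(vp);
	}
}

Passing NULL for the callback, as the reworked MNT_VNODE_FOREACH_ALL now does, restores the unfiltered behaviour.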