Index: sys/fs/tmpfs/tmpfs_vnops.c =================================================================== --- sys/fs/tmpfs/tmpfs_vnops.c (revisione 233191) +++ sys/fs/tmpfs/tmpfs_vnops.c (copia locale) @@ -653,21 +653,32 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t to goto nocache; } lookupvpg: - if (((vpg = vm_page_lookup(vobj, idx)) != NULL) && - vm_page_is_valid(vpg, offset, tlen)) { - if ((vpg->oflags & VPO_BUSY) != 0) { - /* - * Reference the page before unlocking and sleeping so - * that the page daemon is less likely to reclaim it. - */ - vm_page_reference(vpg); - vm_page_sleep(vpg, "tmfsmw"); - goto lookupvpg; + if ((vpg = vm_radix_lookup(&vobj->rtree, idx, VM_RADIX_ANY)) != NULL) { + if (vm_page_is_valid(vpg, offset, tlen)) { + if ((vpg->oflags & VPO_BUSY) != 0) { + /* + * Reference the page before unlocking and + * sleeping so that the page daemon is less + * likely to reclaim it. + */ + vm_page_reference(vpg); + vm_page_sleep(vpg, "tmfsmw"); + goto lookupvpg; + } + vm_page_busy(vpg); + vm_page_undirty(vpg); + VM_OBJECT_UNLOCK(vobj); + error = uiomove_fromphys(&vpg, offset, tlen, uio); + } else { + if (vpg->flags & PG_CACHED) { + mtx_lock(&vm_page_queue_free_mtx); + if (vpg->object == vobj) + vm_page_cache_free(vpg); + mtx_unlock(&vm_page_queue_free_mtx); + } + VM_OBJECT_UNLOCK(vobj); + vpg = NULL; } - vm_page_busy(vpg); - vm_page_undirty(vpg); - VM_OBJECT_UNLOCK(vobj); - error = uiomove_fromphys(&vpg, offset, tlen, uio); } else { VM_OBJECT_UNLOCK(vobj); vpg = NULL; Index: sys/sys/ktr.h =================================================================== --- sys/sys/ktr.h (revisione 233191) +++ sys/sys/ktr.h (copia locale) @@ -75,7 +75,8 @@ #define KTR_INET6 0x10000000 /* IPv6 stack */ #define KTR_SCHED 0x20000000 /* Machine parsed sched info. 
*/ #define KTR_BUF 0x40000000 /* Buffer cache */ -#define KTR_ALL 0x7fffffff +#define KTR_FLO 0x80000000 /* Vnode free list tracing */ +#define KTR_ALL 0xffffffff /* Trace classes to compile in */ #ifdef KTR Index: sys/kern/vfs_subr.c =================================================================== --- sys/kern/vfs_subr.c (revisione 233191) +++ sys/kern/vfs_subr.c (copia locale) @@ -771,6 +771,7 @@ vnlru_free(int count) mtx_assert(&vnode_free_list_mtx, MA_OWNED); for (; count > 0; count--) { vp = TAILQ_FIRST(&vnode_free_list); + CTR2(KTR_FLO, "%s: first %p", __func__, vp); /* * The list can be modified while the free_list_mtx * has been dropped and vp could be NULL here. @@ -780,11 +781,13 @@ vnlru_free(int count) VNASSERT(vp->v_op != NULL, vp, ("vnlru_free: vnode already reclaimed.")); TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); + CTR2(KTR_FLO, "%s: remove %p", __func__, vp); /* * Don't recycle if we can't get the interlock. */ if (!VI_TRYLOCK(vp)) { TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); + CTR2(KTR_FLO, "%s: reinsert tail %p", __func__, vp); continue; } VNASSERT(VCANRECYCLE(vp), vp, @@ -3330,8 +3333,10 @@ vfree(struct vnode *vp) CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (vp->v_iflag & VI_AGE) { TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); + CTR2(KTR_FLO, "%s: insert head %p", __func__, vp); } else { TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); + CTR2(KTR_FLO, "%s: insert tail %p", __func__, vp); } freevnodes++; vp->v_iflag &= ~VI_AGE; @@ -3354,6 +3359,7 @@ vbusy(struct vnode *vp) TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); freevnodes--; vp->v_iflag &= ~(VI_FREE|VI_AGE); + CTR2(KTR_FLO, "%s: remove busy %p", __func__, vp); mtx_unlock(&vnode_free_list_mtx); }