Index: sys/fs/tmpfs/tmpfs_vnops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vnops.c	(revision 233702)
+++ sys/fs/tmpfs/tmpfs_vnops.c	(working copy)
@@ -514,7 +514,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tob
 		goto nocache;
 
 	VM_OBJECT_LOCK(vobj);
-	if (vobj->cached_page_count == 0) {
+	if (vobj->resident_page_count == 0 && vobj->cached_page_count == 0) {
 		VM_OBJECT_UNLOCK(vobj);
 		goto nocache;
 	}
@@ -647,27 +647,38 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t to
 	}
 
 	VM_OBJECT_LOCK(vobj);
-	if (vobj->cached_page_count == 0) {
+	if (vobj->resident_page_count == 0 && vobj->cached_page_count == 0) {
 		VM_OBJECT_UNLOCK(vobj);
 		vpg = NULL;
 		goto nocache;
 	}
 lookupvpg:
-	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(vpg, offset, tlen)) {
-		if ((vpg->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(vpg);
-			vm_page_sleep(vpg, "tmfsmw");
-			goto lookupvpg;
+	if ((vpg = vm_radix_lookup(&vobj->rtree, idx, VM_RADIX_ANY)) != NULL) {
+		if (vm_page_is_valid(vpg, offset, tlen)) {
+			if ((vpg->oflags & VPO_BUSY) != 0) {
+				/*
+				 * Reference the page before unlocking and
+				 * sleeping so that the page daemon is less
+				 * likely to reclaim it.
+				 */
+				vm_page_reference(vpg);
+				vm_page_sleep(vpg, "tmfsmw");
+				goto lookupvpg;
+			}
+			vm_page_busy(vpg);
+			vm_page_undirty(vpg);
+			VM_OBJECT_UNLOCK(vobj);
+			error = uiomove_fromphys(&vpg, offset, tlen, uio);
+		} else {
+			if (vpg->flags & PG_CACHED) {
+				mtx_lock(&vm_page_queue_free_mtx);
+				if (vpg->object == vobj)
+					vm_page_cache_free(vpg);
+				mtx_unlock(&vm_page_queue_free_mtx);
+			}
+			VM_OBJECT_UNLOCK(vobj);
+			vpg = NULL;
 		}
-		vm_page_busy(vpg);
-		vm_page_undirty(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&vpg, offset, tlen, uio);
 	} else {
 		VM_OBJECT_UNLOCK(vobj);
 		vpg = NULL;
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC	(revision 233703)
+++ sys/amd64/conf/GENERIC	(working copy)
@@ -81,6 +81,11 @@ options 	WITNESS			# Enable checks to detect deadl
 options 	WITNESS_SKIPSPIN	# Don't run witness on spinlocks for speed
 options 	MALLOC_DEBUG_MAXZONES=8	# Separate malloc(9) zones
 
+options 	KTR
+options 	KTR_COMPILE=(KTR_FLO)
+options 	KTR_MASK=(KTR_FLO)
+options 	KTR_ENTRIES=1024
+
 # Make an SMP-capable kernel by default
 options 	SMP			# Symmetric MultiProcessor Kernel
 
Index: sys/amd64/amd64/trap.c
===================================================================
--- sys/amd64/amd64/trap.c	(revision 233703)
+++ sys/amd64/amd64/trap.c	(working copy)
@@ -784,6 +784,7 @@ trap_fatal(frame, eva)
 	struct soft_segment_descriptor softseg;
 	char *msg;
 
+	ktr_mask = 0;
 	code = frame->tf_err;
 	type = frame->tf_trapno;
 	sdtossd(&gdt[NGDT * PCPU_GET(cpuid) + IDXSEL(frame->tf_cs & 0xffff)],
Index: sys/sys/ktr.h
===================================================================
--- sys/sys/ktr.h	(revision 233702)
+++ sys/sys/ktr.h	(working copy)
@@ -75,7 +75,8 @@
 #define	KTR_INET6	0x10000000		/* IPv6 stack */
 #define	KTR_SCHED	0x20000000		/* Machine parsed sched info. */
 #define	KTR_BUF		0x40000000		/* Buffer cache */
-#define	KTR_ALL		0x7fffffff
+#define	KTR_FLO		0x80000000
+#define	KTR_ALL		0xffffffff
 
 /* Trace classes to compile in */
 #ifdef KTR
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c	(revision 233702)
+++ sys/kern/vfs_subr.c	(working copy)
@@ -884,7 +884,7 @@ vdestroy(struct vnode *vp)
 {
 	struct bufobj *bo;
 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	mtx_lock(&vnode_free_list_mtx);
 	numvnodes--;
 	mtx_unlock(&vnode_free_list_mtx);
@@ -930,7 +930,7 @@ vtryrecycle(struct vnode *vp)
 {
 	struct mount *vnmp;
 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	VNASSERT(vp->v_holdcnt, vp,
 	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
 	/*
@@ -938,7 +938,7 @@ vtryrecycle(struct vnode *vp)
 	 * can't recycle it yet.
 	 */
 	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
-		CTR2(KTR_VFS,
+		CTR2(KTR_FLO,
 		    "%s: impossible to recycle, vp %p lock is already held",
 		    __func__, vp);
 		return (EWOULDBLOCK);
@@ -948,7 +948,7 @@ vtryrecycle(struct vnode *vp)
 	 */
 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
 		VOP_UNLOCK(vp, 0);
-		CTR2(KTR_VFS,
+		CTR2(KTR_FLO,
 		    "%s: impossible to recycle, cannot start the write for %p",
 		    __func__, vp);
 		return (EBUSY);
@@ -963,7 +963,7 @@ vtryrecycle(struct vnode *vp)
 	if (vp->v_usecount) {
 		VOP_UNLOCK(vp, LK_INTERLOCK);
 		vn_finished_write(vnmp);
-		CTR2(KTR_VFS,
+		CTR2(KTR_FLO,
 		    "%s: impossible to recycle, %p is already referenced",
 		    __func__, vp);
 		return (EBUSY);
@@ -985,7 +985,7 @@ getnewvnode(const char *tag, struct mount *mp, str
 	struct vnode *vp = NULL;
 	struct bufobj *bo;
 
-	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
+	CTR3(KTR_FLO, "%s: mp %p with tag %s", __func__, mp, tag);
 	mtx_lock(&vnode_free_list_mtx);
 	/*
 	 * Lend our context to reclaim vnodes if they've exceeded the max.
@@ -2072,7 +2072,7 @@ static void
 v_incr_usecount(struct vnode *vp)
 {
 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	vp->v_usecount++;
 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 		dev_lock();
@@ -2090,7 +2090,7 @@ static void
 v_upgrade_usecount(struct vnode *vp)
 {
 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	vp->v_usecount++;
 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 		dev_lock();
@@ -2111,7 +2111,7 @@ v_decr_usecount(struct vnode *vp)
 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
 	VNASSERT(vp->v_usecount > 0, vp,
 	    ("v_decr_usecount: negative usecount"));
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	vp->v_usecount--;
 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 		dev_lock();
@@ -2134,7 +2134,7 @@ v_decr_useonly(struct vnode *vp)
 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
 	VNASSERT(vp->v_usecount > 0, vp,
 	    ("v_decr_useonly: negative usecount"));
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	vp->v_usecount--;
 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
 		dev_lock();
@@ -2159,14 +2159,14 @@ vget(struct vnode *vp, int flags, struct thread *t
 	VFS_ASSERT_GIANT(vp->v_mount);
 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
 	    ("vget: invalid lock operation"));
-	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
+	CTR3(KTR_FLO, "%s: vp %p with flags %d", __func__, vp, flags);
 
 	if ((flags & LK_INTERLOCK) == 0)
 		VI_LOCK(vp);
 	vholdl(vp);
 	if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
 		vdrop(vp);
-		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
+		CTR2(KTR_FLO, "%s: impossible to lock vnode %p", __func__,
 		    vp);
 		return (error);
 	}
@@ -2198,7 +2198,7 @@ void
 vref(struct vnode *vp)
 {
 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	VI_LOCK(vp);
 	v_incr_usecount(vp);
 	VI_UNLOCK(vp);
@@ -2242,7 +2242,7 @@ vputx(struct vnode *vp, int func)
 	else
 		KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
 	VFS_ASSERT_GIANT(vp->v_mount);
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	VI_LOCK(vp);
 
 	/* Skip this v_writecount check if we're going to panic below. */
@@ -2262,7 +2262,7 @@ vputx(struct vnode *vp, int func)
 		vprint("vputx: negative ref count", vp);
 		panic("vputx: negative ref cnt");
 	}
-	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
+	CTR2(KTR_FLO, "%s: return vnode %p to the freelist", __func__, vp);
 	/*
 	 * We want to hold the vnode until the inactive finishes to
 	 * prevent vgone() races.  We drop the use count here and the
@@ -2351,7 +2351,7 @@ void
 vholdl(struct vnode *vp)
 {
 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	vp->v_holdcnt++;
 	if (VSHOULDBUSY(vp))
 		vbusy(vp);
@@ -2379,13 +2379,13 @@ vdropl(struct vnode *vp)
 {
 
 	ASSERT_VI_LOCKED(vp, "vdropl");
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	if (vp->v_holdcnt <= 0)
 		panic("vdrop: holdcnt %d", vp->v_holdcnt);
 	vp->v_holdcnt--;
 	if (vp->v_holdcnt == 0) {
 		if (vp->v_iflag & VI_DOOMED) {
-			CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__,
+			CTR2(KTR_FLO, "%s: destroying the vnode %p", __func__,
 			    vp);
 			vdestroy(vp);
 			return;
@@ -2409,7 +2409,7 @@ vinactive(struct vnode *vp, struct thread *td)
 	ASSERT_VI_LOCKED(vp, "vinactive");
 	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
 	    ("vinactive: recursed on VI_DOINGINACT"));
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	vp->v_iflag |= VI_DOINGINACT;
 	vp->v_iflag &= ~VI_OWEINACT;
 	VI_UNLOCK(vp);
@@ -2452,7 +2452,7 @@ vflush(struct mount *mp, int rootrefs, int flags,
 	struct vattr vattr;
 	int busy = 0, error;
 
-	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
+	CTR4(KTR_FLO, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
 	    rootrefs, flags);
 	if (rootrefs > 0) {
 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
@@ -2462,7 +2462,7 @@ vflush(struct mount *mp, int rootrefs, int flags,
 		 * immediately, since with rootrefs > 0, it won't go away.
 		 */
 		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
-			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
+			CTR2(KTR_FLO, "%s: vfs_root lookup failed with %d",
 			    __func__, error);
 			return (error);
 		}
@@ -2563,7 +2563,7 @@ loop:
 		VI_UNLOCK(rootvp);
 	}
 	if (busy) {
-		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
+		CTR2(KTR_FLO, "%s: failing as %d vnodes are busy", __func__,
 		    busy);
 		return (EBUSY);
 	}
@@ -2581,7 +2581,7 @@ vrecycle(struct vnode *vp, struct thread *td)
 	int recycled;
 
 	ASSERT_VOP_ELOCKED(vp, "vrecycle");
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	recycled = 0;
 	VI_LOCK(vp);
 	if (vp->v_usecount == 0) {
@@ -2619,7 +2619,7 @@ vgonel(struct vnode *vp)
 	ASSERT_VI_LOCKED(vp, "vgonel");
 	VNASSERT(vp->v_holdcnt, vp,
 	    ("vgonel: vp %p has no reference.", vp));
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	td = curthread;
 
 	/*
@@ -3327,7 +3327,7 @@ vfree(struct vnode *vp)
 	VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
 	VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
 	    ("vfree: Freeing doomed vnode"));
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 	if (vp->v_iflag & VI_AGE) {
 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 	} else {
@@ -3348,7 +3348,7 @@ vbusy(struct vnode *vp)
 	ASSERT_VI_LOCKED(vp, "vbusy");
 	VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
 	VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed.")); 
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	CTR2(KTR_FLO, "%s: vp %p", __func__, vp);
 
 	mtx_lock(&vnode_free_list_mtx);
 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
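
A note on the debugging bits above: KTR_FLO is only a private trace class used to follow the vnode code paths, and the ktr_mask = 0 assignment in trap_fatal() stops further recording so the KTR ring buffer still holds the events that led up to a fatal trap. As a minimal sketch of how such a trace point is emitted (the function example_trace and its obj argument are hypothetical, not part of the patch), any kernel code built with the KTR options added to GENERIC can log records into the same buffer:

#include <sys/param.h>
#include <sys/ktr.h>

/*
 * Hypothetical trace point: with "options KTR", KTR_COMPILE=(KTR_FLO) and
 * KTR_MASK=(KTR_FLO) in the kernel config, this CTR2() record is compiled
 * in and stored in the KTR ring buffer (KTR_ENTRIES records deep), just
 * like the KTR_VFS -> KTR_FLO conversions in vfs_subr.c above.
 */
static void
example_trace(void *obj)
{

	CTR2(KTR_FLO, "%s: obj %p", __func__, obj);
}

The recorded entries can then be inspected from DDB with "show ktr" (when DDB is compiled in) or, after the fact, with ktrdump(8).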