diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 5fb020b..ab0cc49 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -665,7 +665,9 @@ null_unlock(struct vop_unlock_args *ap)
 }
 
 /*
- * XXXKIB
+ * Do not allow the VOP_INACTIVE to be bypassed to the lower layer,
+ * since the reference count on the lower vnode is not related to
+ * ours.
  */
 static int
 null_inactive(struct vop_inactive_args *ap __unused)
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 5f1616c..eb63c76 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -154,6 +154,7 @@ tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
     char *target, dev_t rdev, struct tmpfs_node **node)
 {
 	struct tmpfs_node *nnode;
+	struct vm_object *obj;
 
 	/* If the root directory of the 'tmp' file system is not yet
 	 * allocated, this must be the request to do it. */
@@ -214,7 +215,7 @@ tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
 		break;
 
 	case VREG:
-		nnode->tn_reg.tn_aobj =
+		obj = nnode->tn_reg.tn_aobj =
 		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
 			NULL /* XXXKIB - tmpfs needs swap reservation */);
 		break;
@@ -295,6 +296,9 @@ tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
 			TMPFS_LOCK(tmp);
 			tmp->tm_pages_used -= uobj->size;
 			TMPFS_UNLOCK(tmp);
+			VM_OBJECT_LOCK(uobj);
+			uobj->flags &= ~OBJ_TMPFS;
+			VM_OBJECT_UNLOCK(uobj);
 			vm_object_deallocate(uobj);
 		}
 		break;
@@ -381,9 +385,11 @@ int
 tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
     struct vnode **vpp)
 {
-	int error = 0;
 	struct vnode *vp;
+	vm_object_t object;
+	int error;
 
+	error = 0;
 loop:
 	TMPFS_NODE_LOCK(node);
 	if ((vp = node->tn_vnode) != NULL) {
@@ -453,13 +459,22 @@ loop:
 		/* FALLTHROUGH */
 	case VLNK:
 		/* FALLTHROUGH */
-	case VREG:
-		/* FALLTHROUGH */
 	case VSOCK:
 		break;
 	case VFIFO:
 		vp->v_op = &tmpfs_fifoop_entries;
 		break;
+	case VREG:
+		object = node->tn_reg.tn_aobj;
+		VM_OBJECT_LOCK(object);
+		VI_LOCK(vp);
+		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
+		vp->v_object = object;
+		object->un_pager.swp.swp_tmpfs = vp;
+		object->flags |= OBJ_TMPFS;
+		VI_UNLOCK(vp);
+		VM_OBJECT_UNLOCK(object);
+		break;
 	case VDIR:
 		MPASS(node->tn_dir.tn_parent != NULL);
 		if (node->tn_dir.tn_parent == node)
@@ -470,7 +485,6 @@ loop:
 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
 	}
 
-	vnode_pager_setsize(vp, node->tn_size);
 	error = insmntque(vp, mp);
 	if (error)
 		vp = NULL;
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 368e1ca..d4c2e13 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -276,8 +276,6 @@ tmpfs_close(struct vop_close_args *v)
 {
 	struct vnode *vp = v->a_vp;
 
-	MPASS(VOP_ISLOCKED(vp));
-
 	/* Update node times. */
 	tmpfs_update(vp);
 
@@ -437,7 +435,6 @@ tmpfs_setattr(struct vop_setattr_args *v)
 	return error;
 }
 
-/* --------------------------------------------------------------------- */
 static int
 tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
     vm_offset_t offset, size_t tlen, struct uio *uio)
@@ -451,7 +448,18 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &m, 1, 0);
+			m = vm_page_lookup(tobj, idx);
+			if (m == NULL) {
+				printf(
+		    "tmpfs: vm_obj %p idx %ju null lookup rv %d\n",
+				    tobj, (uintmax_t)idx, rv);
+				VM_OBJECT_UNLOCK(tobj);
+				return (EIO);
+			}
 			if (rv != VM_PAGER_OK) {
+				printf(
+		    "tmpfs: vm_obj %p idx %ju valid %x pager error %d\n",
+				    tobj, (uintmax_t)idx, m->valid, rv);
 				vm_page_lock(m);
 				vm_page_free(m);
 				vm_page_unlock(m);
@@ -473,120 +480,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	return (error);
 }
 
-static __inline int
-tmpfs_nocacheread_buf(vm_object_t tobj, vm_pindex_t idx,
-    vm_offset_t offset, size_t tlen, void *buf)
-{
-	struct uio uio;
-	struct iovec iov;
-
-	uio.uio_iovcnt = 1;
-	uio.uio_iov = &iov;
-	iov.iov_base = buf;
-	iov.iov_len = tlen;
-
-	uio.uio_offset = 0;
-	uio.uio_resid = tlen;
-	uio.uio_rw = UIO_READ;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-
-	return (tmpfs_nocacheread(tobj, idx, offset, tlen, &uio));
-}
-
-static int
-tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
-{
-	struct sf_buf *sf;
-	vm_pindex_t idx;
-	vm_page_t m;
-	vm_offset_t offset;
-	off_t addr;
-	size_t tlen;
-	char *ma;
-	int error;
-
-	addr = uio->uio_offset;
-	idx = OFF_TO_IDX(addr);
-	offset = addr & PAGE_MASK;
-	tlen = MIN(PAGE_SIZE - offset, len);
-
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
-		goto nocache;
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(m, offset, tlen)) {
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(m);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&m, offset, tlen, uio);
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return (error);
-	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
-		KASSERT(offset == 0,
-		    ("unexpected offset in tmpfs_mappedread for sendfile"));
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(m);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		sched_pin();
-		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-		ma = (char *)sf_buf_kva(sf);
-		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
-		if (error == 0) {
-			if (tlen != PAGE_SIZE)
-				bzero(ma + tlen, PAGE_SIZE - tlen);
-			uio->uio_offset += tlen;
-			uio->uio_resid -= tlen;
-		}
-		sf_buf_free(sf);
-		sched_unpin();
-		VM_OBJECT_LOCK(vobj);
-		if (error == 0)
-			m->valid = VM_PAGE_BITS_ALL;
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return (error);
-	}
-	VM_OBJECT_UNLOCK(vobj);
-nocache:
-	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
-
-	return (error);
-}
-
 static int
 tmpfs_read(struct vop_read_args *v)
 {
 	struct vnode *vp = v->a_vp;
 	struct uio *uio = v->a_uio;
 	struct tmpfs_node *node;
 	vm_object_t uobj;
 	size_t len;
 	int resid;
 	int error = 0;
+	vm_pindex_t idx;
+	vm_offset_t offset;
+	off_t addr;
+	size_t tlen;
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -610,7 +517,11 @@ tmpfs_read(struct vop_read_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
+		addr = uio->uio_offset;
+		idx = OFF_TO_IDX(addr);
+		offset = addr & PAGE_MASK;
+		tlen = MIN(PAGE_SIZE - offset, len);
+		error = tmpfs_nocacheread(uobj, idx, offset, tlen, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -623,10 +534,10 @@ out:
 /* --------------------------------------------------------------------- */
 
 static int
-tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
 {
 	vm_pindex_t idx;
-	vm_page_t vpg, tpg;
+	vm_page_t tpg;
 	vm_offset_t offset;
 	off_t addr;
 	size_t tlen;
@@ -639,76 +550,42 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
 	offset = addr & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
-		vpg = NULL;
-		goto nocache;
-	}
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(vpg, offset, tlen)) {
-		if ((vpg->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(vpg);
-			vm_page_sleep(vpg, "tmfsmw");
-			goto lookupvpg;
-		}
-		vm_page_busy(vpg);
-		vm_page_undirty(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&vpg, offset, tlen, uio);
-	} else {
-		if (vm_page_is_cached(vobj, idx))
-			vm_page_cache_free(vobj, idx, idx + 1);
-		VM_OBJECT_UNLOCK(vobj);
-		vpg = NULL;
-	}
-nocache:
 	VM_OBJECT_LOCK(tobj);
-	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
-	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED | VM_ALLOC_NORMAL |
+	    VM_ALLOC_RETRY);
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
+			tpg = vm_page_lookup(tobj, idx);
+			if (tpg == NULL) {
+				printf(
+		    "tmpfs: vm_obj %p idx %ju null lookup rv %d\n",
+				    tobj, (uintmax_t)idx, rv);
+				VM_OBJECT_UNLOCK(tobj);
+				return (EIO);
+			}
 			if (rv != VM_PAGER_OK) {
+				printf(
+		    "tmpfs: vm_obj %p idx %ju valid %x pager error %d\n",
+				    tobj, (uintmax_t)idx, tpg->valid, rv);
 				vm_page_lock(tpg);
 				vm_page_free(tpg);
 				vm_page_unlock(tpg);
-				error = EIO;
-				goto out;
+				VM_OBJECT_UNLOCK(tobj);
+				return (EIO);
 			}
 		} else
 			vm_page_zero_invalid(tpg, TRUE);
 	}
 	VM_OBJECT_UNLOCK(tobj);
-	if (vpg == NULL)
-		error = uiomove_fromphys(&tpg, offset, tlen, uio);
-	else {
-		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
-		pmap_copy_page(vpg, tpg);
-	}
+	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
-	if (error == 0) {
-		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
-		    ("parts of tpg invalid"));
+	if (error == 0)
 		vm_page_dirty(tpg);
-	}
 	vm_page_lock(tpg);
 	vm_page_unwire(tpg, TRUE);
 	vm_page_unlock(tpg);
 	vm_page_wakeup(tpg);
-out:
 	VM_OBJECT_UNLOCK(tobj);
-	if (vpg != NULL) {
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-	}
 	return (error);
 }
@@ -766,7 +643,7 @@ tmpfs_write(struct vop_write_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedwrite(vp->v_object, uobj, len, uio);
+		error = tmpfs_mappedwrite(uobj, len, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -1581,8 +1458,6 @@ tmpfs_inactive(struct vop_inactive_args *v)
 
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp));
-
 	node = VP_TO_TMPFS_NODE(vp);
 
 	if (node->tn_links == 0)
@@ -1604,7 +1479,7 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
 	node = VP_TO_TMPFS_NODE(vp);
 	tmp = VFS_TO_TMPFS(vp->v_mount);
 
-	vnode_destroy_vobject(vp);
+	vp->v_object = NULL;
 	cache_purge(vp);
 
 	TMPFS_NODE_LOCK(node);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index bea4102..1021803 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2341,6 +2341,37 @@ out:
 	return (m_pc);
 }
 
+static int
+sysctl_pv_reclaim(SYSCTL_HANDLER_ARGS)
+{
+	vm_page_t m;
+	pmap_t pmap;
+	int error, c, i, val;
+
+	val = 0;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return (error);
+	pmap = vmspace_pmap(curproc->p_vmspace);
+	for (i = 0, c = 0; i < val; i++) {
+		rw_wlock(&pvh_global_lock);
+		PMAP_LOCK(pmap);
+		m = pmap_pv_reclaim(pmap);
+		PMAP_UNLOCK(pmap);
+		rw_wunlock(&pvh_global_lock);
+		if (m != NULL) {
+			c++;
+			m->wire_count = 0;
+			atomic_add_int(&cnt.v_wire_count, -1);
+			vm_page_free(m);
+		}
+	}
+	return (error);
+}
+
+SYSCTL_PROC(_vm_pmap, OID_AUTO, pv_reclaim, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
+    sysctl_pv_reclaim, "I", "");
+
 /*
  * free the pv_entry back to the free list
  */
@@ -4366,6 +4397,8 @@ pmap_remove_pages(pmap_t pmap)
 	PMAP_LOCK(pmap);
 	sched_pin();
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+		KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
%p", pmap, + pc->pc_pmap)); allfree = 1; for (field = 0; field < _NPCM; field++) { inuse = ~pc->pc_map[field] & pc_freemask[field]; diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c index 94dd127..c3f5fae 100644 --- a/sys/kern/uipc_syscalls.c +++ b/sys/kern/uipc_syscalls.c @@ -1832,9 +1832,10 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap, struct mbuf *m = NULL; struct sf_buf *sf; struct vm_page *pg; + struct vattr va; off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0; int error, hdrlen = 0, mnw = 0; - int vfslocked; + int bsize, vfslocked; struct sendfile_sync *sfs = NULL; /* @@ -2031,6 +2032,20 @@ retry_space: */ space -= hdrlen; + vfslocked = VFS_LOCK_GIANT(vp->v_mount); + error = vn_lock(vp, LK_SHARED); + if (error != 0) { + VFS_UNLOCK_GIANT(vfslocked); + goto done; + } + error = VOP_GETATTR(vp, &va, td->td_ucred); + if (error != 0) { + VOP_UNLOCK(vp, 0); + VFS_UNLOCK_GIANT(vfslocked); + goto done; + } + bsize = vp->v_mount->mnt_stat.f_iosize; + /* * Loop and construct maximum sized mbuf chain to be bulk * dumped into socket buffer. @@ -2040,7 +2055,6 @@ retry_space: vm_offset_t pgoff; struct mbuf *m0; - VM_OBJECT_LOCK(obj); /* * Calculate the amount to transfer. * Not to exceed a page, the EOF, @@ -2048,17 +2062,16 @@ retry_space: */ pgoff = (vm_offset_t)(off & PAGE_MASK); xfsize = omin(PAGE_SIZE - pgoff, - obj->un_pager.vnp.vnp_size - uap->offset - + va.va_size - uap->offset - fsbytes - loopbytes); if (uap->nbytes) rem = (uap->nbytes - fsbytes - loopbytes); else - rem = obj->un_pager.vnp.vnp_size - + rem = va.va_size - uap->offset - fsbytes - loopbytes; xfsize = omin(rem, xfsize); xfsize = omin(space - loopbytes, xfsize); if (xfsize <= 0) { - VM_OBJECT_UNLOCK(obj); done = 1; /* all data sent */ break; } @@ -2068,6 +2081,7 @@ retry_space: * if not found or wait and loop if busy. */ pindex = OFF_TO_IDX(off); + VM_OBJECT_LOCK(obj); pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY); @@ -2085,7 +2099,6 @@ retry_space: else if (uap->flags & SF_NODISKIO) error = EBUSY; else { - int bsize; ssize_t resid; /* @@ -2098,12 +2111,6 @@ retry_space: /* * Get the page from backing store. */ - vfslocked = VFS_LOCK_GIANT(vp->v_mount); - error = vn_lock(vp, LK_SHARED); - if (error != 0) - goto after_read; - bsize = vp->v_mount->mnt_stat.f_iosize; - /* * XXXMAC: Because we don't have fp->f_cred * here, we pass in NOCRED. This is probably @@ -2114,9 +2121,6 @@ retry_space: trunc_page(off), UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td); - VOP_UNLOCK(vp, 0); - after_read: - VFS_UNLOCK_GIANT(vfslocked); VM_OBJECT_LOCK(obj); vm_page_io_finish(pg); if (!error) @@ -2199,6 +2203,9 @@ retry_space: } } + VOP_UNLOCK(vp, 0); + VFS_UNLOCK_GIANT(vfslocked); + /* Add the buffer chain to the socket buffer. 
 		if (m != NULL) {
 			int mlen, err;
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index e0aab73..a84f968 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1280,7 +1280,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 			error = EINVAL;
 			goto done;
 		}
-		if (obj->handle != vp) {
+		if (obj->type == OBJT_VNODE && obj->handle != vp) {
 			vput(vp);
 			vp = (struct vnode *)obj->handle;
 			/*
@@ -1331,7 +1331,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	objsize = round_page(va.va_size);
 	if (va.va_nlink == 0)
 		flags |= MAP_NOSYNC;
-	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, cred);
+	if (obj->type == OBJT_VNODE)
+		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
+		    cred);
+	else {
+		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
+		    ("wrong object type"));
+		vm_object_reference(obj);
+	}
 	if (obj == NULL) {
 		error = ENOMEM;
 		goto done;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index abf7689..80463e1 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -478,6 +478,7 @@ void
 vm_object_deallocate(vm_object_t object)
 {
 	vm_object_t temp;
+	struct vnode *vp;
 
 	while (object != NULL) {
 		int vfslocked;
@@ -527,6 +528,24 @@ vm_object_deallocate(vm_object_t object)
 			VM_OBJECT_UNLOCK(object);
 			return;
 		} else if (object->ref_count == 1) {
+			if (object->type == OBJT_SWAP &&
+			    (object->flags & OBJ_TMPFS) != 0) {
+				vp = object->un_pager.swp.swp_tmpfs;
+				vhold(vp);
+				VM_OBJECT_UNLOCK(object);
+				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+				vdrop(vp);
+				VM_OBJECT_LOCK(object);
+				if (object->type == OBJT_DEAD) {
+					VM_OBJECT_UNLOCK(object);
+					VOP_UNLOCK(vp, 0);
+					return;
+				} else if ((object->flags & OBJ_TMPFS) != 0) {
+					if (object->ref_count == 1)
+						VOP_UNSET_TEXT(vp);
+					VOP_UNLOCK(vp, 0);
+				}
+			}
 			if (object->shadow_count == 0 &&
 			    object->handle == NULL &&
 			    (object->type == OBJT_DEFAULT ||
@@ -822,7 +841,12 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 
 	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
+
+	/*
+	 * The OBJ_MIGHTBEDIRTY flag is only set on OBJT_VNODE
+	 * objects, so the check below prevents this function from
+	 * operating on non-vnode objects.
+	 */
 	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
 	    object->resident_page_count == 0)
 		return (TRUE);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index daafd03..d7c5b75 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -155,6 +155,7 @@ struct vm_object {
 		 * see vm/swap_pager.h
 		 */
 		struct {
+			void *swp_tmpfs;
 			int swp_bcount;
 		} swp;
 	} un_pager;
@@ -173,6 +174,7 @@ struct vm_object {
 #define	OBJ_COLORED	0x1000		/* pg_color is defined */
 #define	OBJ_ONEMAPPING	0x2000		/* One USE (a single, non-forked) mapping flag */
 #define	OBJ_DISCONNECTWNT 0x4000	/* disconnect from vnode wanted */
+#define	OBJ_TMPFS	0x8000		/* object backs a tmpfs vnode */
 
 #define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
 #define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index cc9063b..9183ba3 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -213,8 +213,7 @@ retry:
 		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
 		    "vadead", 0);
 	}
-	if (vp->v_usecount == 0)
-		panic("vnode_pager_alloc: no vnode reference");
+	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
 	if (object == NULL) {
 		/*
@@ -379,7 +378,7 @@ vnode_pager_setsize(vp, nsize)
 	vm_page_t m;
 	vm_pindex_t nobjsize;
 
-	if ((object = vp->v_object) == NULL)
+	if ((object = vp->v_object) == NULL || object->type != OBJT_VNODE)
 		return;
 /* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
 	VM_OBJECT_LOCK(object);
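
Testing note for the vm.pmap.pv_reclaim sysctl added above: each unit of the
written value makes the handler run pmap_pv_reclaim() once against the
calling process's pmap, so the knob can be exercised from userland. The
sketch below is a hypothetical test harness, not part of the patch; it
assumes an i386 kernel built with this change.

	/* pv_reclaim_test.c: drive vm.pmap.pv_reclaim N times. */
	#include <sys/types.h>
	#include <sys/sysctl.h>

	#include <err.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(int argc, char **argv)
	{
		int passes;

		/* One pass unless a count is given on the command line. */
		passes = argc > 1 ? atoi(argv[1]) : 1;

		/*
		 * Writing the count asks the handler to attempt that many
		 * pv chunk reclaim passes on this process's pmap.
		 */
		if (sysctlbyname("vm.pmap.pv_reclaim", NULL, NULL, &passes,
		    sizeof(passes)) != 0)
			err(1, "sysctlbyname(vm.pmap.pv_reclaim)");
		printf("requested %d pv chunk reclaim pass(es)\n", passes);
		return (0);
	}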