diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 94bc0bf..a2cb883 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -381,13 +381,17 @@ loop:
 		/* FALLTHROUGH */
 	case VLNK:
 		/* FALLTHROUGH */
-	case VREG:
-		/* FALLTHROUGH */
 	case VSOCK:
 		break;
 	case VFIFO:
 		vp->v_op = &tmpfs_fifoop_entries;
 		break;
+	case VREG:
+		VI_LOCK(vp);
+		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
+		vp->v_object = node->tn_reg.tn_aobj;
+		VI_UNLOCK(vp);
+		break;
 	case VDIR:
 		MPASS(node->tn_dir.tn_parent != NULL);
 		if (node->tn_dir.tn_parent == node)
@@ -398,7 +402,6 @@ loop:
 		panic("tmpfs_alloc_vp: type %p %d", node,
 		    (int)node->tn_type);
 	}
-	vnode_pager_setsize(vp, node->tn_size);
 	error = insmntque(vp, mp);
 	if (error)
 		vp = NULL;
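Note: this hunk is the heart of the patch. A tmpfs regular vnode now publishes the node's swap-backed anonymous object (tn_reg.tn_aobj) directly as vp->v_object, instead of letting the vnode pager instantiate a second OBJT_VNODE object over the same data. With only one object left there is nothing for vnode_pager_setsize() to resize, so that call is dropped. A minimal sketch of the invariant this establishes, as a hypothetical helper that is not part of the patch:

	static void
	tmpfs_assert_single_object(struct vnode *vp, struct tmpfs_node *node)
	{

		/*
		 * After tmpfs_alloc_vp(), the vnode and the tmpfs node
		 * name the same OBJT_SWAP object, so mmap(), sendfile()
		 * and read()/write() all operate on the same pages.
		 */
		MPASS(vp->v_type == VREG);
		MPASS(vp->v_object == node->tn_reg.tn_aobj);
	}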
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index f0dfe36..228c06c 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -263,6 +263,23 @@ tmpfs_open(struct vop_open_args *v)
 	return error;
 }
 
+static void
+tmpfs_clear_text(struct vnode *vp)
+{
+	vm_object_t object;
+
+	ASSERT_VOP_ELOCKED(vp, "tmpfs_close");
+	if (vp->v_type != VREG)
+		return;
+	object = vp->v_object;
+	if (object == NULL)
+		return;
+	VM_OBJECT_LOCK(object);
+	if (object->ref_count <= 1)
+		vp->v_vflag &= ~VV_TEXT;
+	VM_OBJECT_UNLOCK(object);
+}
+
 /* --------------------------------------------------------------------- */
 
 static int
@@ -270,10 +287,9 @@ tmpfs_close(struct vop_close_args *v)
 {
 	struct vnode *vp = v->a_vp;
 
-	MPASS(VOP_ISLOCKED(vp));
-
 	/* Update node times. */
 	tmpfs_update(vp);
+	tmpfs_clear_text(vp);
 
 	return (0);
 }
@@ -431,7 +447,6 @@ tmpfs_setattr(struct vop_setattr_args *v)
 	return error;
 }
 
-/* --------------------------------------------------------------------- */
 static int
 tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
     vm_offset_t offset, size_t tlen, struct uio *uio)
@@ -467,120 +482,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	return (error);
 }
 
-static __inline int
-tmpfs_nocacheread_buf(vm_object_t tobj, vm_pindex_t idx,
-    vm_offset_t offset, size_t tlen, void *buf)
-{
-	struct uio uio;
-	struct iovec iov;
-
-	uio.uio_iovcnt = 1;
-	uio.uio_iov = &iov;
-	iov.iov_base = buf;
-	iov.iov_len = tlen;
-
-	uio.uio_offset = 0;
-	uio.uio_resid = tlen;
-	uio.uio_rw = UIO_READ;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-
-	return (tmpfs_nocacheread(tobj, idx, offset, tlen, &uio));
-}
-
-static int
-tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
-{
-	struct sf_buf *sf;
-	vm_pindex_t idx;
-	vm_page_t m;
-	vm_offset_t offset;
-	off_t addr;
-	size_t tlen;
-	char *ma;
-	int error;
-
-	addr = uio->uio_offset;
-	idx = OFF_TO_IDX(addr);
-	offset = addr & PAGE_MASK;
-	tlen = MIN(PAGE_SIZE - offset, len);
-
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
-		goto nocache;
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(m, offset, tlen)) {
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(m);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&m, offset, tlen, uio);
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return (error);
-	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
-		KASSERT(offset == 0,
-		    ("unexpected offset in tmpfs_mappedread for sendfile"));
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(m);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		sched_pin();
-		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-		ma = (char *)sf_buf_kva(sf);
-		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
-		if (error == 0) {
-			if (tlen != PAGE_SIZE)
-				bzero(ma + tlen, PAGE_SIZE - tlen);
-			uio->uio_offset += tlen;
-			uio->uio_resid -= tlen;
-		}
-		sf_buf_free(sf);
-		sched_unpin();
-		VM_OBJECT_LOCK(vobj);
-		if (error == 0)
-			m->valid = VM_PAGE_BITS_ALL;
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return (error);
-	}
-	VM_OBJECT_UNLOCK(vobj);
-nocache:
-	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
-
-	return (error);
-}
-
 static int
 tmpfs_read(struct vop_read_args *v)
 {
 	struct vnode *vp = v->a_vp;
 	struct uio *uio = v->a_uio;
 	struct tmpfs_node *node;
 	vm_object_t uobj;
 	size_t len;
 	int resid;
 	int error = 0;
+	vm_pindex_t idx;
+	vm_offset_t offset;
+	off_t addr;
+	size_t tlen;
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -604,7 +519,11 @@ tmpfs_read(struct vop_read_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
+		addr = uio->uio_offset;
+		idx = OFF_TO_IDX(addr);
+		offset = addr & PAGE_MASK;
+		tlen = MIN(PAGE_SIZE - offset, len);
+		error = tmpfs_nocacheread(uobj, idx, offset, tlen, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -617,10 +536,10 @@ out:
 /* --------------------------------------------------------------------- */
 
 static int
-tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
 {
 	vm_pindex_t idx;
-	vm_page_t vpg, tpg;
+	vm_page_t tpg;
 	vm_offset_t offset;
 	off_t addr;
 	size_t tlen;
@@ -633,39 +552,9 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
 	offset = addr & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
-		vpg = NULL;
-		goto nocache;
-	}
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(vpg, offset, tlen)) {
-		if ((vpg->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_reference(vpg);
-			vm_page_sleep(vpg, "tmfsmw");
-			goto lookupvpg;
-		}
-		vm_page_busy(vpg);
-		vm_page_undirty(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&vpg, offset, tlen, uio);
-	} else {
-		if (__predict_false(vobj->cache != NULL))
-			vm_page_cache_free(vobj, idx, idx + 1);
-		VM_OBJECT_UNLOCK(vobj);
-		vpg = NULL;
-	}
-nocache:
 	VM_OBJECT_LOCK(tobj);
-	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
-	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED | VM_ALLOC_NORMAL |
+	    VM_ALLOC_RETRY);
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
@@ -673,36 +562,22 @@ nocache:
 				vm_page_lock(tpg);
 				vm_page_free(tpg);
 				vm_page_unlock(tpg);
-				error = EIO;
-				goto out;
+				VM_OBJECT_UNLOCK(tobj);
+				return (EIO);
 			}
 		} else
 			vm_page_zero_invalid(tpg, TRUE);
 	}
 	VM_OBJECT_UNLOCK(tobj);
-	if (vpg == NULL)
-		error = uiomove_fromphys(&tpg, offset, tlen, uio);
-	else {
-		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
-		pmap_copy_page(vpg, tpg);
-	}
+	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
-	if (error == 0) {
-		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
-		    ("parts of tpg invalid"));
+	if (error == 0)
 		vm_page_dirty(tpg);
-	}
 	vm_page_lock(tpg);
 	vm_page_unwire(tpg, TRUE);
 	vm_page_unlock(tpg);
 	vm_page_wakeup(tpg);
-out:
 	VM_OBJECT_UNLOCK(tobj);
-	if (vpg != NULL) {
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-	}
 
 	return (error);
 }
@@ -759,7 +634,7 @@ tmpfs_write(struct vop_write_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedwrite(vp->v_object, uobj, len, uio);
+		error = tmpfs_mappedwrite(uobj, len, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -1412,12 +1287,12 @@ tmpfs_inactive(struct vop_inactive_args *v)
 
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp));
-
 	node = VP_TO_TMPFS_NODE(vp);
 
 	if (node->tn_links == 0)
 		vrecycle(vp, l);
+	else
+		tmpfs_clear_text(vp);
 
 	return 0;
 }
@@ -1435,7 +1310,7 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
 	node = VP_TO_TMPFS_NODE(vp);
 	tmp = VFS_TO_TMPFS(vp->v_mount);
 
-	vnode_destroy_vobject(vp);
+	vp->v_object = NULL;
 	cache_purge(vp);
 
 	TMPFS_NODE_LOCK(node);
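Note: with a single backing object the old double-buffering machinery is unnecessary, which is why tmpfs_mappedread(), tmpfs_nocacheread_buf() and the vobj half of tmpfs_mappedwrite() disappear: there is no longer a separate set of vnode-pager pages to keep coherent with the swap object's pages. Both surviving I/O paths reduce to the same grab-and-copy pattern. A sketch of that pattern, assuming tmpfs_nocacheread() (whose body is not in this diff) mirrors the rewritten tmpfs_mappedwrite(); the name tmpfs_page_io() is hypothetical:

	static int
	tmpfs_page_io(vm_object_t tobj, vm_pindex_t idx, vm_offset_t offset,
	    size_t tlen, struct uio *uio)
	{
		vm_page_t m;
		int error, rv;

		VM_OBJECT_LOCK(tobj);
		m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED | VM_ALLOC_NORMAL |
		    VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			/* Page in from swap if present, else zero-fill. */
			if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
				rv = vm_pager_get_pages(tobj, &m, 1, 0);
				if (rv != VM_PAGER_OK) {
					vm_page_lock(m);
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_UNLOCK(tobj);
					return (EIO);
				}
			} else
				vm_page_zero_invalid(m, TRUE);
		}
		VM_OBJECT_UNLOCK(tobj);

		/* Copy between the wired page and the caller's buffer. */
		error = uiomove_fromphys(&m, offset, tlen, uio);

		VM_OBJECT_LOCK(tobj);
		if (error == 0 && uio->uio_rw == UIO_WRITE)
			vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, TRUE);
		vm_page_unlock(m);
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(tobj);
		return (error);
	}

The new tmpfs_clear_text() appears to compensate for the vnode-pager teardown path that would otherwise have cleared VV_TEXT: once the object's reference count drops to the vnode's own reference (ref_count <= 1), no mapping of the executable remains and the flag can be dropped.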
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 8455f48..f196866 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -86,9 +86,9 @@ static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
     const char *interp, int32_t *osrel);
 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
     u_long *entry, size_t pagesize);
-static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
-    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
-    vm_prot_t prot, size_t pagesize);
+static int __elfN(load_section)(struct vmspace *vmspace, struct vnode *vp,
+    vm_object_t object, vm_offset_t offset, caddr_t vmaddr, size_t memsz,
+    size_t filsz, vm_prot_t prot, size_t pagesize);
 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
     int32_t *osrel);
@@ -445,12 +445,13 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 }
 
 static int
-__elfN(load_section)(struct vmspace *vmspace,
+__elfN(load_section)(struct vmspace *vmspace, struct vnode *vp,
 	vm_object_t object, vm_offset_t offset,
 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
 	size_t pagesize)
 {
 	struct sf_buf *sf;
+	struct vattr va;
 	size_t map_len;
 	vm_offset_t map_addr;
 	int error, rv, cow;
@@ -466,7 +467,10 @@ __elfN(load_section)(struct vmspace *vmspace,
 	 * While I'm here, might as well check for something else that
 	 * is invalid: filsz cannot be greater than memsz.
 	 */
-	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
+	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
+	if (error != 0)
+		return (error);
+	if ((off_t)filsz + offset > va.va_size ||
 	    filsz > memsz) {
 		uprintf("elf_load_section: truncated ELF file\n");
 		return (ENOEXEC);
@@ -672,7 +676,7 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
 			/* Loadable segment */
 			prot = __elfN(trans_prot)(phdr[i].p_flags);
-			if ((error = __elfN(load_section)(vmspace,
+			if ((error = __elfN(load_section)(vmspace, imgp->vp,
 			    imgp->object, phdr[i].p_offset,
 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
@@ -828,7 +832,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
 			prot |= VM_PROT_EXECUTE;
 #endif
 
-		if ((error = __elfN(load_section)(vmspace,
+		if ((error = __elfN(load_section)(vmspace, imgp->vp,
 		    imgp->object, phdr[i].p_offset,
 		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
 		    phdr[i].p_memsz, phdr[i].p_filesz, prot,
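Note: load_section() used to read the executable's size out of object->un_pager.vnp.vnp_size. That union member is only meaningful while the object is paged by the vnode pager; for a binary on tmpfs the image object is now the swap object, so the field would contain another pager's state. Asking the vnode itself via VOP_GETATTR() works for either pager. An illustrative helper contrasting the two approaches (image_size() is not part of the patch):

	static off_t
	image_size(struct vnode *vp, vm_object_t object)
	{
		struct vattr va;

		/*
		 * Wrong once a non-OBJT_VNODE object can back a file:
		 * return (object->un_pager.vnp.vnp_size);
		 */
		if (VOP_GETATTR(vp, &va, curthread->td_ucred) != 0)
			return (-1);
		return (va.va_size);
	}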
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 3b83e1c..0042dd8 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1834,9 +1834,10 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
 	struct mbuf *m = NULL;
 	struct sf_buf *sf;
 	struct vm_page *pg;
+	struct vattr va;
 	off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0;
 	int error, hdrlen = 0, mnw = 0;
-	int vfslocked;
+	int bsize, vfslocked;
 	struct sendfile_sync *sfs = NULL;
 
 	/*
@@ -2032,6 +2033,20 @@ retry_space:
 		 */
 		space -= hdrlen;
 
+		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+		error = vn_lock(vp, LK_SHARED);
+		if (error != 0) {
+			VFS_UNLOCK_GIANT(vfslocked);
+			goto done;
+		}
+		error = VOP_GETATTR(vp, &va, td->td_ucred);
+		if (error != 0) {
+			VOP_UNLOCK(vp, 0);
+			VFS_UNLOCK_GIANT(vfslocked);
+			goto done;
+		}
+		bsize = vp->v_mount->mnt_stat.f_iosize;
+
 		/*
 		 * Loop and construct maximum sized mbuf chain to be bulk
 		 * dumped into socket buffer.
@@ -2049,12 +2064,12 @@ retry_space:
 			 */
 			pgoff = (vm_offset_t)(off & PAGE_MASK);
 			xfsize = omin(PAGE_SIZE - pgoff,
-			    obj->un_pager.vnp.vnp_size - uap->offset -
+			    va.va_size - uap->offset -
 			    fsbytes - loopbytes);
 			if (uap->nbytes)
 				rem = (uap->nbytes - fsbytes - loopbytes);
 			else
-				rem = obj->un_pager.vnp.vnp_size -
+				rem = va.va_size -
 				    uap->offset - fsbytes - loopbytes;
 			xfsize = omin(rem, xfsize);
 			xfsize = omin(space - loopbytes, xfsize);
@@ -2086,7 +2101,7 @@ retry_space:
 			else if (uap->flags & SF_NODISKIO)
 				error = EBUSY;
 			else {
-				int bsize, resid;
+				int resid;
 
 				/*
 				 * Ensure that our page is still around
@@ -2098,12 +2113,6 @@ retry_space:
 				/*
 				 * Get the page from backing store.
 				 */
-				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
-				error = vn_lock(vp, LK_SHARED);
-				if (error != 0)
-					goto after_read;
-				bsize = vp->v_mount->mnt_stat.f_iosize;
-
 				/*
 				 * XXXMAC: Because we don't have fp->f_cred
 				 * here, we pass in NOCRED.  This is probably
@@ -2114,9 +2123,6 @@ retry_space:
 				    trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
 				    IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
 				    td->td_ucred, NOCRED, &resid, td);
-				VOP_UNLOCK(vp, 0);
-			after_read:
-				VFS_UNLOCK_GIANT(vfslocked);
 				VM_OBJECT_LOCK(obj);
 				vm_page_io_finish(pg);
 				if (!error)
@@ -2196,6 +2202,9 @@ retry_space:
 			}
 		}
 
+		VOP_UNLOCK(vp, 0);
+		VFS_UNLOCK_GIANT(vfslocked);
+
 		/* Add the buffer chain to the socket buffer. */
 		if (m != NULL) {
 			int mlen, err;
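Note: the same vnp_size problem exists in sendfile(2), and the fix doubles as a locking cleanup. Instead of locking the vnode and reading f_iosize inside the per-page cache-miss branch, the vnode is locked (shared) once per pass, VOP_GETATTR() supplies the file size for the whole loop, and the lock is held until the mbuf chain has been built, which lets the after_read unwind label go away. A sketch of the hoisted sequence as a hypothetical helper (every call in it appears in the hunks above):

	static int
	sendfile_getsize(struct vnode *vp, struct thread *td, off_t *sizep,
	    int *bsizep, int *vfslockedp)
	{
		struct vattr va;
		int error;

		*vfslockedp = VFS_LOCK_GIANT(vp->v_mount);
		error = vn_lock(vp, LK_SHARED);
		if (error != 0) {
			VFS_UNLOCK_GIANT(*vfslockedp);
			return (error);
		}
		error = VOP_GETATTR(vp, &va, td->td_ucred);
		if (error != 0) {
			VOP_UNLOCK(vp, 0);
			VFS_UNLOCK_GIANT(*vfslockedp);
			return (error);
		}
		*sizep = va.va_size;
		*bsizep = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * Caller keeps the shared vnode lock until the mbuf
		 * chain is complete, then unlocks; vn_rdwr() is called
		 * with IO_NODELOCKED accordingly.
		 */
		return (0);
	}

The trade-off is that the shared vnode lock is now held across the whole chain-construction loop rather than being taken and dropped per page.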
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index e85b681..9e07b87 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1254,7 +1254,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 			error = EINVAL;
 			goto done;
 		}
-		if (obj->handle != vp) {
+		if (obj->type == OBJT_VNODE && obj->handle != vp) {
 			vput(vp);
 			vp = (struct vnode*)obj->handle;
 			vget(vp, LK_SHARED, td);
@@ -1293,7 +1293,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	objsize = round_page(va.va_size);
 	if (va.va_nlink == 0)
 		flags |= MAP_NOSYNC;
-	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
+	if (obj->type == OBJT_VNODE)
+		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
+		    td->td_ucred);
+	else {
+		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
+		    ("wrong object type"));
+		vm_object_reference(obj);
+	}
 	if (obj == NULL) {
 		error = ENOMEM;
 		goto done;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1a8ce65..6508bba 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -817,7 +817,12 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 
 	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
+
+	/*
+	 * The OBJ_MIGHTBEDIRTY flag is only set on OBJT_VNODE
+	 * objects.  The check below prevents the function from
+	 * operating on non-vnode objects.
+	 */
 	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
 	    object->resident_page_count == 0)
 		return;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 929fa4f..55ac328 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -212,8 +212,7 @@ retry:
 			msleep(object, VM_OBJECT_MTX(object), PDROP | PVM,
 			    "vadead", 0);
 	}
-	if (vp->v_usecount == 0)
-		panic("vnode_pager_alloc: no vnode reference");
+	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
 
 	if (object == NULL) {
 		/*
@@ -369,7 +368,7 @@ vnode_pager_setsize(vp, nsize)
 	vm_page_t m;
 	vm_pindex_t nobjsize;
 
-	if ((object = vp->v_object) == NULL)
+	if ((object = vp->v_object) == NULL || object->type != OBJT_VNODE)
 		return;
 /* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
 	VM_OBJECT_LOCK(object);
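Note: the remaining VM-side hunks make the rest of the kernel tolerate a v_object that is not an OBJT_VNODE object. vm_mmap_vnode() takes a reference on the swap object instead of allocating a vnode-pager object; vm_object_page_clean() turns its KASSERT into a documented early return, since OBJ_MIGHTBEDIRTY is only ever set on vnode objects and a non-vnode object therefore has nothing to clean; and vnode_pager_setsize() now ignores objects it does not manage, the tmpfs object's size being maintained by tmpfs itself. Condensed from the vm_mmap.c hunk, the backing-object choice reads as follows (mmap_backing_object() is a hypothetical name for the inline logic):

	static vm_object_t
	mmap_backing_object(struct vnode *vp, vm_object_t obj, vm_ooffset_t foff,
	    vm_size_t objsize, vm_prot_t prot, struct thread *td)
	{

		if (obj->type == OBJT_VNODE)
			/* Disk filesystems: go through the vnode pager. */
			return (vm_pager_allocate(OBJT_VNODE, vp, objsize,
			    prot, foff, td->td_ucred));
		/*
		 * tmpfs: vp->v_object is the real backing store already;
		 * a new reference is all that is needed.
		 */
		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
		    ("wrong object type"));
		vm_object_reference(obj);
		return (obj);
	}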