diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index e9324cf..95c3afb 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -380,13 +380,17 @@ loop:
 		/* FALLTHROUGH */
 	case VLNK:
 		/* FALLTHROUGH */
-	case VREG:
-		/* FALLTHROUGH */
 	case VSOCK:
 		break;
 	case VFIFO:
 		vp->v_op = &tmpfs_fifoop_entries;
 		break;
+	case VREG:
+		VI_LOCK(vp);
+		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
+		vp->v_object = node->tn_reg.tn_aobj;
+		VI_UNLOCK(vp);
+		break;
 	case VDIR:
 		MPASS(node->tn_dir.tn_parent != NULL);
 		if (node->tn_dir.tn_parent == node)
@@ -397,7 +401,6 @@ loop:
 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
 	}
 
-	vnode_pager_setsize(vp, node->tn_size);
 	error = insmntque(vp, mp);
 	if (error)
 		vp = NULL;
@@ -895,8 +898,8 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 	MPASS(newsize >= 0);
 
 	node = VP_TO_TMPFS_NODE(vp);
-	uobj = node->tn_reg.tn_aobj;
 	tmp = VFS_TO_TMPFS(vp->v_mount);
+	uobj = node->tn_reg.tn_aobj;
 
 	/*
 	 * Convert the old and new sizes to the number of pages needed to
@@ -917,7 +920,6 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 	TMPFS_UNLOCK(tmp);
 
 	node->tn_size = newsize;
-	vnode_pager_setsize(vp, newsize);
 	VM_OBJECT_LOCK(uobj);
 	if (newsize < oldsize) {
 		/*
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 0568e93..e0a2180 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -263,6 +263,23 @@ tmpfs_open(struct vop_open_args *v)
 	return error;
 }
 
+static void
+tmpfs_clear_text(struct vnode *vp)
+{
+	vm_object_t object;
+
+	ASSERT_VOP_ELOCKED(vp, "tmpfs_close");
+	if (vp->v_type != VREG)
+		return;
+	object = vp->v_object;
+	if (object == NULL)
+		return;
+	VM_OBJECT_LOCK(object);
+	if (object->ref_count <= 1)
+		vp->v_vflag &= ~VV_TEXT;
+	VM_OBJECT_UNLOCK(object);
+}
+
 /* --------------------------------------------------------------------- */
 
 static int
@@ -270,10 +287,9 @@ tmpfs_close(struct vop_close_args *v)
 {
 	struct vnode *vp = v->a_vp;
 
-	MPASS(VOP_ISLOCKED(vp));
-
 	/* Update node times. */
 	tmpfs_update(vp);
+	tmpfs_clear_text(vp);
 
 	return (0);
 }
@@ -431,7 +447,6 @@ tmpfs_setattr(struct vop_setattr_args *v)
 	return error;
 }
 
-/* --------------------------------------------------------------------- */
 static int
 tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
     vm_offset_t offset, size_t tlen, struct uio *uio)
@@ -446,13 +461,15 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			error = vm_pager_get_pages(tobj, &m, 1, 0);
-			if (error != 0) {
-				printf("tmpfs get pages from pager error [read]\n");
+			if (error != VM_PAGER_OK) {
+				error = EIO;
+				vm_page_wakeup(m);
 				goto out;
 			}
 		} else
 			vm_page_zero_invalid(m, TRUE);
 	}
+	vm_page_wakeup(m);
 	VM_OBJECT_UNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
@@ -460,129 +477,26 @@ out:
 	vm_page_lock(m);
 	vm_page_unwire(m, TRUE);
 	vm_page_unlock(m);
-	vm_page_wakeup(m);
 	vm_object_pip_subtract(tobj, 1);
 	VM_OBJECT_UNLOCK(tobj);
 
 	return (error);
 }
 
-static __inline int
-tmpfs_nocacheread_buf(vm_object_t tobj, vm_pindex_t idx,
-    vm_offset_t offset, size_t tlen, void *buf)
-{
-	struct uio uio;
-	struct iovec iov;
-
-	uio.uio_iovcnt = 1;
-	uio.uio_iov = &iov;
-	iov.iov_base = buf;
-	iov.iov_len = tlen;
-
-	uio.uio_offset = 0;
-	uio.uio_resid = tlen;
-	uio.uio_rw = UIO_READ;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-
-	return (tmpfs_nocacheread(tobj, idx, offset, tlen, &uio));
-}
-
-static int
-tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
-{
-	struct sf_buf	*sf;
-	vm_pindex_t	idx;
-	vm_page_t	m;
-	vm_offset_t	offset;
-	off_t		addr;
-	size_t		tlen;
-	char		*ma;
-	int		error;
-
-	addr = uio->uio_offset;
-	idx = OFF_TO_IDX(addr);
-	offset = addr & PAGE_MASK;
-	tlen = MIN(PAGE_SIZE - offset, len);
-
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
-		goto nocache;
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(m, offset, tlen)) {
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(m, PG_REFERENCED);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&m, offset, tlen, uio);
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return (error);
-	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
-		KASSERT(offset == 0,
-		    ("unexpected offset in tmpfs_mappedread for sendfile"));
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(m, PG_REFERENCED);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		sched_pin();
-		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-		ma = (char *)sf_buf_kva(sf);
-		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
-		if (error == 0) {
-			if (tlen != PAGE_SIZE)
-				bzero(ma + tlen, PAGE_SIZE - tlen);
-			uio->uio_offset += tlen;
-			uio->uio_resid -= tlen;
-		}
-		sf_buf_free(sf);
-		sched_unpin();
-		VM_OBJECT_LOCK(vobj);
-		if (error == 0)
-			m->valid = VM_PAGE_BITS_ALL;
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return (error);
-	}
-	VM_OBJECT_UNLOCK(vobj);
-nocache:
-	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
-
-	return (error);
-}
-
 static int
 tmpfs_read(struct vop_read_args *v)
 {
 	struct vnode *vp = v->a_vp;
 	struct uio *uio = v->a_uio;
-
 	struct tmpfs_node *node;
 	vm_object_t uobj;
 	size_t len;
 	int resid;
-
 	int error = 0;
+	vm_pindex_t idx;
+	vm_offset_t offset;
+	off_t addr;
+	size_t tlen;
 
 	node = VP_TO_TMPFS_NODE(vp);
 
@@ -606,7 +520,11 @@ tmpfs_read(struct vop_read_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
+		addr = uio->uio_offset;
+		idx = OFF_TO_IDX(addr);
+		offset = addr & PAGE_MASK;
+		tlen = MIN(PAGE_SIZE - offset, len);
+		error = tmpfs_nocacheread(uobj, idx, offset, tlen, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -619,10 +537,10 @@ out:
 /* --------------------------------------------------------------------- */
 
 static int
-tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
 {
 	vm_pindex_t	idx;
-	vm_page_t	vpg, tpg;
+	vm_page_t	tpg;
 	vm_offset_t	offset;
 	off_t		addr;
 	size_t		tlen;
@@ -635,37 +553,6 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
 	offset = addr & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
-		vpg = NULL;
-		goto nocache;
-	}
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(vpg, offset, tlen)) {
-		if ((vpg->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.
-			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(vpg, PG_REFERENCED);
-			vm_page_sleep(vpg, "tmfsmw");
-			goto lookupvpg;
-		}
-		vm_page_busy(vpg);
-		vm_page_undirty(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&vpg, offset, tlen, uio);
-	} else {
-		if (__predict_false(vobj->cache != NULL))
-			vm_page_cache_free(vobj, idx, idx + 1);
-		VM_OBJECT_UNLOCK(vobj);
-		vpg = NULL;
-	}
-nocache:
 	VM_OBJECT_LOCK(tobj);
 	vm_object_pip_add(tobj, 1);
 	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
@@ -673,24 +560,19 @@ nocache:
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			error = vm_pager_get_pages(tobj, &tpg, 1, 0);
-			if (error != 0) {
-				printf("tmpfs get pages from pager error [write]\n");
+			if (error != VM_PAGER_OK) {
+				error = EIO;
+				vm_page_wakeup(tpg);
 				goto out;
 			}
 		} else
 			vm_page_zero_invalid(tpg, TRUE);
 	}
+	vm_page_wakeup(tpg);
 	VM_OBJECT_UNLOCK(tobj);
-	if (vpg == NULL)
-		error = uiomove_fromphys(&tpg, offset, tlen, uio);
-	else {
-		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
-		pmap_copy_page(vpg, tpg);
-	}
+	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
 out:
-	if (vobj != NULL)
-		VM_OBJECT_LOCK(vobj);
 	if (error == 0) {
 		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
 		    ("parts of tpg invalid"));
@@ -699,11 +581,6 @@ out:
 	vm_page_lock(tpg);
 	vm_page_unwire(tpg, TRUE);
 	vm_page_unlock(tpg);
-	vm_page_wakeup(tpg);
-	if (vpg != NULL)
-		vm_page_wakeup(vpg);
-	if (vobj != NULL)
-		VM_OBJECT_UNLOCK(vobj);
 	vm_object_pip_subtract(tobj, 1);
 	VM_OBJECT_UNLOCK(tobj);
 
@@ -762,7 +639,7 @@ tmpfs_write(struct vop_write_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedwrite(vp->v_object, uobj, len, uio);
+		error = tmpfs_mappedwrite(uobj, len, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -1417,12 +1294,12 @@ tmpfs_inactive(struct vop_inactive_args *v)
 
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp));
-
 	node = VP_TO_TMPFS_NODE(vp);
 
 	if (node->tn_links == 0)
 		vrecycle(vp, l);
+	else
+		tmpfs_clear_text(vp);
 
 	return 0;
 }
@@ -1440,7 +1317,7 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
 	node = VP_TO_TMPFS_NODE(vp);
 	tmp = VFS_TO_TMPFS(vp->v_mount);
 
-	vnode_destroy_vobject(vp);
+	vp->v_object = NULL;
 	cache_purge(vp);
 
 	TMPFS_NODE_LOCK(node);
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 45f6d64..31ae810 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -86,9 +86,9 @@ static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
     const char *interp, int32_t *osrel);
 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
     u_long *entry, size_t pagesize);
-static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
-    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
-    vm_prot_t prot, size_t pagesize);
+static int __elfN(load_section)(struct vmspace *vmspace, struct vnode *vp,
+    vm_object_t object, vm_offset_t offset, caddr_t vmaddr, size_t memsz,
+    size_t filsz, vm_prot_t prot, size_t pagesize);
 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
     int32_t *osrel);
@@ -437,12 +437,13 @@ __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 }
 
 static int
-__elfN(load_section)(struct vmspace *vmspace,
+__elfN(load_section)(struct vmspace *vmspace, struct vnode *vp,
 	vm_object_t object, vm_offset_t offset,
 	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
 	size_t pagesize)
 {
 	struct sf_buf *sf;
+	struct vattr va;
 	size_t map_len;
 	vm_offset_t map_addr;
 	int error, rv, cow;
@@ -458,7 +459,10 @@ __elfN(load_section)(struct vmspace *vmspace,
 	 * While I'm here, might as well check for something else that
 	 * is invalid: filsz cannot be greater than memsz.
 	 */
-	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
+	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
+	if (error != 0)
+		return (error);
+	if ((off_t)filsz + offset > va.va_size ||
 	    filsz > memsz) {
 		uprintf("elf_load_section: truncated ELF file\n");
 		return (ENOEXEC);
@@ -664,7 +668,7 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
 			/* Loadable segment */
 			prot = __elfN(trans_prot)(phdr[i].p_flags);
-			if ((error = __elfN(load_section)(vmspace,
+			if ((error = __elfN(load_section)(vmspace, imgp->vp,
 			    imgp->object, phdr[i].p_offset,
 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
@@ -820,7 +824,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
 			prot |= VM_PROT_EXECUTE;
 #endif
 
-		if ((error = __elfN(load_section)(vmspace,
+		if ((error = __elfN(load_section)(vmspace, imgp->vp,
 		    imgp->object, phdr[i].p_offset,
 		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
 		    phdr[i].p_memsz, phdr[i].p_filesz, prot,
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 57fd5eb..585c705 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -40,16 +40,9 @@
  * (4) Resource limits?  Does this need its own resource limits or are the
  *     existing limits in mmap(2) sufficient?
  *
- * (5) Partial page truncation.  vnode_pager_setsize() will zero any parts
- *     of a partially mapped page as a result of ftruncate(2)/truncate(2).
- *     We can do the same (with the same pmap evil), but do we need to
- *     worry about the bits on disk if the page is swapped out or will the
- *     swapper zero the parts of a page that are invalid if the page is
- *     swapped back in for us?
+ * (5) Add MAC support in mac_biba(4) and mac_mls(4).
  *
- * (6) Add MAC support in mac_biba(4) and mac_mls(4).
- *
- * (7) Add a MAC check_create() hook for creating new named objects.
+ * (6) Add a MAC check_create() hook for creating new named objects.
  */
 
 #include
@@ -252,15 +245,94 @@ shm_close(struct file *fp, struct thread *td)
 }
 
 static int
+shm_clear_truncate(vm_object_t object, vm_offset_t pindex, int base)
+{
+	vm_page_t m, ma[1];
+	int rv, size;
+
+	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+
+	m = vm_page_lookup(object, pindex);
+	if (m == NULL) {
+		/*
+		 * Both vm_page_grab and vm_pager_get_pages() may drop
+		 * object lock.  Since the coherence of
+		 * shmfd->shm_size and backing vm object size is
+		 * protected by vm object lock, return true as
+		 * indicator of neccessity of retry if there is a
+		 * possibility of dropping the lock.
+		 */
+		m = vm_page_grab(object, pindex, VM_ALLOC_RETRY |
+		    VM_ALLOC_NOBUSY);
+		if (m->valid == 0) {
+			if (!vm_pager_has_page(object, pindex, NULL, NULL)) {
+				/*
+				 * Insert zero page to avoid looping.
+				 * Next iteration of retry loop in
+				 * shm_dotruncate() usually find this
+				 * page.
+				 */
+				pmap_zero_page(m);
+				m->valid = VM_PAGE_BITS_ALL;
+				m->dirty = 0;
+				return (1);
+			}
+			KASSERT((m->oflags & VPO_BUSY) == 0,
+			    ("busy after grab"));
+			vm_page_busy(m);
+			ma[0] = m;
+			vm_object_pip_add(object, 1);
+			rv = vm_pager_get_pages(object, ma, 1, 0);
+			vm_object_pip_wakeup(object);
+			m = vm_page_lookup(object, pindex);
+			if (m == NULL)
+				return (1);
+			if (rv != VM_PAGER_OK) {
+				vm_page_lock(m);
+				vm_page_free(m);
+				vm_page_unlock(m);
+				return (1);
+			}
+			vm_page_wakeup(m);
+		}
+		return (1);
+	}
+
+	if (m->valid != 0) {
+		size = PAGE_SIZE - base;
+
+		pmap_zero_page_area(m, base, size);
+
+		/*
+		 * Update the valid bits to reflect the blocks that
+		 * have been zeroed.  Some of these valid bits may
+		 * have already been set.
+		 */
+		vm_page_set_valid(m, base, size);
+
+		/*
+		 * Round "base" to the next block boundary so that the
+		 * dirty bit for a partially zeroed block is not
+		 * cleared.
+		 */
+		base = roundup2(base, DEV_BSIZE);
+
+		vm_page_clear_dirty(m, base, PAGE_SIZE - base);
+	}
+	return (0);
+}
+
+static int
 shm_dotruncate(struct shmfd *shmfd, off_t length)
 {
 	vm_object_t object;
-	vm_page_t m;
-	vm_pindex_t nobjsize;
+	vm_pindex_t nobjsize, pindex;
 	vm_ooffset_t delta;
+	int base;
 
 	object = shmfd->shm_object;
 	VM_OBJECT_LOCK(object);
+retry:
 	if (length == shmfd->shm_size) {
 		VM_OBJECT_UNLOCK(object);
 		return (0);
 	}
@@ -271,6 +343,18 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 	if (length < shmfd->shm_size) {
 		delta = ptoa(object->size - nobjsize);
 
+		/*
+		 * If the last page is partially mapped, then zero out
+		 * the garbage at the end of the page.  See comments
+		 * in vnode_pager_setsize() for more details.
+		 */
+		base = (int)length & PAGE_MASK;
+		pindex = OFF_TO_IDX(length);
+		if (base != 0) {
+			if (shm_clear_truncate(object, pindex, base))
+				goto retry;
+		}
+
 		/* Toss in memory pages. */
 		if (nobjsize < object->size)
 			vm_object_page_remove(object, nobjsize, object->size,
@@ -283,45 +367,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 		/* Free the swap accounted for shm */
 		swap_release_by_cred(delta, object->cred);
 		object->charge -= delta;
-
-		/*
-		 * If the last page is partially mapped, then zero out
-		 * the garbage at the end of the page.  See comments
-		 * in vnode_pager_setsize() for more details.
-		 *
-		 * XXXJHB: This handles in memory pages, but what about
-		 * a page swapped out to disk?
-		 */
-		if ((length & PAGE_MASK) &&
-		    (m = vm_page_lookup(object, OFF_TO_IDX(length))) != NULL &&
-		    m->valid != 0) {
-			int base = (int)length & PAGE_MASK;
-			int size = PAGE_SIZE - base;
-
-			pmap_zero_page_area(m, base, size);
-
-			/*
-			 * Update the valid bits to reflect the blocks that
-			 * have been zeroed.  Some of these valid bits may
-			 * have already been set.
-			 */
-			vm_page_set_valid(m, base, size);
-
-			/*
-			 * Round "base" to the next block boundary so that the
-			 * dirty bit for a partially zeroed block is not
-			 * cleared.
-			 */
-			base = roundup2(base, DEV_BSIZE);
-
-			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
-		} else if ((length & PAGE_MASK) &&
-		    __predict_false(object->cache != NULL)) {
-			vm_page_cache_free(object, OFF_TO_IDX(length),
-			    nobjsize);
-		}
 	} else {
-
 		/* Attempt to reserve the swap */
 		delta = ptoa(nobjsize - object->size);
 		if (!swap_reserve_by_cred(delta, object->cred)) {
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 0e5efe6..c96bd51 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1834,9 +1834,10 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
 	struct mbuf *m = NULL;
 	struct sf_buf *sf;
 	struct vm_page *pg;
+	struct vattr va;
 	off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0;
 	int error, hdrlen = 0, mnw = 0;
-	int vfslocked;
+	int bsize, vfslocked;
 	struct sendfile_sync *sfs = NULL;
 
 	/*
@@ -2032,6 +2033,20 @@ retry_space:
 		 */
 		space -= hdrlen;
 
+		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+		error = vn_lock(vp, LK_SHARED);
+		if (error != 0) {
+			VFS_UNLOCK_GIANT(vfslocked);
+			goto done;
+		}
+		error = VOP_GETATTR(vp, &va, td->td_ucred);
+		if (error != 0) {
+			VOP_UNLOCK(vp, 0);
+			VFS_UNLOCK_GIANT(vfslocked);
+			goto done;
+		}
+		bsize = vp->v_mount->mnt_stat.f_iosize;
+
 		/*
 		 * Loop and construct maximum sized mbuf chain to be bulk
 		 * dumped into socket buffer.
@@ -2049,12 +2064,12 @@ retry_space:
 			 */
 			pgoff = (vm_offset_t)(off & PAGE_MASK);
 			xfsize = omin(PAGE_SIZE - pgoff,
-			    obj->un_pager.vnp.vnp_size - uap->offset -
+			    va.va_size - uap->offset -
 			    fsbytes - loopbytes);
 			if (uap->nbytes)
 				rem = (uap->nbytes - fsbytes - loopbytes);
 			else
-				rem = obj->un_pager.vnp.vnp_size -
+				rem = va.va_size -
 				    uap->offset - fsbytes - loopbytes;
 			xfsize = omin(rem, xfsize);
 			xfsize = omin(space - loopbytes, xfsize);
@@ -2086,7 +2101,7 @@ retry_space:
 			else if (uap->flags & SF_NODISKIO)
 				error = EBUSY;
 			else {
-				int bsize, resid;
+				int resid;
 
 				/*
 				 * Ensure that our page is still around
@@ -2098,12 +2113,6 @@ retry_space:
 				/*
 				 * Get the page from backing store.
 				 */
-				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
-				error = vn_lock(vp, LK_SHARED);
-				if (error != 0)
-					goto after_read;
-				bsize = vp->v_mount->mnt_stat.f_iosize;
-
 				/*
 				 * XXXMAC: Because we don't have fp->f_cred
 				 * here, we pass in NOCRED.  This is probably
@@ -2114,9 +2123,6 @@ retry_space:
 				    trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
 				    IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
 				    td->td_ucred, NOCRED, &resid, td);
-				VOP_UNLOCK(vp, 0);
-			after_read:
-				VFS_UNLOCK_GIANT(vfslocked);
 				VM_OBJECT_LOCK(obj);
 				vm_page_io_finish(pg);
 				if (!error)
@@ -2196,6 +2202,9 @@ retry_space:
 			}
 		}
 
+		VOP_UNLOCK(vp, 0);
+		VFS_UNLOCK_GIANT(vfslocked);
+
 		/* Add the buffer chain to the socket buffer. */
 		if (m != NULL) {
 			int mlen, err;
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index a46d6b5..6148ae5 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1254,7 +1254,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 			error = EINVAL;
 			goto done;
 		}
-		if (obj->handle != vp) {
+		if (obj->type == OBJT_VNODE && obj->handle != vp) {
 			vput(vp);
 			vp = (struct vnode*)obj->handle;
 			vget(vp, LK_SHARED, td);
@@ -1293,7 +1293,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	objsize = round_page(va.va_size);
 	if (va.va_nlink == 0)
 		flags |= MAP_NOSYNC;
-	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
+	if (obj->type == OBJT_VNODE)
+		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
+		    td->td_ucred);
+	else {
+		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
+		    ("wrong object type"));
+		vm_object_reference(obj);
+	}
 	if (obj == NULL) {
 		error = ENOMEM;
 		goto done;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 602d99e..8b7839c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -816,7 +816,12 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
 
 	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
+
+	/*
+	 * The OBJ_MIGHTBEDIRTY flag is only set for the OBJT_VNODE
+	 * objects.  The check below prevents the function from
+	 * operating on the non-vnode objects.
+	 */
 	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
 	    object->resident_page_count == 0)
 		return;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 23ade63..4e17cf4 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -212,8 +212,7 @@ retry:
 		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead",
 		    0);
 	}
-	if (vp->v_usecount == 0)
-		panic("vnode_pager_alloc: no vnode reference");
+	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
 
 	if (object == NULL) {
 		/*
@@ -369,7 +368,7 @@ vnode_pager_setsize(vp, nsize)
 	vm_page_t m;
 	vm_pindex_t nobjsize;
 
-	if ((object = vp->v_object) == NULL)
+	if ((object = vp->v_object) == NULL || object->type != OBJT_VNODE)
 		return;
 /* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
 	VM_OBJECT_LOCK(object);