Index: sys/fs/tmpfs/tmpfs_vnops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vnops.c	(revision 253523)
+++ sys/fs/tmpfs/tmpfs_vnops.c	(working copy)
@@ -445,7 +445,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t id
 	vm_page_t	m;
 	int		error, rv;
 
-	VM_OBJECT_WLOCK(tobj);
+	VM_OBJECT_RLOCK(tobj);
 
 	/*
 	 * Although the tmpfs vnode lock is held here, it is
@@ -454,8 +454,17 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t id
 	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
 	 * type object.
 	 */
+retry:
 	m = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
+		if (!VM_OBJECT_LOCK_TRYUPGRADE(tobj)) {
+			VM_OBJECT_RUNLOCK(tobj);
+			VM_OBJECT_WLOCK(tobj);
+			vm_page_lock(m);
+			vm_page_free(m);
+			vm_page_unlock(m);
+			goto retry;
+		}
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &m, 1, 0);
 			m = vm_page_lookup(tobj, idx);
@@ -480,11 +489,14 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t id
 		vm_page_zero_invalid(m, TRUE);
 	}
 	vm_page_busy_downgrade(m);
-	VM_OBJECT_WUNLOCK(tobj);
+	if (VM_OBJECT_WOWNED(tobj))
+		VM_OBJECT_WUNLOCK(tobj);
+	else
+		VM_OBJECT_RUNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
-	VM_OBJECT_WLOCK(tobj);
+	VM_OBJECT_RLOCK(tobj);
 	vm_page_busy_runlock(m);
-	VM_OBJECT_WUNLOCK(tobj);
+	VM_OBJECT_RUNLOCK(tobj);
 	vm_page_lock(m);
 	if (m->queue == PQ_NONE) {
 		vm_page_deactivate(m);
@@ -567,9 +579,18 @@ tmpfs_mappedwrite(vm_object_t tobj, size_t len, st
 	offset = addr & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	VM_OBJECT_WLOCK(tobj);
+	VM_OBJECT_RLOCK(tobj);
+retry:
 	tpg = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
+		if (!VM_OBJECT_LOCK_TRYUPGRADE(tobj)) {
+			VM_OBJECT_RUNLOCK(tobj);
+			VM_OBJECT_WLOCK(tobj);
+			vm_page_lock(tpg);
+			vm_page_free(tpg);
+			vm_page_unlock(tpg);
+			goto retry;
+		}
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
 			tpg = vm_page_lookup(tobj, idx);
@@ -594,7 +615,10 @@ tmpfs_mappedwrite(vm_object_t tobj, size_t len, st
 		vm_page_zero_invalid(tpg, TRUE);
 	}
 	vm_page_busy_downgrade(tpg);
-	VM_OBJECT_WUNLOCK(tobj);
+	if (VM_OBJECT_WOWNED(tobj))
+		VM_OBJECT_WUNLOCK(tobj);
+	else
+		VM_OBJECT_RUNLOCK(tobj);
 	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_WLOCK(tobj);
 	vm_page_busy_runlock(tpg);
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c	(revision 253523)
+++ sys/vm/vm_kern.c	(working copy)
@@ -555,7 +555,7 @@ retry:
 	/*
 	 * Loop thru pages, entering them in the pmap.
 	 */
-	VM_OBJECT_WLOCK(kmem_object);
+	VM_OBJECT_RLOCK(kmem_object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
 		/*
@@ -565,7 +565,7 @@ retry:
 		    TRUE);
 		vm_page_busy_wunlock(m);
 	}
-	VM_OBJECT_WUNLOCK(kmem_object);
+	VM_OBJECT_RUNLOCK(kmem_object);
 
 	return (KERN_SUCCESS);
 }
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c	(revision 253523)
+++ sys/vm/vm_object.c	(working copy)
@@ -1212,15 +1212,15 @@ vm_object_shadow(
 	 * Don't create the new object if the old object isn't shared.
 	 */
 	if (source != NULL) {
-		VM_OBJECT_WLOCK(source);
+		VM_OBJECT_RLOCK(source);
 		if (source->ref_count == 1 &&
 		    source->handle == NULL &&
 		    (source->type == OBJT_DEFAULT ||
 		     source->type == OBJT_SWAP)) {
-			VM_OBJECT_WUNLOCK(source);
+			VM_OBJECT_RUNLOCK(source);
 			return;
 		}
-		VM_OBJECT_WUNLOCK(source);
+		VM_OBJECT_RUNLOCK(source);
 	}
 
 	/*
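The tmpfs hunks above all follow one pattern: take the object lock shared for the common case (page resident and fully valid) and escalate to the exclusive lock only when the pager must be invoked. A distilled sketch of that pattern, using only the primitives this patch itself relies on (illustrative only, not a drop-in):

	VM_OBJECT_RLOCK(tobj);
retry:
	m = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		/* Calling the pager requires the exclusive lock. */
		if (!VM_OBJECT_LOCK_TRYUPGRADE(tobj)) {
			/*
			 * The try-upgrade fails when other readers hold
			 * the lock.  Drop the shared lock, take the
			 * exclusive lock outright, free the grabbed
			 * (not fully valid) page, and restart the grab
			 * under a lock strong enough for pager I/O.
			 */
			VM_OBJECT_RUNLOCK(tobj);
			VM_OBJECT_WLOCK(tobj);
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			goto retry;
		}
		/* Exclusive lock held: vm_pager_get_pages() is now safe. */
	}

Note that the grabbed page stays busied across the drop-and-relock window, which is what keeps freeing it on the retry path safe.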
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c	(revision 253523)
+++ sys/vm/vm_fault.c	(working copy)
@@ -1362,7 +1362,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src
 		/*
 		 * Mark it no longer busy, and put it on the active list.
 		 */
-		VM_OBJECT_WLOCK(dst_object);
+		VM_OBJECT_RLOCK(dst_object);
 
 		if (upgrade) {
 			vm_page_lock(src_m);
@@ -1380,7 +1380,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src
 			vm_page_busy_wunlock(dst_m);
 		}
 	}
-	VM_OBJECT_WUNLOCK(dst_object);
+	VM_OBJECT_RUNLOCK(dst_object);
 	if (upgrade) {
 		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
 		vm_object_deallocate(src_object);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 253523)
+++ sys/vm/vm_page.c	(working copy)
@@ -2528,7 +2528,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pinde
 	vm_page_t m;
 	int origwlock;
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
+	VM_OBJECT_ASSERT_LOCKED(object);
 	origwlock = VM_OBJECT_WOWNED(object);
 	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
 	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c	(revision 253523)
+++ sys/vm/vm_glue.c	(working copy)
@@ -239,10 +239,19 @@ vm_imgact_page_iostart(vm_object_t object, vm_ooff
 	vm_pindex_t pindex;
 	int rv;
 
-	VM_OBJECT_WLOCK(object);
+	VM_OBJECT_RLOCK(object);
 	pindex = OFF_TO_IDX(offset);
+retry:
 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
+		if (!VM_OBJECT_LOCK_TRYUPGRADE(object)) {
+			VM_OBJECT_RUNLOCK(object);
+			VM_OBJECT_WLOCK(object);
+			vm_page_lock(m);
+			vm_page_free(m);
+			vm_page_unlock(m);
+			goto retry;
+		}
 		ma[0] = m;
 		rv = vm_pager_get_pages(object, ma, 1, 0);
 		m = vm_page_lookup(object, pindex);
@@ -256,10 +265,12 @@ vm_imgact_page_iostart(vm_object_t object, vm_ooff
 			goto out;
 		}
 	}
-	vm_page_busy_wunlock(m);
-	vm_page_busy_rlock(m);
+	vm_page_busy_downgrade(m);
 out:
-	VM_OBJECT_WUNLOCK(object);
+	if (VM_OBJECT_WOWNED(object))
+		VM_OBJECT_WUNLOCK(object);
+	else
+		VM_OBJECT_RUNLOCK(object);
 	return (m);
 }
 
@@ -290,9 +301,9 @@ vm_imgact_unmap_page(vm_object_t object, struct sf
 	m = sf_buf_page(sf);
 	sf_buf_free(sf);
 	sched_unpin();
-	VM_OBJECT_WLOCK(object);
+	VM_OBJECT_RLOCK(object);
 	vm_page_busy_runlock(m);
-	VM_OBJECT_WUNLOCK(object);
+	VM_OBJECT_RUNLOCK(object);
 }
 
 void
@@ -504,7 +515,7 @@ vm_thread_swapout(struct thread *td)
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
 	pmap_qremove(td->td_kstack, pages);
-	VM_OBJECT_WLOCK(ksobj);
+	VM_OBJECT_RLOCK(ksobj);
 	for (i = 0; i < pages; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
@@ -514,7 +525,7 @@ vm_thread_swapout(struct thread *td)
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
 	}
-	VM_OBJECT_WUNLOCK(ksobj);
+	VM_OBJECT_RUNLOCK(ksobj);
 }
 
 /*
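With vm_page_grab() relaxed to VM_OBJECT_ASSERT_LOCKED() and recording VM_OBJECT_WOWNED() on entry, callers such as vm_imgact_page_iostart() above can reach their exit path holding either flavor of the lock, depending on whether the upgrade path ran. The matching release idiom, repeated in the tmpfs hunks as well (sketch, not new API):

out:
	/* Release whichever object lock this code path ended up owning. */
	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);
	else
		VM_OBJECT_RUNLOCK(object);
	return (m);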
Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(revision 253523)
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(working copy)
@@ -331,15 +331,14 @@ page_busy(vnode_t *vp, int64_t start, int64_t off,
 	vm_page_t pp;
 
 	obj = vp->v_object;
-	zfs_vmobject_assert_wlocked(obj);
+	zfs_vmobject_assert_locked(obj);
 
 	for (;;) {
 		if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
 		    pp->valid) {
 			if (vm_page_sleep_if_busy(pp, "zfsmwb",
-			    VM_ALLOC_NOBUSY, TRUE))
+			    VM_ALLOC_RBUSY, TRUE))
 				continue;
-			vm_page_busy_rlock(pp);
 		} else if (pp == NULL) {
 			if (!alloc)
 				break;
@@ -483,21 +482,21 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 	ASSERT(obj != NULL);
 	ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
 
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_rlock(obj);
 	for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
 		int bytes = MIN(PAGESIZE, len);
 
 		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_RBUSY |
 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 		if (pp->valid == 0) {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_runlock(obj);
 			va = zfs_map_page(pp, &sf);
 			error = dmu_read(os, zp->z_id, start, bytes, va,
 			    DMU_READ_PREFETCH);
 			if (bytes != PAGESIZE && error == 0)
 				bzero(va + bytes, PAGESIZE - bytes);
 			zfs_unmap_page(sf);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_rlock(obj);
 			vm_page_busy_runlock(pp);
 			vm_page_lock(pp);
 			if (error) {
@@ -517,7 +516,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 		uio->uio_offset += bytes;
 		len -= bytes;
 	}
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_runlock(obj);
 	return (error);
 }
 
@@ -549,7 +548,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
 
 	start = uio->uio_loffset;
 	off = start & PAGEOFFSET;
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_rlock(obj);
 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
 		vm_page_t pp;
 		uint64_t bytes = MIN(PAGESIZE - off, len);
@@ -558,23 +557,23 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
 			struct sf_buf *sf;
 			caddr_t va;
 
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_runlock(obj);
 			va = zfs_map_page(pp, &sf);
 			error = uiomove(va + off, bytes, UIO_READ, uio);
 			zfs_unmap_page(sf);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_rlock(obj);
 			page_unbusy(pp, FALSE);
 		} else {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_runlock(obj);
 			error = dmu_read_uio(os, zp->z_id, uio, bytes);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_rlock(obj);
 		}
 		len -= bytes;
 		off = 0;
 		if (error)
 			break;
 	}
 
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_runlock(obj);
 	return (error);
 }
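In the mappedread_sf() loop above it is the page's busy state, not the object lock, that protects the I/O: the page is grabbed read-busied (VM_ALLOC_RBUSY), the object lock is dropped across the potentially sleeping dmu_read(), and the busy reference is released after relocking. Reduced sketch of that loop body, using only calls from the hunk itself:

	pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_RBUSY |
	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (pp->valid == 0) {
		/* The read-busy reference keeps pp stable without the lock. */
		zfs_vmobject_runlock(obj);
		va = zfs_map_page(pp, &sf);
		error = dmu_read(os, zp->z_id, start, bytes, va,
		    DMU_READ_PREFETCH);
		zfs_unmap_page(sf);
		zfs_vmobject_rlock(obj);
		/* Drop the read-busy reference now that the lock is back. */
		vm_page_busy_runlock(pp);
	}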
Index: sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
===================================================================
--- sys/cddl/compat/opensolaris/kern/opensolaris_vm.c	(revision 253523)
+++ sys/cddl/compat/opensolaris/kern/opensolaris_vm.c	(working copy)
@@ -42,6 +42,32 @@ const int zfs_vm_pagerret_error = VM_PAGER_ERROR;
 const int zfs_vm_pagerret_ok = VM_PAGER_OK;
 
 void
+zfs_vmobject_assert_locked(vm_object_t object)
+{
+
+	/*
+	 * This is not ideal because the FILE/LINE used by assertions
+	 * will not be too helpful, but this must be a real function
+	 * for compatibility reasons.
+	 */
+	VM_OBJECT_ASSERT_LOCKED(object);
+}
+
+void
+zfs_vmobject_rlock(vm_object_t object)
+{
+
+	VM_OBJECT_RLOCK(object);
+}
+
+void
+zfs_vmobject_runlock(vm_object_t object)
+{
+
+	VM_OBJECT_RUNLOCK(object);
+}
+
+void
 zfs_vmobject_assert_wlocked(vm_object_t object)
 {
 
Index: sys/cddl/compat/opensolaris/sys/vm.h
===================================================================
--- sys/cddl/compat/opensolaris/sys/vm.h	(revision 253523)
+++ sys/cddl/compat/opensolaris/sys/vm.h	(working copy)
@@ -35,6 +35,9 @@ extern const int zfs_vm_pagerret_bad;
 extern const int zfs_vm_pagerret_error;
 extern const int zfs_vm_pagerret_ok;
 
+void zfs_vmobject_assert_locked(vm_object_t object);
+void zfs_vmobject_rlock(vm_object_t object);
+void zfs_vmobject_runlock(vm_object_t object);
 void zfs_vmobject_assert_wlocked(vm_object_t object);
 void zfs_vmobject_wlock(vm_object_t object);
 void zfs_vmobject_wunlock(vm_object_t object);
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c	(revision 253523)
+++ sys/kern/vfs_bio.c	(working copy)
@@ -3433,7 +3433,7 @@ allocbuf(struct buf *bp, int size)
 				    (bp->b_npages - desiredpages));
 			} else
 				BUF_CHECK_UNMAPPED(bp);
-			VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
+			VM_OBJECT_RLOCK(bp->b_bufobj->bo_object);
 			for (i = desiredpages; i < bp->b_npages; i++) {
 				/*
 				 * the page is not freed here -- it
@@ -3443,16 +3443,17 @@ allocbuf(struct buf *bp, int size)
 				m = bp->b_pages[i];
 				KASSERT(m != bogus_page,
 				    ("allocbuf: bogus page found"));
-				while (vm_page_sleep_if_busy(m,
-				    "biodep", VM_ALLOC_NOBUSY, FALSE))
+				while (vm_page_sleep_if_busy(m, "biodep", 0,
+				    FALSE))
 					continue;
 
 				bp->b_pages[i] = NULL;
 				vm_page_lock(m);
 				vm_page_unwire(m, 0);
 				vm_page_unlock(m);
+				vm_page_busy_wunlock(m);
 			}
-			VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
+			VM_OBJECT_RUNLOCK(bp->b_bufobj->bo_object);
 			bp->b_npages = desiredpages;
 		}
 	} else if (size > bp->b_bcount) {
@@ -3473,7 +3474,7 @@ allocbuf(struct buf *bp, int size)
 
 		obj = bp->b_bufobj->bo_object;
 
-		VM_OBJECT_WLOCK(obj);
+		VM_OBJECT_RLOCK(obj);
 		onpages = bp->b_npages;
 		while (bp->b_npages < desiredpages) {
 			vm_page_t m;
@@ -3537,11 +3538,13 @@ allocbuf(struct buf *bp, int size)
 			tinc = PAGE_SIZE;
 		}
 		while ((bp->b_npages - onpages) != 0) {
+			vm_page_t m;
+			m = bp->b_pages[onpages];
 			vm_page_busy_runlock(m);
 			++onpages;
 		}
-		VM_OBJECT_WUNLOCK(obj);
+		VM_OBJECT_RUNLOCK(obj);
 
 		/*
 		 * Step 3, fixup the KVM pmap.
Index: sys/kern/vfs_cluster.c
===================================================================
--- sys/kern/vfs_cluster.c	(revision 253523)
+++ sys/kern/vfs_cluster.c	(working copy)
@@ -414,20 +414,20 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize
 			 */
 			off = tbp->b_offset;
 			tsize = size;
-			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
+			VM_OBJECT_RLOCK(tbp->b_bufobj->bo_object);
 			for (j = 0; tsize > 0; j++) {
 				toff = off & PAGE_MASK;
 				tinc = tsize;
 				if (toff + tinc > PAGE_SIZE)
 					tinc = PAGE_SIZE - toff;
-				VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
+				VM_OBJECT_ASSERT_RLOCKED(tbp->b_pages[j]->object);
 				if ((tbp->b_pages[j]->valid &
 				    vm_page_bits(toff, tinc)) != 0)
 					break;
 				off += tinc;
 				tsize -= tinc;
 			}
-			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
+			VM_OBJECT_RUNLOCK(tbp->b_bufobj->bo_object);
 			if (tsize > 0) {
 				bqrelse(tbp);
 				break;
@@ -494,13 +494,13 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize
 	 * Fully valid pages in the cluster are already good and do not need
 	 * to be re-read from disk.  Replace the page with bogus_page
 	 */
-	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
+	VM_OBJECT_RLOCK(bp->b_bufobj->bo_object);
 	for (j = 0; j < bp->b_npages; j++) {
-		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
+		VM_OBJECT_ASSERT_RLOCKED(bp->b_pages[j]->object);
 		if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
 			bp->b_pages[j] = bogus_page;
 	}
-	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
+	VM_OBJECT_RUNLOCK(bp->b_bufobj->bo_object);
 	if (bp->b_bufsize > bp->b_kvasize)
 		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
 		    bp->b_bufsize, bp->b_kvasize);
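The new compat wrappers in the opensolaris_vm.c hunk above deliberately trade assertion precision for compatibility: as their comment notes, routing VM_OBJECT_ASSERT_LOCKED() through a real function means an assertion failure reports opensolaris_vm.c's FILE/LINE rather than the ZFS caller's. If that ever becomes a debugging problem, a purely hypothetical alternative (not part of this patch, and only viable if the compat header could see the native VM lock macros) would be a macro wrapper that expands in the caller:

/* Hypothetical macro form; this patch intentionally uses a function. */
#define	zfs_vmobject_assert_locked(obj)	VM_OBJECT_ASSERT_LOCKED(obj)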
Index: sys/kern/uipc_syscalls.c
===================================================================
--- sys/kern/uipc_syscalls.c	(revision 253523)
+++ sys/kern/uipc_syscalls.c	(working copy)
@@ -2221,7 +2221,7 @@ retry_space:
 			 * if not found or wait and loop if busy.
 			 */
 			pindex = OFF_TO_IDX(off);
-			VM_OBJECT_WLOCK(obj);
+			VM_OBJECT_RLOCK(obj);
 			pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY |
 			    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);
 
@@ -2233,7 +2233,7 @@ retry_space:
 			 * block.
 			 */
 			if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
-				VM_OBJECT_WUNLOCK(obj);
+				VM_OBJECT_RUNLOCK(obj);
 			else if (m != NULL)
 				error = EAGAIN;	/* send what we already got */
 			else if (uap->flags & SF_NODISKIO)
@@ -2241,7 +2241,7 @@ retry_space:
 			else {
 				ssize_t resid;
 
-				VM_OBJECT_WUNLOCK(obj);
+				VM_OBJECT_RUNLOCK(obj);
 
 				/*
 				 * Get the page from backing store.
Index: sys/kern/sys_process.c
===================================================================
--- sys/kern/sys_process.c	(revision 253523)
+++ sys/kern/sys_process.c	(working copy)
@@ -316,9 +316,9 @@ proc_rwmem(struct proc *p, struct uio *uio)
 
 		/*
 		 * Release the page.
 		 */
-		VM_OBJECT_WLOCK(m->object);
+		VM_OBJECT_RLOCK(m->object);
 		vm_page_busy_runlock(m);
-		VM_OBJECT_WUNLOCK(m->object);
+		VM_OBJECT_RUNLOCK(m->object);
 
 	} while (error == 0 && uio->uio_resid > 0);
Index: sys/dev/agp/agp_i810.c
===================================================================
--- sys/dev/agp/agp_i810.c	(revision 253523)
+++ sys/dev/agp/agp_i810.c	(working copy)
@@ -2006,12 +2006,12 @@ agp_i810_free_memory(device_t dev, struct agp_memo
 		/*
 		 * Unwire the page which we wired in alloc_memory.
 		 */
-		VM_OBJECT_WLOCK(mem->am_obj);
+		VM_OBJECT_RLOCK(mem->am_obj);
 		m = vm_page_lookup(mem->am_obj, 0);
 		vm_page_lock(m);
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
-		VM_OBJECT_WUNLOCK(mem->am_obj);
+		VM_OBJECT_RUNLOCK(mem->am_obj);
 	} else {
 		contigfree(sc->argb_cursor, mem->am_size, M_AGP);
 		sc->argb_cursor = NULL;
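proc_rwmem(), vm_imgact_unmap_page(), and agp_i810_free_memory() above all reduce to the same idiom: state transitions that are serialized by the page lock (wiring) or by the page's busy state need the object lock only to keep the object/page association stable, so shared mode suffices. Sketch of the idiom these hunks rely on:

	VM_OBJECT_RLOCK(m->object);
	vm_page_busy_runlock(m);	/* busy state: shared object lock is enough */
	VM_OBJECT_RUNLOCK(m->object);

	vm_page_lock(m);		/* wiring is protected by the page lock */
	vm_page_unwire(m, 0);
	vm_page_unlock(m);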
Index: sys/dev/agp/agp.c
===================================================================
--- sys/dev/agp/agp.c	(revision 253523)
+++ sys/dev/agp/agp.c	(working copy)
@@ -545,7 +545,7 @@ agp_generic_bind_memory(device_t dev, struct agp_m
 	 * because vm_page_grab() may sleep and we can't hold a mutex
 	 * while sleeping.
 	 */
-	VM_OBJECT_WLOCK(mem->am_obj);
+	VM_OBJECT_RLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		/*
 		 * Find a page from the object and wire it
@@ -558,14 +558,14 @@ agp_generic_bind_memory(device_t dev, struct agp_m
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	mtx_lock(&sc->as_lock);
 
 	if (mem->am_is_bound) {
 		device_printf(dev, "memory already bound\n");
 		error = EINVAL;
-		VM_OBJECT_WLOCK(mem->am_obj);
+		VM_OBJECT_RLOCK(mem->am_obj);
 		i = 0;
 		goto bad;
 	}
@@ -574,7 +574,7 @@ agp_generic_bind_memory(device_t dev, struct agp_m
 	 * Bind the individual pages and flush the chipset's
 	 * TLB.
 	 */
-	VM_OBJECT_WLOCK(mem->am_obj);
+	VM_OBJECT_RLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
 
@@ -602,7 +602,7 @@ agp_generic_bind_memory(device_t dev, struct agp_m
 		}
 		vm_page_busy_wunlock(m);
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	/*
 	 * Flush the cpu cache since we are providing a new mapping
@@ -623,7 +623,7 @@ agp_generic_bind_memory(device_t dev, struct agp_m
 	return 0;
 bad:
 	mtx_unlock(&sc->as_lock);
-	VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
+	VM_OBJECT_ASSERT_LOCKED(mem->am_obj);
 	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
 		if (k >= i)
@@ -632,7 +632,7 @@ bad:
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	return error;
 }
@@ -659,14 +659,14 @@ agp_generic_unbind_memory(device_t dev, struct agp
 	 */
 	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
 		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
-	VM_OBJECT_WLOCK(mem->am_obj);
+	VM_OBJECT_RLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
 		vm_page_lock(m);
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	agp_flush_cache();
 	AGP_FLUSH_TLB(dev);
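Taken together, the hunks converge on a single locking protocol, summarized here for review (no new code, just the rules the changes above follow):

/*
 * Object-lock protocol after this patch, as implemented above:
 *
 * - Page lookup, validity tests, wiring changes, and busy-state
 *   transitions run under VM_OBJECT_RLOCK().
 * - Page allocation/free and pager I/O still require
 *   VM_OBJECT_WLOCK(); read-side paths escalate via
 *   VM_OBJECT_LOCK_TRYUPGRADE(), falling back to drop-and-relock
 *   plus a retry when the upgrade fails.
 * - Paths that may end up holding either lock (vm_page_grab()
 *   callers) test VM_OBJECT_WOWNED() to release the lock they
 *   actually own, and assertions on such paths are relaxed to
 *   VM_OBJECT_ASSERT_LOCKED().
 */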