Index: sys/sparc64/sparc64/pmap.c =================================================================== --- sys/sparc64/sparc64/pmap.c (revision 250855) +++ sys/sparc64/sparc64/pmap.c (working copy) @@ -1843,6 +1843,8 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst) vm_paddr_t psrc; struct tte *tp; + if ((msrc->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(msrc->object); KASSERT((mdst->flags & PG_FICTITIOUS) == 0, ("pmap_copy_page: fake dst page")); KASSERT((msrc->flags & PG_FICTITIOUS) == 0, Index: sys/ia64/ia64/pmap.c =================================================================== --- sys/ia64/ia64/pmap.c (revision 250855) +++ sys/ia64/ia64/pmap.c (working copy) @@ -2009,6 +2009,9 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { void *dst, *src; + if ((msrc->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(msrc->object); + src = (void *)pmap_page_to_va(msrc); dst = (void *)pmap_page_to_va(mdst); bcopy(src, dst, PAGE_SIZE); Index: sys/vm/vm_fault.c =================================================================== --- sys/vm/vm_fault.c (revision 250855) +++ sys/vm/vm_fault.c (working copy) @@ -1318,7 +1318,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src * (Because the source is wired down, the page will be in * memory.) */ - VM_OBJECT_WLOCK(src_object); + VM_OBJECT_RLOCK(src_object); object = src_object; pindex = src_pindex + dst_pindex; while ((src_m = vm_page_lookup(object, pindex)) == NULL && @@ -1327,15 +1327,15 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src /* * Allow fallback to backing objects if we are reading. 
*/ - VM_OBJECT_WLOCK(backing_object); + VM_OBJECT_RLOCK(backing_object); pindex += OFF_TO_IDX(object->backing_object_offset); - VM_OBJECT_WUNLOCK(object); + VM_OBJECT_RUNLOCK(object); object = backing_object; } if (src_m == NULL) panic("vm_fault_copy_wired: page missing"); pmap_copy_page(src_m, dst_m); - VM_OBJECT_WUNLOCK(object); + VM_OBJECT_RUNLOCK(object); dst_m->valid = VM_PAGE_BITS_ALL; dst_m->dirty = VM_PAGE_BITS_ALL; VM_OBJECT_WUNLOCK(dst_object); Index: sys/i386/i386/pmap.c =================================================================== --- sys/i386/i386/pmap.c (revision 250855) +++ sys/i386/i386/pmap.c (working copy) @@ -4185,6 +4185,9 @@ pmap_copy_page(vm_page_t src, vm_page_t dst) { struct sysmaps *sysmaps; + if ((src->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(src->object); + sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; mtx_lock(&sysmaps->lock); if (*sysmaps->CMAP1) Index: sys/i386/xen/pmap.c =================================================================== --- sys/i386/xen/pmap.c (revision 250855) +++ sys/i386/xen/pmap.c (working copy) @@ -3432,6 +3432,9 @@ pmap_copy_page(vm_page_t src, vm_page_t dst) { struct sysmaps *sysmaps; + if ((src->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(src->object); + sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)]; mtx_lock(&sysmaps->lock); if (*sysmaps->CMAP1) Index: sys/amd64/amd64/pmap.c =================================================================== --- sys/amd64/amd64/pmap.c (revision 250855) +++ sys/amd64/amd64/pmap.c (working copy) @@ -4229,9 +4229,14 @@ pmap_zero_page_idle(vm_page_t m) void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { - vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); - vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); + vm_offset_t dst, src; + if ((msrc->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(msrc->object); + + src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); + dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); + pagecopy((void *)src, (void *)dst); } Index: 
sys/powerpc/booke/pmap.c =================================================================== --- sys/powerpc/booke/pmap.c (revision 250855) +++ sys/powerpc/booke/pmap.c (working copy) @@ -2127,6 +2127,9 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_pa { vm_offset_t sva, dva; + if ((sm->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(sm->object); + sva = copy_page_src_va; dva = copy_page_dst_va; Index: sys/powerpc/aim/mmu_oea.c =================================================================== --- sys/powerpc/aim/mmu_oea.c (revision 250855) +++ sys/powerpc/aim/mmu_oea.c (working copy) @@ -1050,6 +1050,9 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_ vm_offset_t dst; vm_offset_t src; + if ((msrc->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(msrc->object); + dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); Index: sys/powerpc/aim/mmu_oea64.c =================================================================== --- sys/powerpc/aim/mmu_oea64.c (revision 250855) +++ sys/powerpc/aim/mmu_oea64.c (working copy) @@ -1097,6 +1097,9 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_pag vm_offset_t dst; vm_offset_t src; + if ((msrc->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(msrc->object); + dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); Index: sys/arm/arm/pmap-v6.c =================================================================== --- sys/arm/arm/pmap-v6.c (revision 250855) +++ sys/arm/arm/pmap-v6.c (working copy) @@ -3729,6 +3729,9 @@ void pmap_copy_page(vm_page_t src, vm_page_t dst) { + if ((src->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(src->object); + if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size && _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0) Index: sys/arm/arm/pmap.c =================================================================== --- sys/arm/arm/pmap.c (revision 250855) +++ sys/arm/arm/pmap.c (working copy) @@ -4411,6 +4411,9 @@
pmap_copy_page(vm_page_t src, vm_page_t dst) vm_offset_t srcpg, dstpg; #endif + if ((src->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(src->object); + cpu_dcache_wbinv_all(); cpu_l2cache_wbinv_all(); if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size && Index: sys/mips/mips/pmap.c =================================================================== --- sys/mips/mips/pmap.c (revision 250855) +++ sys/mips/mips/pmap.c (working copy) @@ -2551,9 +2551,14 @@ void pmap_copy_page(vm_page_t src, vm_page_t dst) { vm_offset_t va_src, va_dst; - vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src); - vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst); + vm_paddr_t phys_dst, phys_src; + if ((src->oflags & VPO_BUSY) == 0) + VM_OBJECT_ASSERT_LOCKED(src->object); + + phys_src = VM_PAGE_TO_PHYS(src); + phys_dst = VM_PAGE_TO_PHYS(dst); + if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) { /* easy case, all can be accessed via KSEG0 */ /*