Index: include/pmap.h
===================================================================
--- include/pmap.h	(revision 327989)
+++ include/pmap.h	(working copy)
@@ -260,6 +260,8 @@
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
+int		pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
+		    void **kaddr, size_t ulen, size_t *klen);
 void		pmap_deactivate(struct thread *);
 vm_paddr_t	pmap_kextract(vm_offset_t);
 int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
Index: powerpc/pmap_dispatch.c
===================================================================
--- powerpc/pmap_dispatch.c	(revision 327989)
+++ powerpc/pmap_dispatch.c	(working copy)
@@ -511,6 +511,15 @@
 	return (MMU_KREMOVE(mmu_obj, va));
 }
 
+int
+pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
+    size_t ulen, size_t *klen)
+{
+
+	CTR2(KTR_PMAP, "%s(%p)", __func__, uaddr);
+	return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
+}
+
 boolean_t
 pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
Index: powerpc/mmu_if.m
===================================================================
--- powerpc/mmu_if.m	(revision 327989)
+++ powerpc/mmu_if.m	(working copy)
@@ -817,6 +817,27 @@
 	vm_size_t	_size;
 };
 
+/**
+ * @brief Provide a kernel-space pointer that can be used to access the
+ * given userland address. The kernel-accessible length returned in klen
+ * may be less than the requested length of the userland buffer (ulen). If
+ * so, retry with a higher address to get access to the later parts of the
+ * buffer. Returns EFAULT if no mapping can be made, else zero.
+ *
+ * @param _pm		PMAP for the user pointer.
+ * @param _uaddr	Userland address to map.
+ * @param _kaddr	Corresponding kernel address.
+ * @param _ulen		Length of user buffer.
+ * @param _klen		Available subset of ulen with _kaddr.
+ */
+METHOD int map_user_ptr {
+	mmu_t		_mmu;
+	pmap_t		_pm;
+	volatile const void *_uaddr;
+	void		**_kaddr;
+	size_t		_ulen;
+	size_t		*_klen;
+};
 
 /**
  * @brief Reverse-map a kernel virtual address
Index: powerpc/copyinout.c
===================================================================
--- powerpc/copyinout.c	(revision 327989)
+++ powerpc/copyinout.c	(working copy)
@@ -73,104 +73,6 @@
 #include 
 #include 
 
-#ifdef AIM
-/*
- * Makes sure that the right segment of userspace is mapped in.
- */
-
-#ifdef __powerpc64__
-static __inline void
-set_user_sr(pmap_t pm, volatile const void *addr)
-{
-	struct slb *slb;
-	register_t slbv;
-
-	/* Try lockless look-up first */
-	slb = user_va_to_slb_entry(pm, (vm_offset_t)addr);
-
-	if (slb == NULL) {
-		/* If it isn't there, we need to pre-fault the VSID */
-		PMAP_LOCK(pm);
-		slbv = va_to_vsid(pm, (vm_offset_t)addr) << SLBV_VSID_SHIFT;
-		PMAP_UNLOCK(pm);
-	} else {
-		slbv = slb->slbv;
-	}
-
-	/* Mark segment no-execute */
-	slbv |= SLBV_N;
-
-	/* If we have already set this VSID, we can just return */
-	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
-		return;
-
-	__asm __volatile("isync");
-	curthread->td_pcb->pcb_cpu.aim.usr_segm =
-	    (uintptr_t)addr >> ADDR_SR_SHFT;
-	curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
-	__asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
-	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
-}
-#else
-static __inline void
-set_user_sr(pmap_t pm, volatile const void *addr)
-{
-	register_t vsid;
-
-	vsid = va_to_vsid(pm, (vm_offset_t)addr);
-
-	/* Mark segment no-execute */
-	vsid |= SR_N;
-
-	/* If we have already set this VSID, we can just return */
-	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid)
-		return;
-
-	__asm __volatile("isync");
-	curthread->td_pcb->pcb_cpu.aim.usr_segm =
-	    (uintptr_t)addr >> ADDR_SR_SHFT;
-	curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
-	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
-}
-#endif
-
-static __inline int
-map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen,
-    size_t *klen)
-{
-	size_t l;
-
-	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
-
-	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
-	if (l > ulen)
-		l = ulen;
-	if (klen)
-		*klen = l;
-	else if (l != ulen)
-		return (EFAULT);
-
-	set_user_sr(pm, uaddr);
-
-	return (0);
-}
-#else /* Book-E uses a combined kernel/user mapping */
-static __inline int
-map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen,
-    size_t *klen)
-{
-
-	if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
-		return (EFAULT);
-
-	*kaddr = (void *)(uintptr_t)uaddr;
-	if (klen)
-		*klen = ulen;
-
-	return (0);
-}
-#endif
-
 int
 copyout(const void *kaddr, void *udaddr, size_t len)
 {
@@ -194,7 +96,7 @@
 	up = udaddr;
 
 	while (len > 0) {
-		if (map_user_ptr(pm, udaddr, (void **)&p, len, &l)) {
+		if (pmap_map_user_ptr(pm, udaddr, (void **)&p, len, &l)) {
 			td->td_pcb->pcb_onfault = NULL;
 			return (EFAULT);
 		}
@@ -233,7 +135,7 @@
 	up = udaddr;
 
 	while (len > 0) {
-		if (map_user_ptr(pm, udaddr, (void **)&p, len, &l)) {
+		if (pmap_map_user_ptr(pm, udaddr, (void **)&p, len, &l)) {
 			td->td_pcb->pcb_onfault = NULL;
 			return (EFAULT);
 		}
@@ -299,7 +201,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -328,7 +230,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -357,7 +259,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -400,7 +302,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -428,7 +330,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -456,7 +358,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -485,7 +387,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -514,7 +416,7 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
+	if (pmap_map_user_ptr(pm, addr, (void **)&p, sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -543,8 +445,8 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, (void *)(uintptr_t)addr, (void **)&p, sizeof(*p),
-	    NULL)) {
+	if (pmap_map_user_ptr(pm, (void *)(uintptr_t)addr, (void **)&p,
+	    sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
@@ -595,8 +497,8 @@
 		return (-1);
 	}
 
-	if (map_user_ptr(pm, (void *)(uintptr_t)addr, (void **)&p, sizeof(*p),
-	    NULL)) {
+	if (pmap_map_user_ptr(pm, (void *)(uintptr_t)addr, (void **)&p,
+	    sizeof(*p), NULL)) {
 		td->td_pcb->pcb_onfault = NULL;
 		return (-1);
 	}
Index: aim/mmu_oea.c
===================================================================
--- aim/mmu_oea.c	(revision 327989)
+++ aim/mmu_oea.c	(working copy)
@@ -320,7 +320,10 @@
 void moea_scan_init(mmu_t mmu);
 vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
+static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
+    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+
 
 static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
 	MMUMETHOD(mmu_copy_page,	moea_copy_page),
@@ -370,6 +373,7 @@
 	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
 	MMUMETHOD(mmu_scan_init,	moea_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),
+	MMUMETHOD(mmu_map_user_ptr,	moea_map_user_ptr),
 
 	{ 0, 0 }
 };
@@ -1545,6 +1549,45 @@
 }
 
 /*
+ * Provide a kernel pointer corresponding to a given userland pointer.
+ * The returned pointer is valid until the next time this function is
+ * called in this thread. This is used internally in copyin/copyout.
+ */
+static int
+moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+    void **kaddr, size_t ulen, size_t *klen)
+{
+	size_t l;
+	register_t vsid;
+
+	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
+	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
+	if (l > ulen)
+		l = ulen;
+	if (klen)
+		*klen = l;
+	else if (l != ulen)
+		return (EFAULT);
+
+	vsid = va_to_vsid(pm, (vm_offset_t)uaddr);
+
+	/* Mark segment no-execute */
+	vsid |= SR_N;
+
+	/* If we have already set this VSID, we can just return */
+	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid)
+		return (0);
+
+	__asm __volatile("isync");
+	curthread->td_pcb->pcb_cpu.aim.usr_segm =
+	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
+	curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
+	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
+
+	return (0);
+}
+
+/*
  * Map a range of physical addresses into kernel virtual address space.
  *
  * The value passed in *virt is a suggested virtual address for the mapping.
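A note on the arithmetic in moea_map_user_ptr() above (moea64_map_user_ptr() below uses the same window math): the user address's offset within its 256 MB segment is re-based at the per-thread kernel window, and klen is clamped at the end of that window, which is why callers can get back less than they asked for. The following standalone program is only an illustrative sketch of that math, not kernel code; USER_ADDR, SEGMENT_LENGTH and SEGMENT_MASK are hypothetical stand-ins for the values the kernel takes from the machine headers.

/* Standalone sketch of the user-segment window arithmetic (illustrative values). */
#include <stdint.h>
#include <stdio.h>

#define SEGMENT_LENGTH	0x10000000UL		/* one 256 MB PowerPC segment */
#define SEGMENT_MASK	(~(SEGMENT_LENGTH - 1))	/* selects the segment-number bits */
#define USER_ADDR	0xb0000000UL		/* hypothetical kernel-side window base */

int
main(void)
{
	uintptr_t uaddr = 0x1fff0000UL;	/* example user address */
	size_t ulen = 0x20000;		/* request crosses a segment boundary */

	/* Re-base the in-segment offset of uaddr at the kernel window. */
	uintptr_t kaddr = USER_ADDR + (uaddr & ~SEGMENT_MASK);

	/*
	 * Clamp the usable length at the end of the window; the caller is
	 * expected to retry with a higher uaddr for the remainder.
	 */
	size_t klen = (USER_ADDR + SEGMENT_LENGTH) - kaddr;
	if (klen > ulen)
		klen = ulen;

	printf("kaddr %#lx, klen %#zx of %#zx requested\n",
	    (unsigned long)kaddr, klen, ulen);	/* klen here is 0x10000 */
	return (0);
}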
Index: aim/mmu_oea64.c
===================================================================
--- aim/mmu_oea64.c	(revision 327989)
+++ aim/mmu_oea64.c	(working copy)
@@ -228,6 +228,7 @@
 static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
 static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
 static void		moea64_kremove(mmu_t, vm_offset_t);
+
 static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
 			    vm_paddr_t pa, vm_size_t sz);
 static void		moea64_pmap_init_qpages(void);
@@ -284,7 +285,10 @@
 void moea64_scan_init(mmu_t mmu);
 vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
+static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
+    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+
 
 static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
 	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
@@ -333,6 +337,7 @@
 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
 	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
+	MMUMETHOD(mmu_map_user_ptr,	moea64_map_user_ptr),
 
 	{ 0, 0 }
 };
@@ -1834,6 +1839,70 @@
 }
 
 /*
+ * Provide a kernel pointer corresponding to a given userland pointer.
+ * The returned pointer is valid until the next time this function is
+ * called in this thread. This is used internally in copyin/copyout.
+ */
+static int
+moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+    void **kaddr, size_t ulen, size_t *klen)
+{
+	size_t l;
+#ifdef __powerpc64__
+	struct slb *slb;
+#endif
+	register_t slbv;
+
+	*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
+	l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
+	if (l > ulen)
+		l = ulen;
+	if (klen)
+		*klen = l;
+	else if (l != ulen)
+		return (EFAULT);
+
+#ifdef __powerpc64__
+	/* Try lockless look-up first */
+	slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
+
+	if (slb == NULL) {
+		/* If it isn't there, we need to pre-fault the VSID */
+		PMAP_LOCK(pm);
+		slbv = va_to_vsid(pm, (vm_offset_t)uaddr) << SLBV_VSID_SHIFT;
+		PMAP_UNLOCK(pm);
+	} else {
+		slbv = slb->slbv;
+	}
+
+	/* Mark segment no-execute */
+	slbv |= SLBV_N;
+#else
+	slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
+
+	/* Mark segment no-execute */
+	slbv |= SR_N;
+#endif
+
+	/* If we have already set this VSID, we can just return */
+	if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
+		return (0);
+
+	__asm __volatile("isync");
+	curthread->td_pcb->pcb_cpu.aim.usr_segm =
+	    (uintptr_t)uaddr >> ADDR_SR_SHFT;
+	curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
+#ifdef __powerpc64__
+	__asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
+	    "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
+#else
+	__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
+#endif
+
+	return (0);
+}
+
+/*
  * Map a range of physical addresses into kernel virtual address space.
  *
  * The value passed in *virt is a suggested virtual address for the mapping.
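A small design note on both AIM implementations above: reloading the user segment mapping (mtsr on 32-bit, slbie/slbmte on 64-bit, each bracketed by isync) is comparatively expensive, so the computed VSID is cached in the PCB and the reload is skipped when it has not changed since the last call on this thread. The sketch below models only that caching decision in plain userland C; struct fake_pcb, compute_vsid() and install_user_segment() are invented names for illustration and perform no real segment-register update.

/*
 * Userland model of the "skip the reload if the VSID is unchanged" check.
 * The struct and helpers are invented for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pcb {
	uintptr_t usr_segm;	/* cached user segment number */
	uint64_t usr_vsid;	/* VSID last installed */
};

/* Hypothetical VSID derivation: one VSID per 256 MB segment. */
static uint64_t
compute_vsid(uintptr_t uaddr)
{
	return (0x1000 + (uaddr >> 28));
}

/* Returns true when a (simulated) segment reload was needed. */
static bool
install_user_segment(struct fake_pcb *pcb, uintptr_t uaddr)
{
	uint64_t vsid = compute_vsid(uaddr);

	if (pcb->usr_vsid == vsid)
		return (false);	/* already mapped; nothing to do */

	pcb->usr_segm = uaddr >> 28;
	pcb->usr_vsid = vsid;	/* real code issues isync + mtsr/slbmte here */
	return (true);
}

int
main(void)
{
	struct fake_pcb pcb = { 0, 0 };

	printf("%d\n", install_user_segment(&pcb, 0x10001000)); /* 1: reload */
	printf("%d\n", install_user_segment(&pcb, 0x10002000)); /* 0: same segment */
	printf("%d\n", install_user_segment(&pcb, 0x20000000)); /* 1: new segment */
	return (0);
}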
Index: booke/pmap.c
===================================================================
--- booke/pmap.c	(revision 327989)
+++ booke/pmap.c	(working copy)
@@ -380,7 +380,10 @@
 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
     vm_size_t sz, vm_memattr_t mode);
+static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
+    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+
 
 static mmu_method_t mmu_booke_methods[] = {
 	/* pmap dispatcher interface */
 	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
@@ -432,6 +435,7 @@
 	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
 	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
+	MMUMETHOD(mmu_map_user_ptr,	mmu_booke_map_user_ptr),
 
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
@@ -2268,6 +2272,26 @@
 }
 
 /*
+ * Provide a kernel pointer corresponding to a given userland pointer.
+ * The returned pointer is valid until the next time this function is
+ * called in this thread. This is used internally in copyin/copyout.
+ */
+static int
+mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+    void **kaddr, size_t ulen, size_t *klen)
+{
+
+	if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
+		return (EFAULT);
+
+	*kaddr = (void *)(uintptr_t)uaddr;
+	if (klen)
+		*klen = ulen;
+
+	return (0);
+}
+
+/*
  * Initialize pmap associated with process 0.
  */
 static void
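Taken together, a consumer of the new interface follows the same pattern as the copyout() loop updated earlier in this patch: map as much of the user buffer as the pmap will expose at once, copy that much, then advance and remap until the request is exhausted (on Book-E the first call always covers the whole buffer). The fragment below is a hedged sketch of that loop rather than code from this patch; copy_to_user_sketch() is a hypothetical helper, it omits the td_pcb->pcb_onfault fault handling that the real copyin/copyout arm, and it assumes the usual copyinout.c includes.

/*
 * Sketch of a pmap_map_user_ptr() consumer (simplified; no fault handling).
 * copy_to_user_sketch() is a hypothetical helper, not part of this patch.
 */
static int
copy_to_user_sketch(pmap_t pm, const void *kaddr, void *udaddr, size_t len)
{
	const char *kp = kaddr;
	char *up = udaddr;
	char *p;
	size_t l;

	while (len > 0) {
		/* Ask the pmap for a kernel-visible window over 'up'. */
		if (pmap_map_user_ptr(pm, up, (void **)&p, len, &l))
			return (EFAULT);

		/*
		 * 'l' may be shorter than 'len' (e.g. at a 256 MB segment
		 * boundary on AIM); copy what we got and retry higher up.
		 */
		bcopy(kp, p, l);

		up += l;
		kp += l;
		len -= l;
	}

	return (0);
}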