Index: aim/mmu_oea.c =================================================================== --- aim/mmu_oea.c (revision 328517) +++ aim/mmu_oea.c (working copy) @@ -322,6 +322,8 @@ void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr); static int moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); +static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, + int *is_user, vm_offset_t *decoded_addr); static mmu_method_t moea_methods[] = { @@ -374,6 +376,7 @@ MMUMETHOD(mmu_scan_init, moea_scan_init), MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map), MMUMETHOD(mmu_map_user_ptr, moea_map_user_ptr), + MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr), { 0, 0 } }; @@ -1588,6 +1591,31 @@ } /* + * Figure out where a given kernel pointer (usually in a fault) points + * to from the VM's perspective, potentially remapping into userland's + * address space. + */ +static int +moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, + vm_offset_t *decoded_addr) +{ + vm_offset_t user_sr; + + if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { + user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; + addr &= ADDR_PIDX | ADDR_POFF; + addr |= user_sr << ADDR_SR_SHFT; + *decoded_addr = addr; + *is_user = 1; + } else { + *decoded_addr = addr; + *is_user = 0; + } + + return (0); +} + +/* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. 
Index: aim/mmu_oea64.c =================================================================== --- aim/mmu_oea64.c (revision 328517) +++ aim/mmu_oea64.c (working copy) @@ -288,6 +288,8 @@ void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr); static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); +static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, + int *is_user, vm_offset_t *decoded_addr); static mmu_method_t moea64_methods[] = { @@ -339,6 +341,7 @@ MMUMETHOD(mmu_scan_init, moea64_scan_init), MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map), MMUMETHOD(mmu_map_user_ptr, moea64_map_user_ptr), + MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr), { 0, 0 } }; @@ -1910,6 +1913,31 @@ } /* + * Figure out where a given kernel pointer (usually in a fault) points + * to from the VM's perspective, potentially remapping into userland's + * address space. + */ +static int +moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, + vm_offset_t *decoded_addr) +{ + vm_offset_t user_sr; + + if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { + user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; + addr &= ADDR_PIDX | ADDR_POFF; + addr |= user_sr << ADDR_SR_SHFT; + *decoded_addr = addr; + *is_user = 1; + } else { + *decoded_addr = addr; + *is_user = 0; + } + + return (0); +} + +/* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. 
Index: booke/pmap.c =================================================================== --- booke/pmap.c (revision 328517) +++ booke/pmap.c (working copy) @@ -382,6 +382,8 @@ vm_size_t sz, vm_memattr_t mode); static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); +static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, + int *is_user, vm_offset_t *decoded_addr); static mmu_method_t mmu_booke_methods[] = { @@ -436,6 +438,7 @@ MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), MMUMETHOD(mmu_change_attr, mmu_booke_change_attr), MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr), + MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr), /* dumpsys() support */ MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), @@ -2292,6 +2295,25 @@ } /* + * Figure out where a given kernel pointer (usually in a fault) points + * to from the VM's perspective, potentially remapping into userland's + * address space. + */ +static int +mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, + vm_offset_t *decoded_addr) +{ + + if (addr < VM_MAXUSER_ADDRESS) + *is_user = 1; + else + *is_user = 0; + + *decoded_addr = addr; + return (0); +} + +/* * Initialize pmap associated with process 0. 
*/ static void Index: include/pmap.h =================================================================== --- include/pmap.h (revision 328517) +++ include/pmap.h (working copy) @@ -262,6 +262,8 @@ int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t); int pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); +int pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, + vm_offset_t *decoded_addr); void pmap_deactivate(struct thread *); vm_paddr_t pmap_kextract(vm_offset_t); int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t); Index: powerpc/mmu_if.m =================================================================== --- powerpc/mmu_if.m (revision 328517) +++ powerpc/mmu_if.m (working copy) @@ -840,6 +840,21 @@ }; /** + * @brief Decode a kernel pointer, as visible to the current thread, + * by setting whether it corresponds to a user or kernel address and + * the address in the respective memory maps to which the address as + * seen in the kernel corresponds. This is essentially the inverse of + * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling. + * Returns 0 on success or EFAULT if the address could not be mapped. 
+ */ +METHOD int decode_kernel_ptr { + mmu_t _mmu; + vm_offset_t addr; + int *is_user; + vm_offset_t *decoded_addr; +}; + +/** * @brief Reverse-map a kernel virtual address * * @param _va kernel virtual address to reverse-map * Index: powerpc/pmap_dispatch.c =================================================================== --- powerpc/pmap_dispatch.c (revision 328517) +++ powerpc/pmap_dispatch.c (working copy) @@ -520,6 +520,14 @@ return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen)); } +int +pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded) +{ + + CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr); + return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded)); +} + boolean_t pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size) { Index: powerpc/trap.c =================================================================== --- powerpc/trap.c (revision 328517) +++ powerpc/trap.c (working copy) @@ -393,7 +393,8 @@ break; #if defined(__powerpc64__) && defined(AIM) case EXC_DSE: - if ((frame->dar & SEGMENT_MASK) == USER_ADDR) { + if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 && + (frame->dar & SEGMENT_MASK) == USER_ADDR) { __asm __volatile ("slbmte %0, %1" :: "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); @@ -731,10 +732,7 @@ struct proc *p; vm_map_t map; vm_prot_t ftype; - int rv; -#ifdef AIM - register_t user_sr; -#endif + int rv, is_user; td = curthread; p = td->td_proc; @@ -759,21 +757,14 @@ KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL")); map = &p->p_vmspace->vm_map; } else { -#ifdef BOOKE - if (eva < VM_MAXUSER_ADDRESS) { -#else - if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { -#endif + rv = pmap_decode_kernel_ptr(eva, &is_user, &eva); + if (rv != 0) + return (SIGSEGV); + + if (is_user) map = &p->p_vmspace->vm_map; - -#ifdef AIM - user_sr = td->td_pcb->pcb_cpu.aim.usr_segm; - eva &= ADDR_PIDX | 
ADDR_POFF; - eva |= user_sr << ADDR_SR_SHFT; -#endif - } else { + else map = kernel_map; - } } va = trunc_page(eva);