Index: arm/include/pmap.h
===================================================================
--- arm/include/pmap.h	(revision 245047)
+++ arm/include/pmap.h	(working copy)
@@ -61,7 +61,7 @@
 #else
 #define PTE_NOCACHE	1
 #endif
-#define PTE_CACHE	4
+#define PTE_CACHE	6
 #define PTE_DEVICE	2
 #define PTE_PAGETABLE	4
 #else
Index: arm/arm/pmap-v6.c
===================================================================
--- arm/arm/pmap-v6.c	(revision 245047)
+++ arm/arm/pmap-v6.c	(working copy)
@@ -193,6 +193,14 @@
 #define PMAP_INLINE __inline
 #endif  /* PMAP_DEBUG */
 
+#ifdef ARM_L2_PIPT
+#define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
+#define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
+#else
+#define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
+#define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
+#endif
+
 extern struct pv_addr systempage;
 
 /*
@@ -786,11 +794,7 @@
 
 	pte = *ptep;
 	cpu_idcache_wbinv_range(va, PAGE_SIZE);
-#ifdef ARM_L2_PIPT
-	cpu_l2cache_wbinv_range(pte & L2_S_FRAME, PAGE_SIZE);
-#else
-	cpu_l2cache_wbinv_range(va, PAGE_SIZE);
-#endif
+	pmap_l2cache_wbinv_range(va, pte & L2_S_FRAME, PAGE_SIZE);
 	if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
 		/*
 		 * Page tables must have the cache-mode set to
@@ -2121,6 +2125,7 @@
 		cpu_tlb_flushD_SE(va);
 		cpu_cpwait();
 		*pte = 0;
+		PTE_SYNC(pte);
 	}
 }
 
@@ -2387,11 +2392,7 @@
 
 		pte = *ptep &~ L2_S_CACHE_MASK;
 		cpu_idcache_wbinv_range(tmpva, PAGE_SIZE);
-#ifdef ARM_L2_PIPT
-		cpu_l2cache_wbinv_range(pte & L2_S_FRAME, PAGE_SIZE);
-#else
-		cpu_l2cache_wbinv_range(tmpva, PAGE_SIZE);
-#endif
+		pmap_l2cache_wbinv_range(tmpva, pte & L2_S_FRAME, PAGE_SIZE);
 		*ptep = pte;
 		cpu_tlb_flushID_SE(tmpva);
 
@@ -2754,6 +2755,9 @@
 		else if (PV_BEEN_REFD(oflags))
 			cpu_tlb_flushD_SE(va);
 	}
+
+	if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
+		cpu_icache_sync_range(va, PAGE_SIZE);
 }
 
 /*
@@ -3197,6 +3201,16 @@
 	else
 		bzero_page(cdstp);
 
+	/*
+	 * Although aliasing is not possible if we use cdstp
+	 * temporary mappings, memory that will later be mapped
+	 * as non-cached or with write-through caches might end up
+	 * overwritten when calling wbinv_all, so make sure the
+	 * caches are clean after the copy operation.
+	 */
+	cpu_idcache_wbinv_range(cdstp, size);
+	pmap_l2cache_wbinv_range(cdstp, phys, size);
+
 	mtx_unlock(&cmtx);
 }
 
@@ -3276,12 +3290,23 @@
 	*cdst_pte = L2_S_PROTO | dst | pte_l2_s_cache_mode;
 	pmap_set_prot(cdst_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
 	PTE_SYNC(cdst_pte);
+
 	cpu_tlb_flushD_SE(csrcp);
 	cpu_tlb_flushD_SE(cdstp);
 	cpu_cpwait();
 
+	/*
+	 * Although aliasing is not possible if we use cdstp
+	 * temporary mappings, memory that will later be mapped
+	 * as non-cached or with write-through caches might end up
+	 * overwritten when calling wbinv_all, so make sure the
+	 * caches are clean after the copy operation.
+	 */
 	bcopy_page(csrcp, cdstp);
 
+	cpu_idcache_wbinv_range(cdstp, PAGE_SIZE);
+	pmap_l2cache_wbinv_range(cdstp, dst, PAGE_SIZE);
+
 	mtx_unlock(&cmtx);
 }