Index: sys/powerpc/booke/pmap.c
===================================================================
--- sys/powerpc/booke/pmap.c	(revision 347217)
+++ sys/powerpc/booke/pmap.c	(working copy)
@@ -166,8 +166,6 @@
 static vm_offset_t zero_page_va;
 static struct mtx zero_page_mutex;
 
-static struct mtx tlbivax_mutex;
-
 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
 static vm_offset_t copy_page_src_va;
 static vm_offset_t copy_page_dst_va;
@@ -504,6 +502,7 @@
 	if (!smp_started)
 		return;
 
+	critical_enter();
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (pc != pcpup) {
@@ -540,6 +539,7 @@
 			CTR1(KTR_PMAP, "%s: unlocked", __func__);
 		}
 	}
+	critical_exit();
 #endif
 }
@@ -1085,13 +1085,11 @@
 	 * Invalidate the pdir entry as soon as possible, so that other CPUs
 	 * don't attempt to look up the page tables we are releasing.
 	 */
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	pmap->pm_pdir[pdir_idx] = NULL;
 
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 
 	for (i = 0; i < PTBL_PAGES; i++) {
 		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
@@ -1310,7 +1308,6 @@
 		if (TAILQ_EMPTY(&m->md.pv_list))
 			m->md.pv_tracked = false;
 	}
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
@@ -1317,7 +1314,6 @@
 	*pte = 0;
 
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 
 	pmap->pm_stats.resident_count--;
 
@@ -1437,7 +1433,6 @@
 		pv_insert(pmap, va, m);
 	}
 
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
@@ -1447,7 +1442,6 @@
 	*pte |= (PTE_VALID | flags);
 
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 
 	return (0);
 }
@@ -1554,7 +1548,6 @@
 			m->md.pv_tracked = false;
 	}
 
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
@@ -1561,7 +1554,6 @@
 	*pte = 0;
 
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 
 	pmap->pm_stats.resident_count--;
 
@@ -1629,7 +1621,6 @@
 	pmap->pm_stats.resident_count++;
 
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	tlb0_flush_entry(va);
@@ -1645,7 +1636,6 @@
 	*pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
 
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 
 	return (0);
 }
@@ -1738,9 +1728,6 @@
 	elf32_nxstack = 1;
 #endif
 
-	/* Initialize invalidation mutex */
-	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
-
 	/* Read TLB0 size and associativity. */
 	tlb0_get_tlbconf();
 
@@ -2229,7 +2216,6 @@
 	pte = pte_find(mmu, kernel_pmap, va);
 	KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
 
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	if (PTE_ISVALID(pte)) {
@@ -2241,6 +2227,7 @@
 	}
 
 	*pte = PTE_RPN_FROM_PA(pa) | flags;
+	tlb_miss_unlock();
 
 	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
 	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
@@ -2250,8 +2237,6 @@
 	if ((flags & (PTE_I | PTE_G)) == 0)
 		__syncicache((void *)va, PAGE_SIZE);
 
-	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 }
 
 /*
@@ -2277,7 +2262,6 @@
 		return;
 	}
 
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 
 	/* Invalidate entry in TLB0, update PTE. */
@@ -2285,7 +2269,6 @@
 	*pte = 0;
 
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 }
 
 /*
@@ -2499,7 +2482,6 @@
 		 * The new flags value is all calculated -- only now actually
 		 * update the PTE.
 		 */
-		mtx_lock_spin(&tlbivax_mutex);
 		tlb_miss_lock();
 
 		tlb0_flush_entry(va);
@@ -2507,7 +2489,6 @@
 		*pte |= flags;
 
 		tlb_miss_unlock();
-		mtx_unlock_spin(&tlbivax_mutex);
 
 	} else {
 		/*
@@ -2803,7 +2784,6 @@
 			if (PTE_ISVALID(pte)) {
 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 
-				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
 
 				/* Handle modified pages. */
@@ -2814,7 +2794,6 @@
 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
 
 				tlb_miss_unlock();
-				mtx_unlock_spin(&tlbivax_mutex);
 			}
 		}
 	}
@@ -2848,7 +2827,6 @@
 			if (PTE_ISVALID(pte)) {
 				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
 
-				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
 
 				/* Handle modified pages. */
@@ -2859,7 +2837,6 @@
 				*pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
 
 				tlb_miss_unlock();
-				mtx_unlock_spin(&tlbivax_mutex);
 			}
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
@@ -2872,18 +2849,23 @@
 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
 	pte_t *pte;
+	vm_paddr_t pa = 0;
+	int valid;
+#ifndef __powerpc64__
+	vm_page_t m;
 	pmap_t pmap;
-	vm_page_t m;
 	vm_offset_t addr;
-	vm_paddr_t pa = 0;
-	int active, valid;
+	int active;
+#endif
 
 	va = trunc_page(va);
 	sz = round_page(sz);
 
+#ifndef __powerpc64__
 	rw_wlock(&pvh_global_lock);
 	pmap = PCPU_GET(curpmap);
 	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
+#endif
 	while (sz > 0) {
 		PMAP_LOCK(pm);
 		pte = pte_find(mmu, pm, va);
@@ -2892,6 +2874,9 @@
 			pa = PTE_PA(pte);
 		PMAP_UNLOCK(pm);
 		if (valid) {
+#ifdef __powerpc64__
+			__syncicache((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
+#else
 			if (!active) {
 				/* Create a mapping in the active pmap. */
 				addr = 0;
@@ -2904,11 +2889,14 @@
 				PMAP_UNLOCK(pmap);
 			} else
 				__syncicache((void *)va, PAGE_SIZE);
+#endif
 		}
 		va += PAGE_SIZE;
 		sz -= PAGE_SIZE;
 	}
+#ifndef __powerpc64__
 	rw_wunlock(&pvh_global_lock);
+#endif
 }
 
 /*
@@ -3264,7 +3252,6 @@
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
 		    PTE_ISVALID(pte)) {
-			mtx_lock_spin(&tlbivax_mutex);
 			tlb_miss_lock();
 
 			if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
@@ -3274,7 +3261,6 @@
 			}
 
 			tlb_miss_unlock();
-			mtx_unlock_spin(&tlbivax_mutex);
 		}
 		PMAP_UNLOCK(pv->pv_pmap);
 	}
@@ -3313,7 +3299,6 @@
 			if (PTE_ISMODIFIED(pte))
 				vm_page_dirty(m);
 			if (PTE_ISREFERENCED(pte)) {
-				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
 
 				tlb0_flush_entry(pv->pv_va);
@@ -3320,7 +3305,6 @@
 				*pte &= ~PTE_REFERENCED;
 
 				tlb_miss_unlock();
-				mtx_unlock_spin(&tlbivax_mutex);
 
 				if (++count >= PMAP_TS_REFERENCED_MAX) {
 					PMAP_UNLOCK(pv->pv_pmap);
@@ -3766,7 +3750,6 @@
 			return (EINVAL);
 	}
 
-	mtx_lock_spin(&tlbivax_mutex);
 	tlb_miss_lock();
 	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
 		pte = pte_find(mmu, kernel_pmap, va);
@@ -3775,7 +3758,6 @@
 		tlb0_flush_entry(va);
 	}
 	tlb_miss_unlock();
-	mtx_unlock_spin(&tlbivax_mutex);
 
 	return (0);
 }
@@ -3847,15 +3829,21 @@
 static inline void
 tlb0_flush_entry(vm_offset_t va)
 {
+	static volatile u_int tlbivax_lock;
 
 	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
 
-	mtx_assert(&tlbivax_mutex, MA_OWNED);
+	/* Hobo spinlock, shamelessly taken from TLBIE() in moea64 */
+	while (!atomic_cmpset_int(&tlbivax_lock, 0, 1))
+		;
+	isync();
 
 	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
 	__asm __volatile("isync; msync");
 	__asm __volatile("tlbsync; msync");
+	tlbivax_lock = 0;
+
 	CTR1(KTR_PMAP, "%s: e", __func__);
 }
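
The patch above drops the tlbivax_mutex spin mutex and relies on two lighter mechanisms instead: critical_enter()/critical_exit() around the per-CPU TLB miss lock loop, and a bare-word "hobo spinlock" inside tlb0_flush_entry() that serializes the tlbivax/tlbsync broadcast. The stand-alone sketch below is not part of the patch; it only illustrates that acquire/release pattern using C11 atomics, and the names hobo_lock, hobo_lock_acquire(), and hobo_lock_release() are invented for the example (the kernel code uses atomic_cmpset_int(), isync(), and a plain store after tlbsync/msync).

#include <stdatomic.h>
#include <stdio.h>

/* 0 = free, 1 = held; a single word stands in for a struct mtx. */
static atomic_uint hobo_lock;

static void
hobo_lock_acquire(void)
{
	unsigned int expected;

	/* Spin until the word swings from 0 to 1, like the patch's
	 * atomic_cmpset_int() loop. */
	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&hobo_lock,
	    &expected, 1, memory_order_acquire, memory_order_relaxed));
}

static void
hobo_lock_release(void)
{

	/* The patch uses a plain store after tlbsync/msync; a release
	 * store provides the equivalent ordering here. */
	atomic_store_explicit(&hobo_lock, 0, memory_order_release);
}

int
main(void)
{

	hobo_lock_acquire();
	/* The patched kernel issues tlbivax/tlbsync in this window. */
	printf("holding hobo lock\n");
	hobo_lock_release();
	return (0);
}

The weak compare-and-swap loop is the usual C11 equivalent of spinning on atomic_cmpset_int(); the acquire/release orderings take the place of the isync and msync barriers that the kernel sequence depends on.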