Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c	(revision 223435)
+++ sys/amd64/amd64/mp_machdep.c	(working copy)
@@ -604,10 +604,10 @@
 void
 init_secondary(void)
 {
-	cpuset_t tcpuset;
 	struct pcpu *pc;
 	struct nmi_pcpu *np;
 	u_int64_t msr, cr0;
+	u_int cpuid;
 	int cpu, gsel_tss, x;
 	struct region_descriptor ap_gdt;
 
@@ -711,8 +711,9 @@
 	fpuinit();
 
 	/* A quick check from sanity claus */
+	cpuid = PCPU_GET(cpuid);
 	if (PCPU_GET(apic_id) != lapic_id()) {
-		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
+		printf("SMP: cpuid = %d\n", cpuid);
 		printf("SMP: actual apic_id = %d\n", lapic_id());
 		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
 		panic("cpuid mismatch! boom!!");
@@ -734,14 +735,13 @@
 	smp_cpus++;
 
-	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
-	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
-	tcpuset = PCPU_GET(cpumask);
+	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
+	printf("SMP: AP CPU #%d Launched!\n", cpuid);
 
 	/* Determine if we are a logical CPU. */
 	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
 	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
-		CPU_OR(&logical_cpus_mask, &tcpuset);
+		CPU_SET(cpuid, &logical_cpus_mask);
 
 	if (bootverbose)
 		lapic_dump("AP");
 
@@ -1138,9 +1138,7 @@
 		if (othercpus < 1)
 			return;
 	} else {
-		sched_pin();
-		CPU_NAND(&mask, PCPU_PTR(cpumask));
-		sched_unpin();
+		CPU_CLR(PCPU_GET(cpuid), &mask);
 		if (CPU_EMPTY(&mask))
 			return;
 	}
@@ -1362,7 +1360,7 @@
 int
 ipi_nmi_handler()
 {
-	cpuset_t cpumask;
+	u_int cpuid;
 
 	/*
 	 * As long as there is not a simple way to know about a NMI's
@@ -1370,13 +1368,11 @@
 	 * the global pending bitword an IPI_STOP_HARD has been issued
 	 * and should be handled.
 	 */
-	sched_pin();
-	cpumask = PCPU_GET(cpumask);
-	sched_unpin();
-	if (!CPU_OVERLAP(&ipi_nmi_pending, &cpumask))
+	cpuid = PCPU_GET(cpuid);
+	if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
 		return (1);
 
-	CPU_NAND_ATOMIC(&ipi_nmi_pending, &cpumask);
+	CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
 	cpustop_handler();
 	return (0);
 }
@@ -1388,25 +1384,21 @@
 void
 cpustop_handler(void)
 {
-	cpuset_t cpumask;
 	u_int cpu;
 
-	sched_pin();
 	cpu = PCPU_GET(cpuid);
-	cpumask = PCPU_GET(cpumask);
-	sched_unpin();
 
 	savectx(&stoppcbs[cpu]);
 
 	/* Indicate that we are stopped */
-	CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+	CPU_SET_ATOMIC(cpu, &stopped_cpus);
 
 	/* Wait for restart */
-	while (!CPU_OVERLAP(&started_cpus, &cpumask))
+	while (!CPU_ISSET(cpu, &started_cpus))
 	    ia32_pause();
 
-	CPU_NAND_ATOMIC(&started_cpus, &cpumask);
-	CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+	CPU_CLR_ATOMIC(cpu, &started_cpus);
+	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
 
 	if (cpu == 0 && cpustop_restartfunc != NULL) {
 		cpustop_restartfunc();
@@ -1421,19 +1413,17 @@
 void
 cpususpend_handler(void)
 {
-	cpuset_t cpumask;
 	register_t cr3, rf;
 	u_int cpu;
 
 	cpu = PCPU_GET(cpuid);
-	cpumask = PCPU_GET(cpumask);
 
 	rf = intr_disable();
 	cr3 = rcr3();
 
 	if (savectx(susppcbs[cpu])) {
 		wbinvd();
-		CPU_OR_ATOMIC(&stopped_cpus, &cpumask);
+		CPU_SET_ATOMIC(cpu, &stopped_cpus);
 	} else {
 		pmap_init_pat();
 		PCPU_SET(switchtime, 0);
@@ -1441,11 +1431,11 @@
 	}
 
 	/* Wait for resume */
-	while (!CPU_OVERLAP(&started_cpus, &cpumask))
+	while (!CPU_ISSET(cpu, &started_cpus))
 		ia32_pause();
 
-	CPU_NAND_ATOMIC(&started_cpus, &cpumask);
-	CPU_NAND_ATOMIC(&stopped_cpus, &cpumask);
+	CPU_CLR_ATOMIC(cpu, &started_cpus);
+	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
 
 	/* Restore CR3 and enable interrupts */
 	load_cr3(cr3);
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c	(revision 223435)
+++ sys/amd64/amd64/pmap.c	(working copy)
@@ -925,17 +925,18 @@
 void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 		invlpg(va);
 		smp_invlpg(va);
 	} else {
-		cpumask = PCPU_GET(cpumask);
+		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			invlpg(va);
 		CPU_AND(&other_cpus, &pmap->pm_active);
 		if (!CPU_EMPTY(&other_cpus))
@@ -947,8 +948,9 @@
 void
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
 	vm_offset_t addr;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
@@ -956,10 +958,10 @@
 			invlpg(addr);
 		smp_invlpg_range(sva, eva);
 	} else {
-		cpumask = PCPU_GET(cpumask);
+		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
 				invlpg(addr);
 		CPU_AND(&other_cpus, &pmap->pm_active);
@@ -972,17 +974,18 @@
 void
 pmap_invalidate_all(pmap_t pmap)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 		invltlb();
 		smp_invltlb();
 	} else {
-		cpumask = PCPU_GET(cpumask);
+		cpuid = PCPU_GET(cpuid);
 		other_cpus = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			invltlb();
 		CPU_AND(&other_cpus, &pmap->pm_active);
 		if (!CPU_EMPTY(&other_cpus))
@@ -1002,11 +1005,11 @@
 }
 
 struct pde_action {
-	cpuset_t store;		/* processor that updates the PDE */
 	cpuset_t invalidate;	/* processors that invalidate their TLB */
 	vm_offset_t va;
 	pd_entry_t *pde;
 	pd_entry_t newpde;
+	u_int store;		/* processor that updates the PDE */
 };
 
 static void
@@ -1014,12 +1017,8 @@
 {
 	struct pde_action *act = arg;
 
-	sched_pin();
-	if (!CPU_CMP(&act->store, PCPU_PTR(cpumask))) {
-		sched_unpin();
+	if (act->store == PCPU_GET(cpuid))
 		pde_store(act->pde, act->newpde);
-	} else
-		sched_unpin();
 }
 
 static void
@@ -1027,12 +1026,8 @@
 {
 	struct pde_action *act = arg;
 
-	sched_pin();
-	if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
-		sched_unpin();
+	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
 		pmap_update_pde_invalidate(act->va, act->newpde);
-	} else
-		sched_unpin();
 }
 
 /*
@@ -1047,29 +1042,30 @@
 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 {
 	struct pde_action act;
-	cpuset_t active, cpumask, other_cpus;
+	cpuset_t active, other_cpus;
+	u_int cpuid;
 
 	sched_pin();
-	cpumask = PCPU_GET(cpumask);
+	cpuid = PCPU_GET(cpuid);
 	other_cpus = all_cpus;
-	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+	CPU_CLR(cpuid, &other_cpus);
 	if (pmap == kernel_pmap)
 		active = all_cpus;
 	else
 		active = pmap->pm_active;
 	if (CPU_OVERLAP(&active, &other_cpus)) {
-		act.store = cpumask;
+		act.store = cpuid;
 		act.invalidate = active;
 		act.va = va;
 		act.pde = pde;
 		act.newpde = newpde;
-		CPU_OR(&cpumask, &active);
-		smp_rendezvous_cpus(cpumask,
+		CPU_SET(cpuid, &active);
+		smp_rendezvous_cpus(active,
 		    smp_no_rendevous_barrier, pmap_update_pde_action,
 		    pmap_update_pde_teardown, &act);
 	} else {
 		pde_store(pde, newpde);
-		if (CPU_OVERLAP(&active, &cpumask))
+		if (CPU_ISSET(cpuid, &active))
 			pmap_update_pde_invalidate(va, newpde);
 	}
 	sched_unpin();
@@ -5100,16 +5096,18 @@
 {
 	pmap_t pmap, oldpmap;
 	u_int64_t cr3;
+	u_int cpuid;
 
 	critical_enter();
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	oldpmap = PCPU_GET(curpmap);
+	cpuid = PCPU_GET(cpuid);
 #ifdef SMP
-	CPU_NAND_ATOMIC(&oldpmap->pm_active, PCPU_PTR(cpumask));
-	CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
 #else
-	CPU_NAND(&oldpmap->pm_active, PCPU_PTR(cpumask));
-	CPU_OR(&pmap->pm_active, PCPU_PTR(cpumask));
+	CPU_CLR(cpuid, &oldpmap->pm_active);
+	CPU_SET(cpuid, &pmap->pm_active);
 #endif
 	cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
 	td->td_pcb->pcb_cr3 = cr3;
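
Note on the conversion pattern (an editorial sketch, not part of the patch): every hunk above replaces the same idiom. The old code pinned the thread, snapshotted the multi-word per-CPU set with cpumask = PCPU_GET(cpumask), unpinned, and then applied mask-wide primitives such as CPU_OVERLAP(), CPU_OR_ATOMIC(), and CPU_NAND_ATOMIC(). The new code reads the integer PCPU_GET(cpuid) once and uses the single-bit primitives CPU_ISSET(), CPU_SET_ATOMIC(), and CPU_CLR_ATOMIC(); a single u_int can be fetched in one go, so the sched_pin()/sched_unpin() brackets that guarded the structure copy can be dropped. The standalone C11 program below mimics the new-style cpustop_handler() handshake against a hypothetical multi-word bit set; the type and helper names (cpuset_sketch, cpu_isset, cpu_set_atomic, cpu_clr_atomic) are invented for illustration and are not the <sys/cpuset.h> API, whose atomic variants are kernel-only.

/*
 * Minimal userland sketch of the single-bit cpuset idiom the patch adopts.
 * Assumption: a small atomic bit-set array stands in for the kernel cpuset_t.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_WORDS	4
#define SKETCH_BITS	(8 * sizeof(unsigned long))

struct cpuset_sketch {
	_Atomic unsigned long bits[SKETCH_WORDS];
};

/* CPU_ISSET(cpu, set) analogue: test one bit without copying the set. */
static bool
cpu_isset(unsigned int cpu, struct cpuset_sketch *s)
{

	return ((atomic_load(&s->bits[cpu / SKETCH_BITS]) &
	    (1UL << (cpu % SKETCH_BITS))) != 0);
}

/* CPU_SET_ATOMIC(cpu, set) analogue: atomically set one bit. */
static void
cpu_set_atomic(unsigned int cpu, struct cpuset_sketch *s)
{

	atomic_fetch_or(&s->bits[cpu / SKETCH_BITS],
	    1UL << (cpu % SKETCH_BITS));
}

/* CPU_CLR_ATOMIC(cpu, set) analogue: atomically clear one bit. */
static void
cpu_clr_atomic(unsigned int cpu, struct cpuset_sketch *s)
{

	atomic_fetch_and(&s->bits[cpu / SKETCH_BITS],
	    ~(1UL << (cpu % SKETCH_BITS)));
}

static struct cpuset_sketch stopped_cpus;	/* stand-in for the kernel global */

int
main(void)
{
	unsigned int cpuid = 65;	/* deliberately beyond the first word */

	/* The new-style stop/resume handshake, in miniature. */
	cpu_set_atomic(cpuid, &stopped_cpus);	/* "I am stopped" */
	printf("cpu %u stopped: %d\n", cpuid, cpu_isset(cpuid, &stopped_cpus));
	cpu_clr_atomic(cpuid, &stopped_cpus);	/* resumed */
	printf("cpu %u stopped: %d\n", cpuid, cpu_isset(cpuid, &stopped_cpus));
	return (0);
}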