Index: sys/kern/subr_smp.c
===================================================================
--- sys/kern/subr_smp.c	(revision 223034)
+++ sys/kern/subr_smp.c	(working copy)
@@ -142,7 +142,7 @@
 	/* Probe for MP hardware. */
 	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
 		mp_ncpus = 1;
-		all_cpus = PCPU_GET(cpumask);
+		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
 		return;
 	}
 
@@ -708,7 +708,7 @@
 {
 	mp_ncpus = 1;
 	mp_maxid = PCPU_GET(cpuid);
-	all_cpus = PCPU_GET(cpumask);
+	CPU_SETOF(mp_maxid, &all_cpus);
 	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
 }
 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
Index: sys/kern/subr_kdb.c
===================================================================
--- sys/kern/subr_kdb.c	(revision 223034)
+++ sys/kern/subr_kdb.c	(working copy)
@@ -211,9 +211,12 @@
 void
 kdb_panic(const char *msg)
 {
-
 #ifdef SMP
-	stop_cpus_hard(PCPU_GET(other_cpus));
+	cpuset_t other_cpus;
+
+	other_cpus = all_cpus;
+	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+	stop_cpus_hard(other_cpus);
 #endif
 	printf("KDB: panic\n");
 	panic("%s", msg);
@@ -414,7 +417,7 @@
 #if defined(SMP) && defined(KDB_STOPPEDPCB)
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (pc->pc_curthread == thr &&
-		    CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
+		    CPU_ISSET(pc->pc_cpuid, &stopped_cpus))
 			return (KDB_STOPPEDPCB(pc));
 	}
 #endif
@@ -501,6 +504,7 @@
 	struct kdb_dbbe *be;
 	register_t intr;
 #ifdef SMP
+	cpuset_t other_cpus;
 	int did_stop_cpus;
 #endif
 	int handled;
@@ -516,8 +520,11 @@
 	intr = intr_disable();
 
 #ifdef SMP
-	if ((did_stop_cpus = kdb_stop_cpus) != 0)
-		stop_cpus_hard(PCPU_GET(other_cpus));
+	if ((did_stop_cpus = kdb_stop_cpus) != 0) {
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+		stop_cpus_hard(other_cpus);
+	}
 #endif
 
 	kdb_active++;
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c	(revision 223034)
+++ sys/kern/sched_4bsd.c	(working copy)
@@ -951,8 +951,7 @@
 	if (td->td_flags & TDF_IDLETD) {
 		TD_SET_CAN_RUN(td);
 #ifdef SMP
-		/* Spinlock held here, assume no migration. */
-		CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
+		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
 #endif
 	} else {
 		if (TD_IS_RUNNING(td)) {
@@ -1026,7 +1025,7 @@
 
 #ifdef SMP
 	if (td->td_flags & TDF_IDLETD)
-		CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
+		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
 #endif
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
@@ -1055,7 +1054,8 @@
 forward_wakeup(int cpunum)
 {
 	struct pcpu *pc;
-	cpuset_t dontuse, id, map, map2, me;
+	cpuset_t dontuse, map, map2;
+	u_int id, me;
 	int iscpuset;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -1073,27 +1073,24 @@
 	/*
 	 * Check the idle mask we received against what we calculated
 	 * before in the old version.
-	 *
-	 * Also note that sched_lock is held now, thus no migration is
-	 * expected.
 	 */
-	me = PCPU_GET(cpumask);
+	me = PCPU_GET(cpuid);
 
 	/* Don't bother if we should be doing it ourself. */
-	if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
-	    (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
+	if (CPU_ISSET(me, &idle_cpus_mask) &&
+	    (cpunum == NOCPU || me == cpunum))
 		return (0);
 
-	dontuse = me;
+	CPU_SETOF(me, &dontuse);
 	CPU_OR(&dontuse, &stopped_cpus);
 	CPU_OR(&dontuse, &hlt_cpus_mask);
 	CPU_ZERO(&map2);
 	if (forward_wakeup_use_loop) {
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if (!CPU_OVERLAP(&id, &dontuse) &&
+			id = pc->pc_cpuid;
+			if (!CPU_ISSET(id, &dontuse) &&
 			    pc->pc_curthread == pc->pc_idlethread) {
-				CPU_OR(&map2, &id);
+				CPU_SET(id, &map2);
 			}
 		}
 	}
@@ -1125,11 +1122,11 @@
 	if (!CPU_EMPTY(&map)) {
 		forward_wakeups_delivered++;
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if (!CPU_OVERLAP(&map, &id))
+			id = pc->pc_cpuid;
+			if (!CPU_ISSET(id, &map))
 				continue;
 			if (cpu_idle_wakeup(pc->pc_cpuid))
-				CPU_NAND(&map, &id);
+				CPU_CLR(id, &map);
 		}
 		if (!CPU_EMPTY(&map))
 			ipi_selected(map, IPI_AST);
@@ -1147,7 +1144,7 @@
 	int cpri;
 
 	pcpu = pcpu_find(cpuid);
-	if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
+	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
 		forward_wakeups_delivered++;
 		if (!cpu_idle_wakeup(cpuid))
 			ipi_cpu(cpuid, IPI_AST);
@@ -1205,10 +1202,10 @@
 sched_add(struct thread *td, int flags)
 #ifdef SMP
 {
-	cpuset_t idle, me, tidlemsk;
+	cpuset_t tidlemsk;
 	struct td_sched *ts;
+	u_int cpu, cpuid;
 	int forwarded = 0;
-	int cpu;
 	int single_cpu = 0;
 
 	ts = td->td_sched;
@@ -1271,23 +1268,17 @@
 		ts->ts_runq = &runq;
 	}
 
-	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
+	cpuid = PCPU_GET(cpuid);
+	if (single_cpu && cpu != cpuid) {
 		kick_other_cpu(td->td_priority, cpu);
 	} else {
 		if (!single_cpu) {
+			tidlemsk = idle_cpus_mask;
+			CPU_NAND(&tidlemsk, &hlt_cpus_mask);
+			CPU_CLR(cpuid, &tidlemsk);
 
-			/*
-			 * Thread spinlock is held here, assume no
-			 * migration is possible.
-			 */
-			me = PCPU_GET(cpumask);
-			idle = idle_cpus_mask;
-			tidlemsk = idle;
-			CPU_AND(&idle, &me);
-			CPU_OR(&me, &hlt_cpus_mask);
-			CPU_NAND(&tidlemsk, &me);
-
-			if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
+			    ((flags & SRQ_INTR) == 0) &&
 			    !CPU_EMPTY(&tidlemsk))
 				forwarded = forward_wakeup(cpu);
 		}
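
Note on the recurring pattern above: wherever the old code combined the
per-CPU single-bit mask (pc_cpumask / PCPU_GET(cpumask)) with whole-set
operators such as CPU_OVERLAP(), CPU_OR() and CPU_NAND(), the patch now
uses the CPU's integer id with the single-bit operators CPU_ISSET(),
CPU_SET() and CPU_CLR(). A minimal userland sketch of the two idioms,
assuming a FreeBSD tree where <sys/cpuset.h> exposes the cpuset_t
macros; "mask" and "me" are hypothetical stand-ins for idle_cpus_mask
and PCPU_GET(cpuid):

#include <sys/param.h>
#include <sys/cpuset.h>

#include <assert.h>

int
main(void)
{
	cpuset_t mask;
	u_int me = 3;			/* stand-in for PCPU_GET(cpuid) */

	CPU_ZERO(&mask);
	CPU_SET(me, &mask);		/* was: CPU_OR(&mask, &pc->pc_cpumask) */
	assert(CPU_ISSET(me, &mask));	/* was: CPU_OVERLAP(&mask, &pc->pc_cpumask) */
	CPU_CLR(me, &mask);		/* was: CPU_NAND(&mask, &pc->pc_cpumask) */
	assert(CPU_EMPTY(&mask));

	/* CPU_SETOF() zeroes the whole set and then sets a single bit. */
	CPU_SETOF(me, &mask);
	assert(!CPU_EMPTY(&mask));
	return (0);
}

In several hunks this also lets the sched_pin()/sched_unpin() bracketing
go away: reading the integer cpuid once is enough, where holding a
pointer into the per-CPU area had to be guarded against migration.
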
Index: sys/kern/kern_rmlock.c
===================================================================
--- sys/kern/kern_rmlock.c	(revision 223034)
+++ sys/kern/kern_rmlock.c	(working copy)
@@ -263,7 +263,7 @@
 	pc = pcpu_find(curcpu);
 
 	/* Check if we just need to do a proper critical_exit. */
-	if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
+	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
 		critical_exit();
 		return (1);
 	}
@@ -325,7 +325,7 @@
 
 	critical_enter();
 	pc = pcpu_find(curcpu);
-	CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
+	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
 	rm_tracker_add(pc, tracker);
 	sched_pin();
 	critical_exit();
@@ -367,7 +367,7 @@
 	 * conditional jump.
 	 */
 	if (0 == (td->td_owepreempt |
-	    CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
+	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
 		return (1);
 
 	/* We do not have a read token and need to acquire one. */
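
Note: the kern_rmlock.c hunks test and clear the current CPU's bit in
rm->rm_writecpus directly instead of intersecting the set with
pc_cpumask. A simplified model of that bookkeeping; the struct and
functions below are hypothetical illustrations, not the real rmlock
API, and assume FreeBSD's <sys/cpuset.h> macros:

#include <sys/param.h>
#include <sys/cpuset.h>

#include <assert.h>

/* Hypothetical, simplified model of rm_writecpus bookkeeping. */
struct rmlock_model {
	cpuset_t rm_writecpus;	/* CPUs that must resync on read entry */
};

static void
writer_invalidate(struct rmlock_model *rm)
{
	/* A writer marks every CPU as needing a fresh read token. */
	CPU_FILL(&rm->rm_writecpus);
}

static int
reader_enter(struct rmlock_model *rm, u_int cpuid)
{
	/* Fast path: our bit is clear, we already hold a read token. */
	if (!CPU_ISSET(cpuid, &rm->rm_writecpus))
		return (1);
	/* Slow path: acquire a token by clearing our own bit. */
	CPU_CLR(cpuid, &rm->rm_writecpus);
	return (0);
}

int
main(void)
{
	struct rmlock_model rm;

	writer_invalidate(&rm);
	assert(reader_enter(&rm, 0) == 0);	/* slow path once */
	assert(reader_enter(&rm, 0) == 1);	/* fast path after */
	return (0);
}
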
Index: sys/dev/xen/control/control.c
===================================================================
--- sys/dev/xen/control/control.c	(revision 223034)
+++ sys/dev/xen/control/control.c	(working copy)
@@ -197,6 +197,7 @@
 static void
 xctrl_suspend()
 {
+	u_int cpuid;
 	int i, j, k, fpp;
 	unsigned long max_pfn, start_info_mfn;
 
@@ -210,11 +211,11 @@
 	thread_lock(td);
 	sched_bind(td, 0);
 	thread_unlock(td);
-	KASSERT(PCPU_GET(cpuid) == 0, ("xen_suspend: not running on cpu 0"));
+	cpuid = PCPU_GET(cpuid);
+	KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));
 
-	sched_pin();
-	map = PCPU_GET(other_cpus);
-	sched_unpin();
+	map = all_cpus;
+	CPU_CLR(cpuid, &map);
 	CPU_NAND(&map, &stopped_cpus);
 	if (!CPU_EMPTY(&map))
 		stop_cpus(map);
Index: powerpc/booke/pmap.c
===================================================================
--- powerpc/booke/pmap.c	(revision 223106)
+++ powerpc/booke/pmap.c	(working copy)
@@ -1826,6 +1826,7 @@
 mmu_booke_activate(mmu_t mmu, struct thread *td)
 {
 	pmap_t pmap;
+	u_int cpuid;
 
 	pmap = &td->td_proc->p_vmspace->vm_pmap;
 
@@ -1836,14 +1837,15 @@
 
 	mtx_lock_spin(&sched_lock);
 
-	CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
+	cpuid = PCPU_GET(cpuid);
+	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
 	PCPU_SET(curpmap, pmap);
 
-	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
+	if (pmap->pm_tid[cpuid] == TID_NONE)
 		tid_alloc(pmap);
 
 	/* Load PID0 register with pmap tid value. */
-	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
+	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
 	__asm __volatile("isync");
 
 	mtx_unlock_spin(&sched_lock);
@@ -1865,9 +1867,7 @@
 	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
 	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
 
-	sched_pin();
-	CPU_NAND_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask));
-	sched_unpin();
+	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
 
 	PCPU_SET(curpmap, NULL);
 }
Index: powerpc/powerpc/mp_machdep.c
===================================================================
--- powerpc/powerpc/mp_machdep.c	(revision 223106)
+++ powerpc/powerpc/mp_machdep.c	(working copy)
@@ -175,9 +175,8 @@
 			pc->pc_cpuid = bsp.cr_cpuid;
 			pc->pc_bsp = 1;
 		}
-		CPU_SETOF(pc->pc_cpuid, &pc->pc_cpumask);
 		pc->pc_hwref = cpu.cr_hwref;
-		CPU_OR(&all_cpus, &pc->pc_cpumask);
+		CPU_SET(pc->pc_cpuid, &all_cpus);
 next:
 		error = platform_smp_next_cpu(&cpu);
 	}
@@ -215,8 +214,6 @@
 	smp_cpus = 0;
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		cpus++;
-		pc->pc_other_cpus = all_cpus;
-		CPU_NAND(&pc->pc_other_cpus, &pc->pc_cpumask);
 		if (!pc->pc_bsp) {
 			if (bootverbose)
 				printf("Waking up CPU %d (dev=%x)\n",
@@ -278,7 +275,7 @@
 int
 powerpc_ipi_handler(void *arg)
 {
-	cpuset_t self;
+	u_int cpuid;
 	uint32_t ipimask;
 	int msg;
 
@@ -310,14 +307,14 @@
 			 */
 			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
 			    __func__);
-			savectx(&stoppcbs[PCPU_GET(cpuid)]);
-			self = PCPU_GET(cpumask);
+			cpuid = PCPU_GET(cpuid);
+			savectx(&stoppcbs[cpuid]);
 			savectx(PCPU_GET(curpcb));
-			CPU_OR_ATOMIC(&stopped_cpus, &self);
-			while (!CPU_OVERLAP(&started_cpus, &self))
+			CPU_SET_ATOMIC(cpuid, &stopped_cpus);
+			while (!CPU_ISSET(cpuid, &started_cpus))
 				cpu_spinwait();
-			CPU_NAND_ATOMIC(&started_cpus, &self);
-			CPU_NAND_ATOMIC(&stopped_cpus, &self);
+			CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
+			CPU_CLR_ATOMIC(cpuid, &started_cpus);
 			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
 			break;
 		case IPI_HARDCLOCK:
@@ -350,7 +347,7 @@
 	struct pcpu *pc;
 
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-		if (CPU_OVERLAP(&cpus, &pc->pc_cpumask))
+		if (CPU_ISSET(pc->pc_cpuid, &cpus))
 			ipi_send(pc, ipi);
 	}
 }
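
Note: powerpc_ipi_selected() above now asks "is this pcpu's id in the
target set?" via CPU_ISSET() instead of overlapping two masks. The same
walk can be done by index when no pcpu list is at hand; a userland
sketch assuming FreeBSD's <sys/cpuset.h>, with the hypothetical
deliver() standing in for ipi_send():

#include <sys/param.h>
#include <sys/cpuset.h>

#include <stdio.h>

/* Hypothetical stand-in for ipi_send(pc, ipi). */
static void
deliver(u_int cpuid)
{
	printf("IPI -> cpu %u\n", cpuid);
}

int
main(void)
{
	cpuset_t cpus;
	u_int id;

	CPU_ZERO(&cpus);
	CPU_SET(1, &cpus);
	CPU_SET(3, &cpus);

	/* Test every possible id, as the STAILQ walk over cpuhead does. */
	for (id = 0; id < CPU_SETSIZE; id++)
		if (CPU_ISSET(id, &cpus))
			deliver(id);
	return (0);
}
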
Index: powerpc/aim/mmu_oea.c
===================================================================
--- powerpc/aim/mmu_oea.c	(revision 223106)
+++ powerpc/aim/mmu_oea.c	(working copy)
@@ -945,9 +945,7 @@
 	pm = &td->td_proc->p_vmspace->vm_pmap;
 	pmr = pm->pmap_phys;
 
-	sched_pin();
-	CPU_OR(&pm->pm_active, PCPU_PTR(cpumask));
-	sched_unpin();
+	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
 	PCPU_SET(curpmap, pmr);
 }
 
@@ -957,9 +955,7 @@
 	pmap_t pm;
 
 	pm = &td->td_proc->p_vmspace->vm_pmap;
-	sched_pin();
-	CPU_NAND(&pm->pm_active, PCPU_PTR(cpumask));
-	sched_unpin();
+	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
 	PCPU_SET(curpmap, NULL);
 }
 
Index: powerpc/aim/mmu_oea64.c
===================================================================
--- powerpc/aim/mmu_oea64.c	(revision 223106)
+++ powerpc/aim/mmu_oea64.c	(working copy)
@@ -998,9 +998,7 @@
 	pmap_t pm;
 
 	pm = &td->td_proc->p_vmspace->vm_pmap;
-	sched_pin();
-	CPU_OR(&pm->pm_active, PCPU_PTR(cpumask));
-	sched_unpin();
+	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
 
 #ifdef __powerpc64__
 	PCPU_SET(userslb, pm->pm_slb);
@@ -1015,9 +1013,7 @@
 	pmap_t pm;
 
 	pm = &td->td_proc->p_vmspace->vm_pmap;
-	sched_pin();
-	CPU_NAND(&pm->pm_active, PCPU_PTR(cpumask));
-	sched_unpin();
+	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
 #ifdef __powerpc64__
 	PCPU_SET(userslb, NULL);
 #else
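
Note: the pmap activate/deactivate hunks in all three MMU
implementations reduce to setting or clearing the caller's bit in
pm_active: atomically where other CPUs also write the mask (booke),
plainly where they do not (oea/oea64). A self-contained model of this
bookkeeping; struct pmap_model and both functions are hypothetical
simplifications, assuming FreeBSD's <sys/cpuset.h>:

#include <sys/param.h>
#include <sys/cpuset.h>

#include <assert.h>

/* Hypothetical, simplified model of a pmap's active-CPU set. */
struct pmap_model {
	cpuset_t pm_active;	/* CPUs this pmap is active on */
};

static void
activate(struct pmap_model *pm, u_int cpuid)
{
	/* In the kernel this is CPU_SET_ATOMIC() where concurrency demands. */
	CPU_SET(cpuid, &pm->pm_active);
}

static void
deactivate(struct pmap_model *pm, u_int cpuid)
{
	CPU_CLR(cpuid, &pm->pm_active);
}

int
main(void)
{
	struct pmap_model pm;

	CPU_ZERO(&pm.pm_active);
	activate(&pm, 2);
	assert(CPU_ISSET(2, &pm.pm_active));
	deactivate(&pm, 2);
	assert(CPU_EMPTY(&pm.pm_active));
	return (0);
}
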