Index: sys/i386/i386/vm_machdep.c
===================================================================
--- sys/i386/i386/vm_machdep.c	(revision 223140)
+++ sys/i386/i386/vm_machdep.c	(working copy)
@@ -602,8 +602,8 @@
 	u_int cnt;
 
 	if (smp_active) {
-		sched_pin();
-		map = PCPU_GET(other_cpus);
+		map = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &map);
 		CPU_NAND(&map, &stopped_cpus);
 		if (!CPU_EMPTY(&map)) {
 			printf("cpu_reset: Stopping other CPUs\n");
@@ -612,7 +612,6 @@
 
 		if (PCPU_GET(cpuid) != 0) {
 			cpu_reset_proxyid = PCPU_GET(cpuid);
-			sched_unpin();
 			cpustop_restartfunc = cpu_reset_proxy;
 			cpu_reset_proxy_active = 0;
 			printf("cpu_reset: Restarting BSP\n");
@@ -632,8 +631,7 @@
 
 			while (1);
 			/* NOTREACHED */
-		} else
-			sched_unpin();
+		}
 
 		DELAY(1000000);
 	}
@@ -802,7 +800,8 @@
 	struct sf_head *hash_list;
 	struct sf_buf *sf;
 #ifdef SMP
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 #endif
 	int error;
 
@@ -877,13 +876,14 @@
 	CPU_ZERO(&sf->cpumask);
 shootdown:
 	sched_pin();
-	cpumask = PCPU_GET(cpumask);
-	if (!CPU_OVERLAP(&cpumask, &sf->cpumask)) {
-		CPU_OR(&sf->cpumask, &cpumask);
+	cpuid = PCPU_GET(cpuid);
+	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
+		CPU_SET(cpuid, &sf->cpumask);
 		invlpg(sf->kva);
 	}
 	if ((flags & SFB_CPUPRIVATE) == 0) {
-		other_cpus = PCPU_GET(other_cpus);
+		other_cpus = all_cpus;
+		CPU_CLR(cpuid, &other_cpus);
 		CPU_NAND(&other_cpus, &sf->cpumask);
 		if (!CPU_EMPTY(&other_cpus)) {
 			CPU_OR(&sf->cpumask, &other_cpus);
Index: sys/i386/i386/mp_machdep.c
===================================================================
--- sys/i386/i386/mp_machdep.c	(revision 223140)
+++ sys/i386/i386/mp_machdep.c	(working copy)
@@ -658,7 +658,7 @@
 void
 init_secondary(void)
 {
-	cpuset_t tcpuset, tallcpus;
+	cpuset_t tcpuset;
 	struct pcpu *pc;
 	vm_offset_t addr;
 	int	gsel_tss;
@@ -790,11 +790,6 @@
 	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
 		CPU_OR(&logical_cpus_mask, &tcpuset);
 
-	/* Build our map of 'other' CPUs. */
-	tallcpus = all_cpus;
-	CPU_NAND(&tallcpus, &tcpuset);
-	PCPU_SET(other_cpus, tallcpus);
-
 	if (bootverbose)
 		lapic_dump("AP");
 
@@ -934,7 +929,6 @@
 static int
 start_all_aps(void)
 {
-	cpuset_t tallcpus;
 #ifndef PC98
 	u_char mpbiosreason;
 #endif
@@ -997,11 +991,6 @@
 		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
 	}
 
-	/* build our map of 'other' CPUs */
-	tallcpus = all_cpus;
-	CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
-	PCPU_SET(other_cpus, tallcpus);
-
 	/* restore the warmstart vector */
 	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
 
@@ -1452,11 +1441,12 @@
 void
 ipi_all_but_self(u_int ipi)
 {
+	cpuset_t other_cpus;
 
-	sched_pin();
+	other_cpus = all_cpus;
+	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 	if (IPI_IS_BITMAPED(ipi)) {
-		ipi_selected(PCPU_GET(other_cpus), ipi);
-		sched_unpin();
+		ipi_selected(other_cpus, ipi);
 		return;
 	}
 
@@ -1466,8 +1456,7 @@
 	 * Set the mask of receiving CPUs for this purpose.
 	 */
 	if (ipi == IPI_STOP_HARD)
-		CPU_OR_ATOMIC(&ipi_nmi_pending, PCPU_PTR(other_cpus));
-	sched_unpin();
+		CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
 
 	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c	(revision 223140)
+++ sys/i386/i386/pmap.c	(working copy)
@@ -932,16 +932,18 @@
 void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 		invlpg(va);
 		smp_invlpg(va);
 	} else {
-		cpumask = PCPU_GET(cpumask);
-		other_cpus = PCPU_GET(other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		cpuid = PCPU_GET(cpuid);
+		other_cpus = all_cpus;
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			invlpg(va);
 		CPU_AND(&other_cpus, &pmap->pm_active);
 		if (!CPU_EMPTY(&other_cpus))
@@ -953,8 +955,9 @@
 void
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
 	vm_offset_t addr;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
@@ -962,9 +965,10 @@
 			invlpg(addr);
 		smp_invlpg_range(sva, eva);
 	} else {
-		cpumask = PCPU_GET(cpumask);
-		other_cpus = PCPU_GET(other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		cpuid = PCPU_GET(cpuid);
+		other_cpus = all_cpus;
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
 				invlpg(addr);
 		CPU_AND(&other_cpus, &pmap->pm_active);
@@ -977,16 +981,18 @@
 void
 pmap_invalidate_all(pmap_t pmap)
 {
-	cpuset_t cpumask, other_cpus;
+	cpuset_t other_cpus;
+	u_int cpuid;
 
 	sched_pin();
 	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 		invltlb();
 		smp_invltlb();
 	} else {
-		cpumask = PCPU_GET(cpumask);
-		other_cpus = PCPU_GET(other_cpus);
-		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
+		cpuid = PCPU_GET(cpuid);
+		other_cpus = all_cpus;
+		CPU_CLR(cpuid, &other_cpus);
+		if (CPU_ISSET(cpuid, &pmap->pm_active))
 			invltlb();
 		CPU_AND(&other_cpus, &pmap->pm_active);
 		if (!CPU_EMPTY(&other_cpus))
@@ -1055,12 +1061,8 @@
 {
 	struct pde_action *act = arg;
 
-	sched_pin();
-	if (CPU_OVERLAP(&act->invalidate, PCPU_PTR(cpumask))) {
-		sched_unpin();
+	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
 		pmap_update_pde_invalidate(act->va, act->newpde);
-	} else
-		sched_unpin();
 }
 
 /*
@@ -1079,7 +1081,8 @@
 
 	sched_pin();
 	cpumask = PCPU_GET(cpumask);
-	other_cpus = PCPU_GET(other_cpus);
+	other_cpus = all_cpus;
+	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 	if (pmap == kernel_pmap)
 		active = all_cpus;
 	else
Index: sys/i386/xen/mp_machdep.c
===================================================================
--- sys/i386/xen/mp_machdep.c	(revision 223140)
+++ sys/i386/xen/mp_machdep.c	(working copy)
@@ -523,7 +523,7 @@
 void
 init_secondary(void)
 {
-	cpuset_t tcpuset, tallcpus;
+	cpuset_t tcpuset;
 	vm_offset_t addr;
 	int	gsel_tss;
 
@@ -613,11 +613,6 @@
 	if (hyperthreading_cpus > 1 &&
 	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
 		CPU_OR(&hyperthreading_cpus_mask, &tcpuset);
-
-	/* Build our map of 'other' CPUs. */
-	tallcpus = all_cpus;
-	CPU_NAND(&tallcpus, &tcpuset);
-	PCPU_SET(other_cpus, tallcpus);
 #if 0
 	if (bootverbose)
 		lapic_dump("AP");
@@ -731,7 +726,6 @@
 int
 start_all_aps(void)
 {
-	cpuset_t tallcpus;
 	int x,apic_id, cpu;
 	struct pcpu *pc;
 
@@ -789,11 +783,6 @@
 
 	}
 
-	/* build our map of 'other' CPUs */
-	tallcpus = all_cpus;
-	CPU_NAND(&tallcpus, PCPU_PTR(cpumask));
-	PCPU_SET(other_cpus, tallcpus);
-
 	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
 
 	/* number of APs actually started */
@@ -1184,9 +1173,8 @@
 	 * of help in order to understand what is the source.
 	 * Set the mask of receiving CPUs for this purpose.
 	 */
-	sched_pin();
-	other_cpus = PCPU_GET(other_cpus);
-	sched_unpin();
+	other_cpus = all_cpus;
+	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 	if (ipi == IPI_STOP_HARD)
 		CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
 
Index: sys/i386/xen/pmap.c
===================================================================
--- sys/i386/xen/pmap.c	(revision 223140)
+++ sys/i386/xen/pmap.c	(working copy)
@@ -813,7 +813,8 @@
 		smp_invlpg(va);
 	} else {
 		cpumask = PCPU_GET(cpumask);
-		other_cpus = PCPU_GET(other_cpus);
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
 			invlpg(va);
 		CPU_AND(&other_cpus, &pmap->pm_active);
@@ -840,7 +841,8 @@
 		smp_invlpg_range(sva, eva);
 	} else {
 		cpumask = PCPU_GET(cpumask);
-		other_cpus = PCPU_GET(other_cpus);
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
 				invlpg(addr);
@@ -865,7 +867,8 @@
 		smp_invltlb();
 	} else {
 		cpumask = PCPU_GET(cpumask);
-		other_cpus = PCPU_GET(other_cpus);
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 		if (CPU_OVERLAP(&pmap->pm_active, &cpumask))
 			invltlb();
 		CPU_AND(&other_cpus, &pmap->pm_active);
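Note: the recurring pattern in this patch replaces reads of the cached per-CPU
fields (PCPU_GET(cpumask) and PCPU_GET(other_cpus)) with values computed on the
fly from all_cpus and PCPU_GET(cpuid). A minimal sketch of the idiom follows;
the helper name other_cpus_self() is illustrative only and is not added by the
patch, and the caller is assumed to be pinned (or otherwise kept from migrating)
wherever the surrounding code still holds sched_pin(), as in the hunks above.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

/*
 * Hypothetical helper, for illustration only: build the set of CPUs other
 * than the one currently executing, the way the patched code now does inline
 * instead of reading the removed pc_other_cpus field.
 */
static __inline cpuset_t
other_cpus_self(void)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;			/* all started CPUs */
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);	/* minus ourselves */
	return (other_cpus);
}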