Index: sys/conf/files.mips
===================================================================
--- sys/conf/files.mips	(revision 203535)
+++ sys/conf/files.mips	(working copy)
@@ -21,6 +21,7 @@
 # ----------------------------------------------------------------------
 mips/mips/machdep.c		standard
 mips/mips/mp_machdep.c		optional	smp
+mips/mips/mpboot.S		optional	smp
 mips/mips/psraccess.S		standard
 # ----------------------------------------------------------------------
 # Phase 3
Index: sys/mips/sibyte/sb_scd.h
===================================================================
--- sys/mips/sibyte/sb_scd.h	(revision 203535)
+++ sys/mips/sibyte/sb_scd.h	(working copy)
@@ -42,4 +42,10 @@
 void	sb_write_intmap(int cpu, int intsrc, int intrnum);
 int	sb_read_intmap(int cpu, int intsrc);
 
+#if defined(SMP)
+#define	INTSRC_MAILBOX3		29
+void	sb_set_mailbox(int cpuid, uint64_t val);
+void	sb_clear_mailbox(int cpuid, uint64_t val);
+#endif
+
 #endif	/* _SB_SCD_H_ */
Index: sys/mips/sibyte/sb_asm.S
===================================================================
--- sys/mips/sibyte/sb_asm.S	(revision 203535)
+++ sys/mips/sibyte/sb_asm.S	(working copy)
@@ -27,6 +27,7 @@
  */
 
 #include <machine/asm.h>
+#include <machine/cpuregs.h>
 
 /*
  * We compile a 32-bit kernel to run on the SB-1 processor which is a 64-bit
@@ -80,3 +81,20 @@
 	jr	ra
 	sd	t0, 0(a0)
 END(sb_store64)
+
+#ifdef SMP
+/*
+ * This function must be implemented in assembly because it is called early
+ * in AP boot without a valid stack.
+ *
+ * The cpu number is available in bits 25 to 27 of the coprocessor 0 PRID
+ * register.  This is not documented in the BCM1250 user manual but can be
+ * gleaned from the CFE source code - see sb1250_altcpu.S.
+ */
+LEAF(platform_processor_id)
+	mfc0	v0, MIPS_COP_0_PRID
+	srl	v0, v0, 25
+	jr	ra
+	and	v0, v0, 7
+END(platform_processor_id)
+#endif	/* SMP */
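
A note on the new platform_processor_id(): it has to live in assembly because the AP enters it before any stack exists, but the computation itself is trivial. A C rendering of the same bit extraction, purely for illustration (sb_cpu_number() and its prid argument are hypothetical, not part of the patch):

    #include <sys/types.h>

    /*
     * Illustrative C equivalent of platform_processor_id(): the SB-1
     * core number sits in PRID bits 25..27, so shift and mask.
     */
    static inline int
    sb_cpu_number(uint32_t prid)
    {

        return ((prid >> 25) & 7);  /* 0 or 1 on a two-core BCM1250 */
    }
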
Index: sys/mips/sibyte/sb_machdep.c
===================================================================
--- sys/mips/sibyte/sb_machdep.c	(revision 203535)
+++ sys/mips/sibyte/sb_machdep.c	(working copy)
@@ -74,6 +74,10 @@
 #include
 #include
 
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
 #ifdef CFE
 #include <dev/cfe/cfe_api.h>
 #endif
@@ -114,6 +118,19 @@
 		intrnum = sb_route_intsrc(intsrc);
 		sb_disable_intsrc(cpuid, intsrc);
 		sb_write_intmap(cpuid, intsrc, intrnum);
+#ifdef SMP
+		/*
+		 * Set up the mailbox interrupt mapping.
+		 *
+		 * The mailbox interrupt is "special" in that it is not shared
+		 * with any other interrupt source.
+		 */
+		if (intsrc == INTSRC_MAILBOX3) {
+			intrnum = platform_ipi_intrnum();
+			sb_write_intmap(cpuid, INTSRC_MAILBOX3, intrnum);
+			sb_enable_intsrc(cpuid, INTSRC_MAILBOX3);
+		}
+#endif
 	}
 }
 
@@ -282,7 +299,65 @@
 	mips_wr_config(config);
 }
 
+#ifdef SMP
 void
+platform_ipi_send(int cpuid)
+{
+	KASSERT(cpuid == 0 || cpuid == 1,
+	    ("platform_ipi_send: invalid cpuid %d", cpuid));
+
+	sb_set_mailbox(cpuid, 1ULL);
+}
+
+void
+platform_ipi_clear(void)
+{
+	int cpuid;
+
+	cpuid = PCPU_GET(cpuid);
+	sb_clear_mailbox(cpuid, 1ULL);
+}
+
+int
+platform_ipi_intrnum(void)
+{
+
+	return (4);
+}
+
+void
+platform_init_ap(int cpuid)
+{
+
+	KASSERT(cpuid == 1, ("AP has an invalid cpu id %d", cpuid));
+
+	/*
+	 * Make sure that kseg0 is mapped cacheable-coherent.
+	 */
+	kseg0_map_coherent();
+
+	sb_intr_init(cpuid);
+}
+
+int
+platform_start_ap(int cpuid)
+{
+#ifdef CFE
+	int error;
+
+	if ((error = cfe_cpu_start(cpuid, mpentry, 0, 0, 0))) {
+		printf("cfe_cpu_start error: %d\n", error);
+		return (-1);
+	} else {
+		return (0);
+	}
+#else
+	return (-1);
+#endif	/* CFE */
+}
+#endif	/* SMP */
+
+void
 platform_start(__register_t a0, __register_t a1, __register_t a2,
     __register_t a3)
 {
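
The IPI delivery above leans on the BCM1250 mailbox register's set/clear addressing. A minimal software model of what the two writes do, assuming the usual write-1-to-set / write-1-to-clear register semantics (the array below stands in for the hardware register; none of this is patch code):

    #include <sys/types.h>

    /* Hypothetical model of the per-cpu mailbox register. */
    static uint64_t mailbox[2];

    static void
    model_set_mailbox(int cpu, uint64_t val)
    {
        mailbox[cpu] |= val;    /* a write to MAILBOX_SET_ADDR(cpu) */
    }

    static void
    model_clear_mailbox(int cpu, uint64_t val)
    {
        mailbox[cpu] &= ~val;   /* a write to MAILBOX_CLEAR_ADDR(cpu) */
    }

platform_ipi_send() only ever posts bit 0 (1ULL), and platform_ipi_clear() removes the same bit on the receiving cpu, so a nonzero mailbox simply means "an IPI is pending".
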
Index: sys/mips/sibyte/sb_zbbus.c
===================================================================
--- sys/mips/sibyte/sb_zbbus.c	(revision 203535)
+++ sys/mips/sibyte/sb_zbbus.c	(working copy)
@@ -24,6 +24,9 @@
  * SUCH DAMAGE.
  */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #include
 #include
 #include
@@ -31,21 +34,29 @@
 #include
 #include
 #include
+#include <sys/lock.h>
+#include <sys/mutex.h>
 #include
 #include
 
 #include "sb_scd.h"
 
-__FBSDID("$FreeBSD$");
-
 static MALLOC_DEFINE(M_INTMAP, "sb1250 intmap", "Sibyte 1250 Interrupt Mapper");
 
-#define	NUM_HARD_IRQS	6
+static struct mtx zbbus_intr_mtx;
+MTX_SYSINIT(zbbus_intr_mtx, &zbbus_intr_mtx, "zbbus_intr_mask/unmask lock",
+    MTX_SPIN);
 
+/*
+ * This array holds the mapping between a MIPS hard interrupt and the
+ * interrupt sources that feed into it.
+ */
+static uint64_t hardint_to_intsrc_mask[NHARD_IRQS];
+
 struct sb_intmap {
 	int intsrc;		/* interrupt mapper register number (0 - 63) */
-	int hardint;		/* cpu interrupt from 0 to NUM_HARD_IRQS - 1 */
+	int hardint;		/* cpu interrupt from 0 to NHARD_IRQS - 1 */
 
 	/*
 	 * The device that the interrupt belongs to. Note that multiple
@@ -86,7 +97,7 @@
 {
 	struct sb_intmap *map;
 
-	KASSERT(intrnum >= 0 && intrnum < NUM_HARD_IRQS,
+	KASSERT(intrnum >= 0 && intrnum < NHARD_IRQS,
 	    ("intrnum is out of range: %d", intrnum));
 
 	map = sb_intmap_lookup(intrnum, dev, rid);
@@ -113,12 +124,18 @@
 {
 	struct sb_intmap *map;
 
-	KASSERT(intrnum >= 0 && intrnum < NUM_HARD_IRQS,
+	KASSERT(intrnum >= 0 && intrnum < NHARD_IRQS,
 	    ("intrnum is out of range: %d", intrnum));
 
 	map = sb_intmap_lookup(intrnum, dev, rid);
 	if (map) {
+		/*
+		 * Deliver all interrupts to CPU0.
+		 */
+		mtx_lock_spin(&zbbus_intr_mtx);
+		hardint_to_intsrc_mask[intrnum] |= 1ULL << map->intsrc;
 		sb_enable_intsrc(0, map->intsrc);
+		mtx_unlock_spin(&zbbus_intr_mtx);
 	} else {
 		/*
 		 * In zbbus_setup_intr() we blindly call sb_intmap_activate()
@@ -133,6 +150,52 @@
 	}
 }
 
+/*
+ * Replace the default interrupt mask and unmask routines in intr_machdep.c
+ * with routines that are SMP-friendly.  In contrast to the default mask/unmask
+ * routines in intr_machdep.c, these routines do not change the SR.int_mask bits.
+ *
+ * Instead they use the interrupt mapper to either mask or unmask all
+ * interrupt sources feeding into a particular interrupt line of the processor.
+ *
+ * This means that these routines have an identical effect irrespective of
+ * which cpu is executing them.  This is important because the ithread may
+ * be scheduled to run on either of the cpus.
+ */
+static void
+zbbus_intr_mask(void *arg)
+{
+	uint64_t mask;
+	int irq;
+
+	irq = (uintptr_t)arg;
+
+	mtx_lock_spin(&zbbus_intr_mtx);
+
+	mask = sb_read_intsrc_mask(0);
+	mask |= hardint_to_intsrc_mask[irq];
+	sb_write_intsrc_mask(0, mask);
+
+	mtx_unlock_spin(&zbbus_intr_mtx);
+}
+
+static void
+zbbus_intr_unmask(void *arg)
+{
+	uint64_t mask;
+	int irq;
+
+	irq = (uintptr_t)arg;
+
+	mtx_lock_spin(&zbbus_intr_mtx);
+
+	mask = sb_read_intsrc_mask(0);
+	mask &= ~hardint_to_intsrc_mask[irq];
+	sb_write_intsrc_mask(0, mask);
+
+	mtx_unlock_spin(&zbbus_intr_mtx);
+}
+
 struct zbbus_devinfo {
 	struct resource_list resources;
 };
@@ -155,6 +218,9 @@
 		device_printf(dev, "attached.\n");
 	}
 
+	cpu_set_hardintr_mask_func(zbbus_intr_mask);
+	cpu_set_hardintr_unmask_func(zbbus_intr_unmask);
+
 	bus_generic_probe(dev);
 	bus_enumerate_hinted_children(dev);
 	bus_generic_attach(dev);
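
To make the new bookkeeping concrete, here is a worked example with invented source numbers (nothing below is patch code):

    /*
     * Suppose mapper sources 10 and 33 both feed hard interrupt 2.
     * After the two sb_intmap_activate() calls:
     *
     *      hardint_to_intsrc_mask[2] == (1ULL << 10) | (1ULL << 33)
     *
     * zbbus_intr_mask((void *)(uintptr_t)2) then ORs exactly those two
     * bits into CPU0's mapper mask register, and zbbus_intr_unmask()
     * clears them again.  Only the mapper register changes, so the
     * result is the same no matter which cpu runs the routine.
     */
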
Index: sys/mips/sibyte/sb_scd.c
===================================================================
--- sys/mips/sibyte/sb_scd.c	(revision 203535)
+++ sys/mips/sibyte/sb_scd.c	(working copy)
@@ -34,7 +34,7 @@
 #include
 #include
 
-#include
+#include <machine/hwfunc.h>
 
 #include "sb_scd.h"
 
@@ -189,11 +189,51 @@
 	 * Interrupt 5 is used by sources internal to the CPU (e.g. timer).
 	 * Use a deterministic mapping for the remaining sources.
	 */
+#ifdef SMP
+	KASSERT(platform_ipi_intrnum() == 4,
+	    ("Unexpected interrupt number used for IPI"));
+	intrnum = intsrc % 4;
+#else
 	intrnum = intsrc % 5;
+#endif
 
 	return (intrnum);
 }
 
+#ifdef SMP
+static uint64_t
+sb_read_sysrev(void)
+{
+
+	return (sb_load64(SYSREV_ADDR));
+}
+
+void
+sb_set_mailbox(int cpu, uint64_t val)
+{
+	uint32_t regaddr;
+
+	regaddr = MAILBOX_SET_ADDR(cpu);
+	sb_store64(regaddr, val);
+}
+
+void
+sb_clear_mailbox(int cpu, uint64_t val)
+{
+	uint32_t regaddr;
+
+	regaddr = MAILBOX_CLEAR_ADDR(cpu);
+	sb_store64(regaddr, val);
+}
+
+int
+platform_num_processors(void)
+{
+
+	return (SYSREV_NUM_PROCESSORS(sb_read_sysrev()));
+}
+#endif	/* SMP */
+
 #define	SCD_PHYSADDR	0x10000000
 #define	SCD_SIZE	0x00060000
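
The % 4 above encodes the SMP routing policy: mapper sources fold onto hard interrupt lines 0-3, line 4 is kept free for the IPI (hence the KASSERT) and line 5 for CPU-internal sources such as the clock. Worked out for a couple of sources:

    /*
     *      sb_route_intsrc(10) -> 10 % 4 == 2
     *      sb_route_intsrc(29) -> 29 % 4 == 1      (INTSRC_MAILBOX3)
     *
     * The mailbox source does not stay on line 1: sb_intr_init() in
     * sb_machdep.c rewrites its mapping to platform_ipi_intrnum() (4),
     * so the IPI line is never shared with another source.
     */
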
Index: sys/mips/include/smp.h
===================================================================
--- sys/mips/include/smp.h	(revision 203535)
+++ sys/mips/include/smp.h	(working copy)
@@ -20,7 +20,6 @@
 /*
  * Interprocessor interrupts for SMP.
  */
-#define	IPI_INVLTLB		0x0001
 #define	IPI_RENDEZVOUS		0x0002
 #define	IPI_AST			0x0004
 #define	IPI_STOP		0x0008
@@ -28,13 +27,9 @@
 
 #ifndef LOCORE
 
-extern u_int32_t boot_cpu_id;
-
-void	ipi_selected(u_int cpus, u_int32_t ipi);
-void	ipi_all_but_self(u_int32_t ipi);
-intrmask_t	smp_handle_ipi(struct trapframe *frame);
+void	ipi_selected(cpumask_t cpus, int ipi);
 void	smp_init_secondary(u_int32_t cpuid);
-void	mips_ipi_send(int thread_id);
+void	mpentry(void);
 
 #endif /* !LOCORE */
 #endif /* _KERNEL */
Index: sys/mips/include/intr_machdep.h
===================================================================
--- sys/mips/include/intr_machdep.h	(revision 203535)
+++ sys/mips/include/intr_machdep.h	(working copy)
@@ -60,6 +60,16 @@
 void cpu_intr(struct trapframe *);
 
 /*
+ * Allow a platform to override the default hard interrupt mask and unmask
+ * functions.  The 'arg' can be cast safely to an 'int' and holds the mips
+ * hard interrupt number to mask or unmask.
+ */
+typedef void (*cpu_intr_mask_t)(void *arg);
+typedef void (*cpu_intr_unmask_t)(void *arg);
+void cpu_set_hardintr_mask_func(cpu_intr_mask_t func);
+void cpu_set_hardintr_unmask_func(cpu_intr_unmask_t func);
+
+/*
  * Opaque datatype that represents intr counter
  */
 typedef unsigned long* mips_intrcnt_t;
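
With the reworked signature, ipi_selected() takes a plain cpumask_t, so cpu N is addressed by bit N. A hypothetical caller, for illustration only (kick_cpu1() is not part of the patch):

    #include <sys/types.h>
    #include <machine/smp.h>

    static void
    kick_cpu1(void)
    {

        /* Bit 1 selects cpu 1; IPI_AST is one of the masks above. */
        ipi_selected((cpumask_t)(1 << 1), IPI_AST);
    }
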
Index: sys/mips/include/pcpu.h
===================================================================
--- sys/mips/include/pcpu.h	(revision 203535)
+++ sys/mips/include/pcpu.h	(working copy)
@@ -55,6 +55,13 @@
 #define	PCPU_SET(member,value)	(PCPUP->pc_ ## member = (value))
 #define	PCPU_LAZY_INC(member)	(++PCPUP->pc_ ## member)
 
+#ifdef SMP
+/*
+ * Instantiate the wired TLB entry at PCPU_TLB_ENTRY to map 'pcpu' at 'pcpup'.
+ */
+void	mips_pcpu_tlb_init(struct pcpu *pcpu);
+#endif
+
 #endif	/* _KERNEL */
 
 #endif	/* !_MACHINE_PCPU_H_ */
Index: sys/mips/include/cpu.h
===================================================================
--- sys/mips/include/cpu.h	(revision 203535)
+++ sys/mips/include/cpu.h	(working copy)
@@ -122,6 +122,8 @@
 
 #define	SOFT_INT_MASK	(SOFT_INT_MASK_0 | SOFT_INT_MASK_1)
 #define	HW_INT_MASK	(ALL_INT_MASK & ~SOFT_INT_MASK)
+#define	soft_int_mask(softintr)	(1 << ((softintr) + 8))
+#define	hard_int_mask(hardintr)	(1 << ((hardintr) + 10))
 
 /*
  * The bits in the cause register.
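
The two new macros encode the MIPS convention that soft interrupts occupy CAUSE/STATUS bits IM0-IM1 (bits 8-9) and the six hard interrupts IM2-IM7 (bits 10-15). Spelling out the values the SMP code relies on later:

    /*
     *      hard_int_mask(5) == 1 << 15 == 0x8000   (IM7, count/compare clock)
     *      hard_int_mask(4) == 1 << 14 == 0x4000   (IM6, the Sibyte IPI line)
     *      soft_int_mask(0) == 1 <<  8 == 0x0100   (IM0)
     *
     * smp_init_secondary() computes exactly the first two of these.
     */
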
Index: sys/mips/include/hwfunc.h
===================================================================
--- sys/mips/include/hwfunc.h	(revision 203535)
+++ sys/mips/include/hwfunc.h	(working copy)
@@ -47,4 +47,51 @@
 /* For hardware specific CPU initialization */
 void platform_cpu_init(void);
 void platform_secondary_init(void);
+
+#ifdef SMP
+
+/*
+ * Spin up the AP so that it starts executing the MP bootstrap entry point:
+ * mpentry.
+ *
+ * Returns 0 on success and non-zero on failure.
+ */
+int platform_start_ap(int processor_id);
+
+/*
+ * Platform-specific initialization that needs to be done when an AP starts
+ * running.  This function is called from the MP bootstrap code in mpboot.S.
+ */
+void platform_init_ap(int processor_id);
+
+/*
+ * Return a platform-specific interrupt number that is used to deliver IPIs.
+ *
+ * This hardware interrupt is used to deliver IPIs exclusively and must
+ * not be used for any other interrupt source.
+ */
+int platform_ipi_intrnum(void);
+
+/*
+ * Trigger an IPI interrupt on 'cpuid'.
+ */
+void platform_ipi_send(int cpuid);
+
+/*
+ * Quiesce the IPI interrupt source on the current cpu.
+ */
+void platform_ipi_clear(void);
+
+/*
+ * Return the processor id.
+ *
+ * Note that this function is called in early boot when the stack is not
+ * available.
+ */
+extern int platform_processor_id(void);
+
+/*
+ * Return the number of processors available on this platform.
+ */
+extern int platform_num_processors(void);
+
+#endif	/* SMP */
 #endif /* !_MACHINE_HWFUNC_H_ */
Index: sys/mips/include/asm.h
===================================================================
--- sys/mips/include/asm.h	(revision 203535)
+++ sys/mips/include/asm.h	(working copy)
@@ -497,17 +497,8 @@
 #define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
 #endif
 
-#ifdef SMP
-	/*
-	 * FREEBSD_DEVELOPERS_FIXME
-	 * In multiprocessor case, store/retrieve the pcpu structure
-	 * address for current CPU in scratch register for fast access.
-	 */
-#error "Write GET_CPU_PCPU for SMP"
-#else
 #define	GET_CPU_PCPU(reg)		\
 	lw	reg, _C_LABEL(pcpup);
-#endif
 
 /*
  * Description of the setjmp buffer
Index: sys/mips/conf/SWARM_SMP
===================================================================
--- sys/mips/conf/SWARM_SMP	(revision 0)
+++ sys/mips/conf/SWARM_SMP	(revision 0)
@@ -0,0 +1,7 @@
+#
+# $FreeBSD$
+#
+options 	SMP
+options 	PRINTF_BUFR_SIZE=128
+
+include 	SWARM
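
Taken together, the hwfunc.h contract above plays out in this order during boot (a sketch assembled from the rest of the patch, not literal code):

    /*
     *   BSP: cpu_mp_setmaxid()  -> platform_num_processors()
     *   BSP: cpu_mp_start()     -> start_ap() -> platform_start_ap()
     *   AP:  mpentry (mpboot.S) -> platform_processor_id()
     *   AP:  platform_init_ap() -> smp_init_secondary()
     *   BSP: release_aps()      -> installs the IPI handler and sets
     *        aps_ready; the APs then enter the scheduler
     */
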
Index: sys/mips/mips/mpboot.S
===================================================================
--- sys/mips/mips/mpboot.S	(revision 0)
+++ sys/mips/mips/mpboot.S	(revision 0)
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2010 Neelkanth Natu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/cpuregs.h>
+
+#include "assym.s"
+
+	.text
+	.set noat
+	.set noreorder
+
+GLOBAL(mpentry)
+	mtc0	zero, COP_0_STATUS_REG	/* disable interrupts */
+
+	mtc0	zero, COP_0_CAUSE_REG	/* clear soft interrupts */
+
+	li	t0, CFG_K0_CACHED	/* make sure kseg0 is cached */
+	mtc0	t0, MIPS_COP_0_CONFIG
+	COP0_SYNC
+
+	jal	platform_processor_id	/* get the processor number */
+	nop
+	move	s0, v0
+
+	/*
+	 * Initialize stack and call machine startup.
+	 */
+	PTR_LA	sp, _C_LABEL(pcpu_space)
+	addiu	sp, (NBPG * 2) - START_FRAME
+	sll	t0, s0, PAGE_SHIFT + 1
+	addu	sp, sp, t0
+
+	/* Zero out old ra and old fp for debugger */
+	sw	zero, START_FRAME - 4(sp)
+	sw	zero, START_FRAME - 8(sp)
+
+	PTR_LA	gp, _C_LABEL(_gp)
+
+	jal	platform_init_ap
+	move	a0, s0
+
+	jal	smp_init_secondary
+	move	a0, s0
+
+	PANIC("AP startup failed!")
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c	(revision 203535)
+++ sys/mips/mips/pmap.c	(working copy)
@@ -362,6 +362,15 @@
 	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
 	pcpup = (struct pcpu *)virtual_avail;
 	virtual_avail += PAGE_SIZE * 2;
+
+	/*
+	 * Initialize the wired TLB entry mapping the pcpu region for
+	 * the BSP at 'pcpup'.  Up until this point we were operating
+	 * with the 'pcpup' for the BSP pointing to a virtual address
+	 * in KSEG0 so there was no need for a TLB mapping.
+	 */
+	mips_pcpu_tlb_init(PCPU_ADDR(0));
+
 	if (bootverbose)
 		printf("pcpu is available at virtual address %p.\n", pcpup);
 #endif
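
The stack setup in mpentry deserves a worked example. machdep.c gives every cpu a two-page slot in pcpu_space[][], and the AP boot stack grows down from the top of that cpu's own slot. Assuming PAGE_SIZE == NBPG == 4096:

    /*
     *   sp = pcpu_space + (NBPG * 2) - START_FRAME   (top of slot 0)
     *      + (cpuid << (PAGE_SHIFT + 1))             (skip 8KB per cpu)
     *
     * so for cpuid == 1:  sp = pcpu_space + 16384 - START_FRAME,
     * i.e. just below the top of cpu 1's two-page slot.
     */
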
Index: sys/mips/mips/mp_machdep.c
===================================================================
--- sys/mips/mips/mp_machdep.c	(revision 203535)
+++ sys/mips/mips/mp_machdep.c	(working copy)
@@ -1,124 +1,110 @@
+/*-
+ * Copyright (c) 2009 Neelkanth Natu
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
-#include "opt_kstack_pages.h"
-
 #include
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
-#include
+#include
+#include
-#include
 #include
-#include
-#include
-#include
 #include
+#include
+#include
+#include
 
+static void *dpcpu;
 static struct mtx ap_boot_mtx;
 
-extern struct pcpu __pcpu[];
-extern int num_tlbentries;
-void mips_start_timer(void);
-static volatile int aps_ready = 0;
 
-u_int32_t boot_cpu_id;
+static volatile int aps_ready;
+static volatile int mp_naps;
 
+static void
+ipi_send(struct pcpu *pc, int ipi)
+{
-void
-cpu_mp_announce(void)
-{
+	CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);
+
+	atomic_set_32(&pc->pc_pending_ipis, ipi);
+	platform_ipi_send(pc->pc_cpuid);
+
+	CTR1(KTR_SMP, "%s: sent", __func__);
 }
 
-/*
- * To implement IPIs on MIPS CPU, we use the Interrupt Line 2 ( bit 4 of cause
- * register) and a bitmap to avoid redundant IPI interrupts. To interrupt a
- * set of CPUs, the sender routine runs in a ' loop ' sending interrupts to
- * all the specified CPUs. A single Mutex (smp_ipi_mtx) is used for all IPIs
- * that spinwait for delivery. This includes the following IPIs
- *	IPI_RENDEZVOUS
- *	IPI_INVLPG
- *	IPI_INVLTLB
- *	IPI_INVLRNG
- */
-
-/*
- * send an IPI to a set of cpus.
- */
+/* Send an IPI to a set of cpus. */
 void
-ipi_selected(u_int32_t cpus, u_int ipi)
+ipi_selected(cpumask_t cpus, int ipi)
 {
-	struct pcpu *pcpu;
-	u_int cpuid, new_pending, old_pending;
+	struct pcpu *pc;
 
 	CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
 
-	while ((cpuid = ffs(cpus)) != 0) {
-		cpuid--;
-		cpus &= ~(1 << cpuid);
-		pcpu = pcpu_find(cpuid);
-
-		if (pcpu) {
-			do {
-				old_pending = pcpu->pc_pending_ipis;
-				new_pending = old_pending | ipi;
-			} while (!atomic_cmpset_int(&pcpu->pc_pending_ipis,
-			    old_pending, new_pending));
-
-			if (old_pending)
-				continue;
-
-			mips_ipi_send (cpuid);
-		}
+	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+		if ((cpus & pc->pc_cpumask) != 0)
+			ipi_send(pc, ipi);
 	}
 }
 
 /*
- * send an IPI to all CPUs EXCEPT myself
- */
-void
-ipi_all_but_self(u_int ipi)
-{
-
-	ipi_selected(PCPU_GET(other_cpus), ipi);
-}
-
-/*
  * Handle an IPI sent to this processor.
  */
-intrmask_t
-smp_handle_ipi(struct trapframe *frame)
+static int
+mips_ipi_handler(void *arg)
 {
-	cpumask_t cpumask;		/* This cpu mask */
+	cpumask_t cpumask;
 	u_int	ipi, ipi_bitmap;
+	int	bit;
 
+	platform_ipi_clear();	/* quiesce the pending ipi interrupt */
+
 	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
-	cpumask = PCPU_GET(cpumask);
+	if (ipi_bitmap == 0)
+		return (FILTER_STRAY);
 
 	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
-	while (ipi_bitmap) {
-		/*
-		 * Find the lowest set bit.
-		 */
-		ipi = ipi_bitmap & ~(ipi_bitmap - 1);
+
+	while ((bit = ffs(ipi_bitmap))) {
+		bit = bit - 1;
+		ipi = 1 << bit;
 		ipi_bitmap &= ~ipi;
+
 		switch (ipi) {
-		case IPI_INVLTLB:
-			CTR0(KTR_SMP, "IPI_INVLTLB");
-			break;
-
 		case IPI_RENDEZVOUS:
 			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
 			smp_rendezvous_action();
@@ -129,51 +115,136 @@
 			break;
 
 		case IPI_STOP:
-
 			/*
 			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
 			 * necessary to add it in the switch.
 			 */
 			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
+
+			cpumask = PCPU_GET(cpumask);
 			atomic_set_int(&stopped_cpus, cpumask);
+
 			while ((started_cpus & cpumask) == 0)
-				;
+				cpu_spinwait();
+
 			atomic_clear_int(&started_cpus, cpumask);
 			atomic_clear_int(&stopped_cpus, cpumask);
+			CTR0(KTR_SMP, "IPI_STOP (restart)");
 			break;
+
+		default:
+			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
 		}
 	}
-	return CR_INT_IPI;
+
+	return (FILTER_HANDLED);
 }
 
+static int
+start_ap(int cpuid)
+{
+	int cpus, ms;
+
+	cpus = mp_naps;
+	dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+
+	if (platform_start_ap(cpuid) != 0)
+		return (-1);			/* could not start AP */
+
+	for (ms = 0; ms < 5000; ++ms) {
+		if (mp_naps > cpus)
+			return (0);		/* success */
+		else
+			DELAY(1000);
+	}
+
+	return (-2);				/* timeout initializing AP */
+}
+
 void
 cpu_mp_setmaxid(void)
 {
 
-	mp_maxid = MAXCPU - 1;
+	mp_ncpus = platform_num_processors();
+	if (mp_ncpus <= 0)
+		mp_ncpus = 1;
+
+	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
 }
 
 void
+cpu_mp_announce(void)
+{
+	/* NOTHING */
+}
+
+struct cpu_group *
+cpu_topo(void)
+{
+
+	return (smp_topo_none());
+}
+
+int
+cpu_mp_probe(void)
+{
+
+	return (mp_ncpus > 1);
+}
+
+void
+cpu_mp_start(void)
+{
+	int error, cpuid;
+
+	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+
+	all_cpus = 1;	/* BSP */
+	for (cpuid = 1; cpuid < platform_num_processors(); ++cpuid) {
+		if (cpuid >= MAXCPU) {
+			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
+			continue;
+		}
+
+		if ((error = start_ap(cpuid)) != 0) {
+			printf("AP #%d failed to start: %d\n", cpuid, error);
+			continue;
+		}
+
+		if (bootverbose)
+			printf("AP #%d started!\n", cpuid);
+
+		all_cpus |= 1 << cpuid;
+	}
+
+	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
+}
+
+void
 smp_init_secondary(u_int32_t cpuid)
 {
+	int ipi_int_mask, clock_int_mask;
 
-	if (cpuid >= MAXCPU)
-		panic ("cpu id exceeds MAXCPU\n");
+	/* TLB */
+	Mips_SetWIRED(0);
+	Mips_TLBFlush(num_tlbentries);
+	Mips_SetWIRED(VMWIRED_ENTRIES);
 
-	/* tlb init */
-	R4K_SetWIRED(0);
-	R4K_TLBFlush(num_tlbentries);
-	R4K_SetWIRED(VMWIRED_ENTRIES);
+	/*
+	 * We assume that the L1 cache on the APs is identical to the one
+	 * on the BSP.
	 */
+	mips_dcache_wbinv_all();
+	mips_icache_sync_all();
+
 	MachSetPID(0);
-	Mips_SyncCache();
 
+	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
+	dpcpu_init(dpcpu, cpuid);
 
-	mips_cp0_status_write(0);
+	/* The AP has initialized successfully - allow the BSP to proceed */
+	++mp_naps;
+
+	/* Spin until the BSP is ready to release the APs */
 	while (!aps_ready)
 		;
 
-	mips_sync();
 	mips_sync();
 
 	/* Initialize curthread. */
 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
 	PCPU_SET(curthread, PCPU_GET(idlethread));
@@ -182,15 +253,16 @@
 
 	smp_cpus++;
 
-	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
+	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));
 
 	/* Build our map of 'other' CPUs. */
 	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
 
-	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
+	if (bootverbose)
+		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));
 
 	if (smp_cpus == mp_ncpus) {
-		smp_started = 1;
+		atomic_store_rel_int(&smp_started, 1);
 		smp_active = 1;
 	}
 
@@ -198,103 +270,46 @@
 	while (smp_started == 0)
 		; /* nothing */
 
-	/* Enable Interrupt */
-	mips_cp0_status_write(SR_INT_ENAB);
-
-	/* ok, now grab sched_lock and enter the scheduler */
-	mtx_lock_spin(&sched_lock);
-
 	/*
-	 * Correct spinlock nesting.  The idle thread context that we are
-	 * borrowing was created so that it would start out with a single
-	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
-	 * explicitly acquired locks in this function, the nesting count
-	 * is now 2 rather than 1.  Since we are nested, calling
-	 * spinlock_exit() will simply adjust the counts without allowing
-	 * spin lock using code to interrupt us.
+	 * Unmask the clock and ipi interrupts.
	 */
-	spinlock_exit();
-	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+	clock_int_mask = hard_int_mask(5);
+	ipi_int_mask = hard_int_mask(platform_ipi_intrnum());
+	set_intr_mask(ALL_INT_MASK & ~(ipi_int_mask | clock_int_mask));
 
-	binuptime(PCPU_PTR(switchtime));
-	PCPU_SET(switchticks, ticks);
+	/*
+	 * Bootstrap the compare register.
+	 */
+	mips_wr_compare(mips_rd_count() + counter_freq / hz);
 
-	/* kick off the clock on this cpu */
-	mips_start_timer();
+	enableintr();
 
-	cpu_throw(NULL, choosethread());	/* doesn't return */
+	/* enter the scheduler */
+	sched_throw(NULL);
+
+	panic("scheduler returned us to %s", __func__);
+	/* NOTREACHED */
 }
 
-static int
-smp_start_secondary(int cpuid)
+static void
+release_aps(void *dummy __unused)
 {
-	struct pcpu *pcpu;
-	void *dpcpu;
-	int i;
+	int ipi_irq;
 
-	if (bootverbose)
-		printf("smp_start_secondary: starting cpu %d\n", cpuid);
+	if (mp_ncpus == 1)
+		return;
 
-	dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
-	pcpu_init(&__pcpu[cpuid], cpuid, sizeof(struct pcpu));
-	dpcpu_init(dpcpu, cpuid);
+	/*
	 * IPI handler
	 */
+	ipi_irq = platform_ipi_intrnum();
+	cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
+	    INTR_TYPE_MISC | INTR_EXCL | INTR_FAST, NULL);
 
-	if (bootverbose)
-		printf("smp_start_secondary: cpu %d started\n", cpuid);
-
-	return 1;
-}
-
-int
-cpu_mp_probe(void)
-{
-	int i, cpus;
-
-	/* XXX: Need to check for valid platforms here. */
-
-	boot_cpu_id = PCPU_GET(cpuid);
-	KASSERT(boot_cpu_id == 0, ("cpu_mp_probe() called on non-primary CPU"));
-	all_cpus = PCPU_GET(cpumask);
-	mp_ncpus = 1;
-
-	/* Make sure we have at least one secondary CPU. */
-	cpus = 0;
-	for (i = 0; i < MAXCPU; i++) {
-		cpus++;
-	}
-	return (cpus);
-}
-
-void
-cpu_mp_start(void)
-{
-	int i, cpuid;
-
-	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
-
-	cpuid = 1;
-	for (i = 0; i < MAXCPU; i++) {
-
-		if (i == boot_cpu_id)
-			continue;
-		if (smp_start_secondary(i)) {
-			all_cpus |= (1 << cpuid);
-			mp_ncpus++;
-			cpuid++;
-		}
-	}
-	idle_mask |= CR_INT_IPI;
-	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
-}
-
-static void
-release_aps(void *dummy __unused)
-{
-	if (bootverbose && mp_ncpus > 1)
-		printf("%s: releasing secondary CPUs\n", __func__);
 	atomic_store_rel_int(&aps_ready, 1);
 
-	while (mp_ncpus > 1 && smp_started == 0)
+	while (smp_started == 0)
 		; /* nothing */
 }
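
The sender/receiver protocol above reduces to a small pattern worth seeing in isolation: post the request bits with an atomic OR, raise one hardware interrupt, and drain the bits with an atomic read-and-clear. A self-contained sketch of that pattern (kernel context assumed; 'pending' is per-cpu in the real code, and all names here are illustrative, not kernel API):

    #include <sys/types.h>
    #include <sys/libkern.h>
    #include <machine/atomic.h>

    static volatile uint32_t pending;   /* per-cpu in mp_machdep.c */

    static void
    send_side(uint32_t ipi)
    {

        atomic_set_32(&pending, ipi);   /* post the request bits */
        /* then platform_ipi_send(): one interrupt, however many bits */
    }

    static void
    receive_side(void)
    {
        uint32_t bitmap;
        int bit;

        bitmap = atomic_readandclear_32(&pending);
        while (bitmap != 0) {
            bit = ffs(bitmap) - 1;      /* lowest pending IPI first */
            bitmap &= ~(1u << bit);
            /* dispatch IPI (1 << bit) */
        }
    }
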
- */ +#ifdef SMP void -cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) +mips_pcpu_tlb_init(struct pcpu *pcpu) { -#ifdef SMP vm_paddr_t pa; struct tlb tlb; int lobits; -#endif - pcpu->pc_next_asid = 1; - pcpu->pc_asid_generation = 1; - -#ifdef SMP /* * Map the pcpu structure at the virtual address 'pcpup'. * We use a wired tlb index to do this one-time mapping. @@ -446,7 +434,22 @@ tlb.tlb_lo0 = mips_paddr_to_tlbpfn(pa) | lobits; tlb.tlb_lo1 = mips_paddr_to_tlbpfn(pa + PAGE_SIZE) | lobits; Mips_TLBWriteIndexed(PCPU_TLB_ENTRY, &tlb); +} #endif + +/* + * Initialise a struct pcpu. + */ +void +cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) +{ + + pcpu->pc_next_asid = 1; + pcpu->pc_asid_generation = 1; +#ifdef SMP + if ((vm_offset_t)pcpup >= VM_MIN_KERNEL_ADDRESS) + mips_pcpu_tlb_init(pcpu); +#endif } int Index: sys/mips/mips/intr_machdep.c =================================================================== --- sys/mips/mips/intr_machdep.c (revision 203535) +++ sys/mips/mips/intr_machdep.c (working copy) @@ -50,6 +50,9 @@ static int intrcnt_index; +static cpu_intr_mask_t hardintr_mask_func; +static cpu_intr_unmask_t hardintr_unmask_func; + mips_intrcnt_t mips_intrcnt_create(const char* name) { @@ -128,38 +131,54 @@ } void +cpu_set_hardintr_mask_func(cpu_intr_mask_t func) +{ + + hardintr_mask_func = func; +} + +void +cpu_set_hardintr_unmask_func(cpu_intr_unmask_t func) +{ + + hardintr_unmask_func = func; +} + +void cpu_establish_hardintr(const char *name, driver_filter_t *filt, void (*handler)(void*), void *arg, int irq, int flags, void **cookiep) { struct intr_event *event; int error; -#if 0 - printf("Establish HARD IRQ %d: filt %p handler %p arg %p\n", - irq, filt, handler, arg); -#endif /* * We have 6 levels, but thats 0 - 5 (not including 6) */ if (irq < 0 || irq >= NHARD_IRQS) panic("%s called for unknown hard intr %d", __func__, irq); + if (hardintr_mask_func == NULL) + hardintr_mask_func = mips_mask_hard_irq; + + if (hardintr_unmask_func == NULL) + hardintr_unmask_func = mips_unmask_hard_irq; + event = hardintr_events[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)(uintptr_t)irq, 0, - irq, mips_mask_hard_irq, mips_unmask_hard_irq, + irq, hardintr_mask_func, hardintr_unmask_func, NULL, NULL, "int%d", irq); if (error) return; hardintr_events[irq] = event; + mips_unmask_hard_irq((void*)(uintptr_t)irq); } intr_event_add_handler(event, name, filt, handler, arg, intr_priority(flags), flags, cookiep); - mips_intrcnt_setname(mips_intr_counters[NSOFT_IRQS + irq], event->ie_fullname); - - mips_unmask_hard_irq((void*)(uintptr_t)irq); + mips_intrcnt_setname(mips_intr_counters[NSOFT_IRQS + irq], + event->ie_fullname); } void @@ -185,14 +204,13 @@ if (error) return; softintr_events[irq] = event; + mips_unmask_soft_irq((void*)(uintptr_t)irq); } intr_event_add_handler(event, name, filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(mips_intr_counters[irq], event->ie_fullname); - - mips_unmask_soft_irq((void*)(uintptr_t)irq); } void