--- //depot/vendor/freebsd/src/sys/alpha/linux/linux_sysvec.c	2005/01/29 23:15:43
+++ //depot/projects/smpng/sys/alpha/linux/linux_sysvec.c	2005/01/31 22:15:49
@@ -65,12 +65,6 @@
 #include 
 #undef szsigcode
 
-MODULE_VERSION(linux, 1);
-MODULE_DEPEND(linux, osf1, 1, 1, 1);
-MODULE_DEPEND(linux, sysvmsg, 1, 1, 1);
-MODULE_DEPEND(linux, sysvsem, 1, 1, 1);
-MODULE_DEPEND(linux, sysvshm, 1, 1, 1);
-
 MALLOC_DEFINE(M_LINUX, "linux", "Linux mode structures");
 
 #if BYTE_ORDER == LITTLE_ENDIAN
@@ -288,4 +282,5 @@
 DUMMY(rt_sigreturn);
 
+MODULE_DEPEND(linux, osf1, 1, 1, 1);
 DECLARE_MODULE(linuxelf, linux_elf_mod, SI_SUB_EXEC, SI_ORDER_ANY);
--- //depot/vendor/freebsd/src/sys/amd64/include/atomic.h	2003/11/21 03:05:42
+++ //depot/projects/smpng/sys/amd64/include/atomic.h	2004/11/19 20:16:10
@@ -70,7 +70,7 @@
 int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
 int	atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)	\
+#define	ATOMIC_STORE_LOAD(TYPE)			\
 u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
 void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -162,30 +162,22 @@
 
 #if defined(__GNUC__)
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
+#define	ATOMIC_STORE_LOAD(TYPE)				\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
 {							\
-	u_##TYPE res;					\
+	u_##TYPE v;					\
 							\
-	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
-	: "=a" (res),			/* 0 (result) */\
-	  "+m" (*p)			/* 1 */		\
-	: : "memory");					\
-							\
-	return (res);					\
+	v = *p;						\
+	__asm __volatile("lfence" ::: "memory");	\
+	return (v);					\
 }							\
 							\
-/*							\
- * The XCHG instruction asserts LOCK automagically.	\
- */							\
 static __inline void					\
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 {							\
-	__asm __volatile(SOP				\
-	: "+m" (*p),			/* 0 */		\
-	  "+r" (v)			/* 1 */		\
-	: : "memory");					\
+	__asm __volatile("sfence" ::: "memory");	\
+	*p = v;						\
 }							\
 struct __hack
 
@@ -194,7 +186,7 @@
 extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
 extern int atomic_cmpset_long(volatile u_long *, u_long, u_long);
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)	\
+#define	ATOMIC_STORE_LOAD(TYPE)			\
 extern u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
 extern void	atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -222,10 +214,10 @@
 ATOMIC_ASM(add, long, "addq %1,%0", "ir", v);
 ATOMIC_ASM(subtract, long, "subq %1,%0", "ir", v);
 
-ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
-ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
-ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1", "xchgl %1,%0");
-ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1", "xchgq %1,%0");
+ATOMIC_STORE_LOAD(char);
+ATOMIC_STORE_LOAD(short);
+ATOMIC_STORE_LOAD(int);
+ATOMIC_STORE_LOAD(long);
 
 #undef ATOMIC_ASM
 #undef ATOMIC_STORE_LOAD
--- //depot/vendor/freebsd/src/sys/amd64/include/bus_amd64.h	2005/01/05 20:20:40
+++ //depot/projects/smpng/sys/amd64/include/bus_amd64.h	2005/01/05 22:38:38
@@ -1215,9 +1215,9 @@
 {
 #ifdef __GNUC__
 	if (flags & BUS_SPACE_BARRIER_READ)
-		__asm __volatile("lock; addl $0,0(%%rsp)" : : : "memory");
+		__asm __volatile("lfence" : : : "memory");
 	else
-		__asm __volatile("" : : : "memory");
+		__asm __volatile("sfence" : : : "memory");
 #endif
 }
--- //depot/vendor/freebsd/src/sys/compat/linux/linux_misc.c	2005/01/25 21:30:35
+++ //depot/projects/smpng/sys/compat/linux/linux_misc.c	2005/01/31 22:15:49
@@ -1404,3 +1404,8 @@
 	bsd.pid = args->pid;
 	return getsid(td, &bsd);
 }
+
+MODULE_VERSION(linux, 1);
+MODULE_DEPEND(linux, sysvmsg, 1, 1, 1);
+MODULE_DEPEND(linux, sysvsem, 1, 1, 1);
+MODULE_DEPEND(linux, sysvshm, 1, 1, 1);
--- //depot/vendor/freebsd/src/sys/dev/acpica/acpi_resource.c	2005/01/18 20:25:37
+++ //depot/projects/smpng/sys/dev/acpica/acpi_resource.c	2005/01/18 22:54:47
@@ -77,6 +77,12 @@
 		req->counter++;
 		break;
 	}
+	if (irq != rman_get_start(req->res) && irq == 0) {
+		if (bootverbose)
+			printf("IRQ is %u, resource is %lu\n", irq,
+			    rman_get_start(req->res));
+		return (AE_CTRL_TERMINATE);
+	}
 	req->found = 1;
 	KASSERT(irq == rman_get_start(req->res),
 	    ("IRQ resources do not match"));
--- //depot/vendor/freebsd/src/sys/fs/pseudofs/pseudofs_vnops.c	2005/01/13 19:01:14
+++ //depot/projects/smpng/sys/fs/pseudofs/pseudofs_vnops.c	2005/01/14 15:30:06
@@ -90,6 +90,10 @@
 		if (p_cansee(td, proc) != 0 ||
 		    (pn->pn_vis != NULL && !(pn->pn_vis)(td, proc, pn)))
 			r = 0;
+		/*
+		 * XXX: We might should return with the proc locked to
+		 * avoid some races.
+		 */
 		PROC_UNLOCK(proc);
 	}
 	PFS_RETURN (r);
--- //depot/vendor/freebsd/src/sys/i386/i386/initcpu.c	2003/11/10 15:51:40
+++ //depot/projects/smpng/sys/i386/i386/initcpu.c	2004/12/28 16:16:22
@@ -68,7 +68,6 @@
 #ifdef I686_CPU
 static void	init_6x86MX(void);
-static void	init_ppro(void);
 static void	init_mendocino(void);
 #endif
 
@@ -471,19 +470,6 @@
 	write_eflags(eflags);
 }
 
-static void
-init_ppro(void)
-{
-	u_int64_t apicbase;
-
-	/*
-	 * Local APIC should be disabled if it is not going to be used.
-	 */
-	apicbase = rdmsr(MSR_APICBASE);
-	apicbase &= ~APICBASE_ENABLED;
-	wrmsr(MSR_APICBASE, apicbase);
-}
-
 /*
  * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
  * L2 cache).
@@ -574,9 +560,6 @@
 	case CPU_686:
 		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
 			switch (cpu_id & 0xff0) {
-			case 0x610:
-				init_ppro();
-				break;
 			case 0x660:
 				init_mendocino();
 				break;
--- //depot/vendor/freebsd/src/sys/i386/i386/local_apic.c	2005/01/14 18:35:37
+++ //depot/projects/smpng/sys/i386/i386/local_apic.c	2005/01/18 22:54:47
@@ -572,7 +572,6 @@
 apic_init(void *dummy __unused)
 {
 	struct apic_enumerator *enumerator;
-	uint64_t apic_base;
 	int retval, best;
 
 	/* We only support built in local APICs. */
@@ -605,18 +604,6 @@
 		printf("APIC: Using the %s enumerator.\n",
 		    best_enum->apic_name);
 
-	/*
-	 * To work around an errata, we disable the local APIC on some
-	 * CPUs during early startup.  We need to turn the local APIC back
-	 * on on such CPUs now.
-	 */
-	if (cpu == CPU_686 && strcmp(cpu_vendor, "GenuineIntel") == 0 &&
-	    (cpu_id & 0xff0) == 0x610) {
-		apic_base = rdmsr(MSR_APICBASE);
-		apic_base |= APICBASE_ENABLED;
-		wrmsr(MSR_APICBASE, apic_base);
-	}
-
 	/* Second, probe the CPU's in the system. */
 	retval = best_enum->apic_probe_cpus();
 	if (retval != 0)
@@ -669,7 +656,15 @@
 int
 lapic_ipi_wait(int delay)
 {
-	int x, incr;
+	int x, incr, allow_ipis, done;
+
+	/*
+	 * If interrupts are disabled, then hack on the APIC to allow
+	 * safe IPIs to come in while we wait.
+	 */
+	allow_ipis = (read_eflags() & PSL_I) == 0;
+	if (allow_ipis)
+		APIC_IPI_SPINWAIT_ENTER();
 
 	/*
 	 * Wait delay loops for IPI to be sent.  This is highly bogus
@@ -681,12 +676,17 @@
 		delay = 1;
 	} else
 		incr = 1;
+	done = 0;
 	for (x = 0; x < delay; x += incr) {
-		if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
-			return (1);
+		if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE) {
+			done = 1;
+			break;
+		}
 		ia32_pause();
 	}
-	return (0);
+	if (allow_ipis)
+		APIC_IPI_SPINWAIT_EXIT();
+	return (done);
 }
 
 void
@@ -751,8 +751,12 @@
 	}
 
 	/* Wait for an earlier IPI to finish. */
-	if (!lapic_ipi_wait(BEFORE_SPIN))
-		panic("APIC: Previous IPI is stuck");
+	if (!lapic_ipi_wait(BEFORE_SPIN)) {
+		if (panicstr != NULL)
+			return;
+		else
+			panic("APIC: Previous IPI is stuck");
+	}
 
 	lapic_ipi_raw(icrlo, destfield);
--- //depot/vendor/freebsd/src/sys/i386/i386/locore.s	2004/11/20 02:31:01
+++ //depot/projects/smpng/sys/i386/i386/locore.s	2004/11/22 20:01:11
@@ -782,9 +782,24 @@
 	movl $1,%ecx
 	fillkptphys(%edx)
 
-/* Map read-only from page 1 to the beginning of the kernel text section */
+#define BIOS_START 0xc0000
+#define BIOS_END 0xfffff
+
+/* Map read-only from page 1 to the beginning of BIOS */
 	movl $PAGE_SIZE, %eax
 	xorl %edx,%edx
+	movl $(BIOS_START - PAGE_SIZE)/PAGE_SIZE,%ecx
+	fillkptphys(%edx)
+
+/* Map the BIOS read-write for BIOS calls that want to write to it */
+	movl $BIOS_START, %eax
+	movl $PG_RW,%edx
+	movl $(BIOS_END + 1 - BIOS_START)/PAGE_SIZE,%ecx
+	fillkptphys(%edx)
+
+/* Map read-only from the BIOS to the beginning of the kernel text section */
+	movl $BIOS_END + 1, %eax
+	xorl %edx,%edx
 	movl $R(btext),%ecx
 	addl $PAGE_MASK,%ecx
 	subl %eax,%ecx
--- //depot/vendor/freebsd/src/sys/i386/i386/machdep.c	2005/01/18 20:25:37
+++ //depot/projects/smpng/sys/i386/i386/machdep.c	2005/01/18 22:54:47
@@ -111,6 +111,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -1106,8 +1107,11 @@
 	pcb->pcb_gs = _udatasel;
 	load_gs(_udatasel);
 
+	mtx_lock_spin(&sched_lock);
 	if (td->td_proc->p_md.md_ldt)
 		user_ldt_free(td);
+	else
+		mtx_unlock_spin(&sched_lock);
 
 	bzero((char *)regs, sizeof(struct trapframe));
 	regs->tf_eip = entry;
@@ -2085,6 +2089,13 @@
 	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
 	    GSEL(GCODE_SEL, SEL_KPL));
 
+	/*
+	 * Workaround for PentiumPro Errata 5AP: Spurious interrupts routed
+	 * to int15 in APIC virtual wire mode.
+	 */
+	setidt(15, &IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
+	    GSEL(GCODE_SEL, SEL_KPL));
+
 	r_idt.rd_limit = sizeof(idt0) - 1;
 	r_idt.rd_base = (int) idt;
 	lidt(&r_idt);
--- //depot/vendor/freebsd/src/sys/i386/i386/mp_machdep.c	2004/12/23 20:38:18
+++ //depot/projects/smpng/sys/i386/i386/mp_machdep.c	2004/12/23 20:48:47
@@ -926,8 +926,10 @@
 	smp_tlb_addr2 = addr2;
 	atomic_store_rel_int(&smp_tlb_wait, 0);
 	ipi_all_but_self(vector);
+	APIC_IPI_SPINWAIT_ENTER();
 	while (smp_tlb_wait < ncpu)
 		ia32_pause();
+	APIC_IPI_SPINWAIT_EXIT();
 }
 
 /*
@@ -1015,8 +1017,10 @@
 		ipi_all_but_self(vector);
 	else
 		ipi_selected(mask, vector);
+	APIC_IPI_SPINWAIT_ENTER();
 	while (smp_tlb_wait < ncpu)
 		ia32_pause();
+	APIC_IPI_SPINWAIT_EXIT();
 }
 
 void
--- //depot/vendor/freebsd/src/sys/i386/i386/pmap.c	2004/12/23 20:20:51
+++ //depot/projects/smpng/sys/i386/i386/pmap.c	2004/12/23 20:48:47
@@ -1298,11 +1298,13 @@
 		    (u_int)&pmap->pm_active);
 		atomic_store_rel_int(&lazywait, 0);
 		ipi_selected(mask, IPI_LAZYPMAP);
+		APIC_IPI_SPINWAIT_ENTER();
 		while (lazywait == 0) {
 			ia32_pause();
 			if (--spins == 0)
 				break;
 		}
+		APIC_IPI_SPINWAIT_EXIT();
 	}
 	mtx_unlock_spin(&smp_ipi_mtx);
 	if (spins == 0)
--- //depot/vendor/freebsd/src/sys/i386/i386/sys_machdep.c	2005/01/26 14:01:04
+++ //depot/projects/smpng/sys/i386/i386/sys_machdep.c	2005/01/31 22:15:49
@@ -319,7 +319,7 @@
 #endif
 
 /*
- * Must be called with either sched_lock free or held but not recursed.
+ * Must be called with sched_lock held but not recursed.
 * If it does not return NULL, it will return with it owned.
 */
 struct proc_ldt *
@@ -327,9 +327,8 @@
 {
 	struct proc_ldt *pldt, *new_ldt;
 
-	if (mtx_owned(&sched_lock))
-		mtx_unlock_spin(&sched_lock);
-	mtx_assert(&sched_lock, MA_NOTOWNED);
+	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
+	mtx_unlock_spin(&sched_lock);
 	MALLOC(new_ldt, struct proc_ldt *, sizeof(struct proc_ldt),
 	    M_SUBPROC, M_WAITOK);
 
@@ -360,7 +359,7 @@
 }
 
 /*
- * Must be called either with sched_lock free or held but not recursed.
+ * Must be called with sched_lock held but not recursed.
 * If md_ldt is not NULL, it will return with sched_lock released.
 */
 void
@@ -372,8 +371,6 @@
 	if (pldt == NULL)
 		return;
 
-	if (!mtx_owned(&sched_lock))
-		mtx_lock_spin(&sched_lock);
 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	if (td == PCPU_GET(curthread)) {
 		lldt(_default_ldt);
@@ -446,7 +443,7 @@
 	int error = 0, i;
 	int largest_ld;
 	struct mdproc *mdp = &td->td_proc->p_md;
-	struct proc_ldt *pldt = NULL;
+	struct proc_ldt *pldt;
 	union descriptor *dp;
 
 #ifdef DEBUG
@@ -633,14 +630,17 @@
 			return (ENOMEM);
 		if (len < NLDT + 1)
 			len = NLDT + 1;
+
+		/* allocate user ldt */
+		mtx_lock_spin(&sched_lock);
 		pldt = mdp->md_ldt;
-		/* allocate user ldt */
 		if (!pldt || len > pldt->ldt_len) {
-			struct proc_ldt *new_ldt = user_ldt_alloc(mdp, len);
+			struct proc_ldt *new_ldt;
+
+			new_ldt = user_ldt_alloc(mdp, len);
 			if (new_ldt == NULL)
 				return (ENOMEM);
 			pldt = mdp->md_ldt;
-			/* sched_lock was held by user_ldt_alloc */
 			if (pldt) {
 				if (new_ldt->ldt_len > pldt->ldt_len) {
 					old_ldt_base = pldt->ldt_base;
@@ -677,6 +677,7 @@
 			set_user_ldt(mdp);
 			mtx_unlock_spin(&sched_lock);
 #endif
-		}
+		} else
+			mtx_unlock_spin(&sched_lock);
 		return (0);
 }
--- //depot/vendor/freebsd/src/sys/i386/i386/vm_machdep.c	2005/01/14 20:55:40
+++ //depot/projects/smpng/sys/i386/i386/vm_machdep.c	2005/01/19 21:44:15
@@ -152,7 +152,10 @@
 		if ((flags & RFMEM) == 0) {
 			/* unshare user LDT */
 			struct mdproc *mdp1 = &p1->p_md;
-			struct proc_ldt *pldt = mdp1->md_ldt;
+			struct proc_ldt *pldt;
+
+			mtx_lock_spin(&sched_lock);
+			pldt = mdp1->md_ldt;
 			if (pldt && pldt->ldt_refcnt > 1) {
 				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
 				if (pldt == NULL)
@@ -160,7 +163,8 @@
 				mdp1->md_ldt = pldt;
 				set_user_ldt(mdp1);
 				user_ldt_free(td1);
-			}
+			} else
+				mtx_unlock_spin(&sched_lock);
 		}
 		return;
 	}
@@ -290,30 +294,32 @@
 void
 cpu_exit(struct thread *td)
 {
-	struct mdproc *mdp;
+
+	mtx_lock_spin(&sched_lock);
+	if (td->td_proc->p_md.md_ldt) {
-	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
-	mdp = &td->td_proc->p_md;
-	if (mdp->md_ldt) {
+		/* Reset pc->pcb_gs and %gs before invalidating it. */
 		td->td_pcb->pcb_gs = _udatasel;
 		load_gs(_udatasel);
+
 		user_ldt_free(td);
-	}
+	} else
+		mtx_unlock_spin(&sched_lock);
 }
 
 void
 cpu_thread_exit(struct thread *td)
 {
-	struct pcb *pcb = td->td_pcb;
+
 #ifdef DEV_NPX
-	if (td == PCPU_GET(fpcurthread))
-		npxdrop();
+	npxexit(td);
 #endif
-	if (pcb->pcb_flags & PCB_DBREGS) {
-		/* disable all hardware breakpoints */
+
+	/* Disable any hardware breakpoints. */
+	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
 		reset_dbregs();
-		pcb->pcb_flags &= ~PCB_DBREGS;
+		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
 	}
 }
@@ -323,7 +329,7 @@
 	struct pcb *pcb;
 
 	pcb = td->td_pcb;
-	if (pcb->pcb_ext != 0) {
+	if (pcb->pcb_ext != NULL) {
 		/* XXXKSE XXXSMP not SMP SAFE.. what locks do we have? */
 		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
 		/*
@@ -332,7 +338,7 @@
 		 */
 		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
 		    ctob(IOPAGES + 1));
-		pcb->pcb_ext = 0;
+		pcb->pcb_ext = NULL;
 	}
 }
--- //depot/vendor/freebsd/src/sys/i386/include/apicvar.h	2004/12/23 19:50:41
+++ //depot/projects/smpng/sys/i386/include/apicvar.h	2004/12/23 20:26:03
@@ -106,11 +106,19 @@
 * other deadlocks caused by IPI_STOP.
 */
 
+/*
+ * These interrupt handlers are for IPIs and local interrupts whose handlers
+ * do not use any spin locks, so they may still be allowed when a spin lock
+ * is held.
+ */
+#define APIC_LOCK_SAFE_INTS (APIC_TIMER_INT + 1)
+
 /* Interrupts for local APIC LVT entries other than the timer. */
-#define APIC_LOCAL_INTS 240
+#define APIC_LOCAL_INTS APIC_LOCK_SAFE_INTS
 #define APIC_ERROR_INT APIC_LOCAL_INTS
 #define APIC_THERMAL_INT (APIC_LOCAL_INTS + 1)
 
+/* Spin lock safe IPIs. */
 #define APIC_IPI_INTS (APIC_LOCAL_INTS + 2)
 #define IPI_RENDEZVOUS (APIC_IPI_INTS)		/* Inter-CPU rendezvous. */
 #define IPI_INVLTLB (APIC_IPI_INTS + 1)		/* TLB Shootdown IPIs */
@@ -155,6 +163,21 @@
 #define APIC_BUS_PCI 2
 #define APIC_BUS_MAX APIC_BUS_PCI
 
+#if 0
+#define APIC_IPI_SPINWAIT_ENTER() do {	\
+	lapic_set_tpr(APIC_LOCK_SAFE_INTS);	\
+	enable_intr();			\
+} while (0)
+
+#define APIC_IPI_SPINWAIT_EXIT() do {	\
+	disable_intr();			\
+	lapic_set_tpr(0);		\
+} while (0)
+#else
+#define APIC_IPI_SPINWAIT_ENTER()
+#define APIC_IPI_SPINWAIT_EXIT()
+#endif
+
 /*
  * An APIC enumerator is a psuedo bus driver that enumerates APIC's including
  * CPU's and I/O APIC's.
--- //depot/vendor/freebsd/src/sys/i386/include/bus_at386.h	2005/01/06 22:21:32
+++ //depot/projects/smpng/sys/i386/include/bus_at386.h	2005/01/07 18:05:05
@@ -298,8 +298,8 @@
 	1:	movb (%2),%%al \n\
 		stosb \n\
 		loop 1b" :
-	"=D" (addr), "=c" (count) :
-	"r" (bsh + offset), "0" (addr), "1" (count) :
+	"+D" (addr), "+c" (count) :
+	"r" (bsh + offset) :
 	"%eax", "memory");
 #else
 # ifndef lint
@@ -331,8 +331,8 @@
 	1:	movw (%2),%%ax \n\
 		stosw \n\
 		loop 1b" :
-	"=D" (addr), "=c" (count) :
-	"r" (bsh + offset), "0" (addr), "1" (count) :
+	"+D" (addr), "+c" (count) :
+	"r" (bsh + offset) :
 	"%eax", "memory");
 #else
 # ifndef lint
@@ -364,8 +364,8 @@
 	1:	movl (%2),%%eax \n\
 		stosl \n\
 		loop 1b" :
-	"=D" (addr), "=c" (count) :
-	"r" (bsh + offset), "0" (addr), "1" (count) :
+	"+D" (addr), "+c" (count) :
+	"r" (bsh + offset) :
 	"%eax", "memory");
 #else
 # ifndef lint
@@ -418,9 +418,8 @@
 		stosb \n\
 		incl %2 \n\
 		loop 1b" :
-	"=D" (addr), "=c" (count), "=d" (_port_) :
-	"0" (addr), "1" (count), "2" (_port_) :
-	"%eax", "memory", "cc");
+	"+D" (addr), "+c" (count), "+d" (_port_) : :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -439,9 +438,8 @@
 		cld \n\
 		repne \n\
 		movsb" :
-	"=D" (addr), "=c" (count), "=S" (_port_) :
-	"0" (addr), "1" (count), "2" (_port_) :
-	"memory", "cc");
+	"+D" (addr), "+c" (count), "+S" (_port_) : :
+	"memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -468,9 +466,8 @@
 		stosw \n\
 		addl $2,%2 \n\
 		loop 1b" :
-	"=D" (addr), "=c" (count), "=d" (_port_) :
-	"0" (addr), "1" (count), "2" (_port_) :
-	"%eax", "memory", "cc");
+	"+D" (addr), "+c" (count), "+d" (_port_) : :
+	"%eax", "memory");
#else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -489,9 +486,8 @@
 		cld \n\
 		repne \n\
 		movsw" :
-	"=D" (addr), "=c" (count), "=S" (_port_) :
-	"0" (addr), "1" (count), "2" (_port_) :
-	"memory", "cc");
+	"+D" (addr), "+c" (count), "+S" (_port_) : :
+	"memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -518,9 +514,8 @@
 		stosl \n\
 		addl $4,%2 \n\
 		loop 1b" :
-	"=D" (addr), "=c" (count), "=d" (_port_) :
-	"0" (addr), "1" (count), "2" (_port_) :
-	"%eax", "memory", "cc");
+	"+D" (addr), "+c" (count), "+d" (_port_) : :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -539,9 +534,8 @@
 		cld \n\
 		repne \n\
 		movsl" :
-	"=D" (addr), "=c" (count), "=S" (_port_) :
-	"0" (addr), "1" (count), "2" (_port_) :
-	"memory", "cc");
+	"+D" (addr), "+c" (count), "+S" (_port_) : :
+	"memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -673,9 +667,9 @@
 	1:	lodsb \n\
 		movb %%al,(%2) \n\
 		loop 1b" :
-	"=S" (addr), "=c" (count) :
-	"r" (bsh + offset), "0" (addr), "1" (count) :
-	"%eax", "memory", "cc");
+	"+S" (addr), "+c" (count) :
+	"r" (bsh + offset) :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -706,9 +700,9 @@
 	1:	lodsw \n\
 		movw %%ax,(%2) \n\
 		loop 1b" :
-	"=S" (addr), "=c" (count) :
-	"r" (bsh + offset), "0" (addr), "1" (count) :
-	"%eax", "memory", "cc");
+	"+S" (addr), "+c" (count) :
+	"r" (bsh + offset) :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -739,9 +733,9 @@
 	1:	lodsl \n\
 		movl %%eax,(%2) \n\
 		loop 1b" :
-	"=S" (addr), "=c" (count) :
-	"r" (bsh + offset), "0" (addr), "1" (count) :
-	"%eax", "memory", "cc");
+	"+S" (addr), "+c" (count) :
+	"r" (bsh + offset) :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -794,9 +788,8 @@
 		outb %%al,%w0 \n\
 		incl %0 \n\
 		loop 1b" :
-	"=d" (_port_), "=S" (addr), "=c" (count) :
-	"0" (_port_), "1" (addr), "2" (count) :
-	"%eax", "memory", "cc");
+	"+d" (_port_), "+S" (addr), "+c" (count) : :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -815,9 +808,8 @@
 		cld \n\
 		repne \n\
 		movsb" :
-	"=D" (_port_), "=S" (addr), "=c" (count) :
-	"0" (_port_), "1" (addr), "2" (count) :
-	"memory", "cc");
+	"+D" (_port_), "+S" (addr), "+c" (count) : :
+	"memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -844,9 +836,8 @@
 		outw %%ax,%w0 \n\
 		addl $2,%0 \n\
 		loop 1b" :
-	"=d" (_port_), "=S" (addr), "=c" (count) :
-	"0" (_port_), "1" (addr), "2" (count) :
-	"%eax", "memory", "cc");
+	"+d" (_port_), "+S" (addr), "+c" (count) : :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -865,9 +856,8 @@
 		cld \n\
 		repne \n\
 		movsw" :
-	"=D" (_port_), "=S" (addr), "=c" (count) :
-	"0" (_port_), "1" (addr), "2" (count) :
-	"memory", "cc");
+	"+D" (_port_), "+S" (addr), "+c" (count) : :
+	"memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -894,9 +884,8 @@
 		outl %%eax,%w0 \n\
 		addl $4,%0 \n\
 		loop 1b" :
-	"=d" (_port_), "=S" (addr), "=c" (count) :
-	"0" (_port_), "1" (addr), "2" (count) :
-	"%eax", "memory", "cc");
+	"+d" (_port_), "+S" (addr), "+c" (count) : :
+	"%eax", "memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
@@ -915,9 +904,8 @@
 		cld \n\
 		repne \n\
 		movsl" :
-	"=D" (_port_), "=S" (addr), "=c" (count) :
-	"0" (_port_), "1" (addr), "2" (count) :
-	"memory", "cc");
+	"+D" (_port_), "+S" (addr), "+c" (count) : :
+	"memory");
 #else
 # ifndef lint
 # error "no assembler code for your compiler"
--- //depot/vendor/freebsd/src/sys/i386/include/bus_pc98.h	2005/01/06 22:21:32
+++ //depot/projects/smpng/sys/i386/include/bus_pc98.h	2005/01/07 18:05:05
@@ -269,11 +269,10 @@
 	\
 	__asm __volatile("call *%2"	\
 		:"=a" (result),		\
-		 "=d" (offset)		\
+		 "+d" (offset)		\
 		:"o" (bsh->bsh_bam.bs_read_##BWN),	\
-		 "b" (bsh),		\
-		 "1" (offset)		\
-		);			\
+		 "b" (bsh)		\
+		:"ecx","memory");	\
 	\
 	return result;	\
 }
@@ -295,12 +294,11 @@
 {	\
 	\
 	__asm __volatile("call *%1"	\
-		:"=d" (offset)		\
+		:"+d" (offset)		\
 		:"o" (bsh->bsh_bam.bs_write_##BWN),	\
 		 "a" (val),		\
-		 "b" (bsh),		\
-		 "0" (offset)		\
-		);			\
+		 "b" (bsh)		\
+		:"ecx","memory");	\
 }
 
 _BUS_SPACE_WRITE(u_int8_t,1)
@@ -321,15 +319,12 @@
 {	\
 	\
 	__asm __volatile("call *%3"	\
-		:"=c" (cnt),		\
-		 "=d" (offset),		\
-		 "=D" (buf)		\
+		:"+c" (cnt),		\
+		 "+d" (offset),		\
+		 "+D" (buf)		\
 		:"o" (bsh->bsh_bam.bs_read_multi_##BWN),	\
-		 "b" (bsh),		\
-		 "0" (cnt),		\
-		 "1" (offset),		\
-		 "2" (buf)		\
-		:"memory");		\
+		 "b" (bsh)		\
+		:"eax","memory");	\
 }
 
 _BUS_SPACE_READ_MULTI(u_int8_t,1)
@@ -350,15 +345,12 @@
 {	\
 	\
 	__asm __volatile("call *%3"	\
-		:"=c" (cnt),		\
-		 "=d" (offset),		\
-		 "=S" (buf)		\
+		:"+c" (cnt),		\
+		 "+d" (offset),		\
+		 "+S" (buf)		\
 		:"o" (bsh->bsh_bam.bs_write_multi_##BWN),	\
-		 "b" (bsh),		\
-		 "0" (cnt),		\
-		 "1" (offset),		\
-		 "2" (buf)		\
-		);			\
+		 "b" (bsh)		\
+		:"eax","memory");	\
 }
 
 _BUS_SPACE_WRITE_MULTI(u_int8_t,1)
@@ -374,20 +366,17 @@
 	bus_space_tag_t tag;	\
 	bus_space_handle_t bsh;	\
 	bus_size_t offset;	\
-	TYPE *buf;		\
+	TYPE *buf;		\
 	size_t cnt;		\
 {	\
 	\
 	__asm __volatile("call *%3"	\
-		:"=c" (cnt),		\
-		 "=d" (offset),		\
-		 "=D" (buf)		\
+		:"+c" (cnt),		\
+		 "+d" (offset),		\
+		 "+D" (buf)		\
 		:"o" (bsh->bsh_bam.bs_read_region_##BWN),	\
-		 "b" (bsh),		\
-		 "0" (cnt),		\
-		 "1" (offset),		\
-		 "2" (buf)		\
-		:"memory");		\
+		 "b" (bsh)		\
+		:"eax","memory");	\
 }
 
 _BUS_SPACE_READ_REGION(u_int8_t,1)
@@ -408,15 +397,12 @@
 {	\
 	\
 	__asm __volatile("call *%3"	\
-		:"=c" (cnt),		\
-		 "=d" (offset),		\
-		 "=S" (buf)		\
+		:"+c" (cnt),		\
+		 "+d" (offset),		\
+		 "+S" (buf)		\
 		:"o" (bsh->bsh_bam.bs_write_region_##BWN),	\
-		 "b" (bsh),		\
-		 "0" (cnt),		\
-		 "1" (offset),		\
-		 "2" (buf)		\
-		);			\
+		 "b" (bsh)		\
+		:"eax","memory");	\
 }
 
 _BUS_SPACE_WRITE_REGION(u_int8_t,1)
@@ -437,14 +423,12 @@
 {	\
 	\
 	__asm __volatile("call *%2"	\
-		:"=c" (cnt),		\
-		 "=d" (offset)		\
+		:"+c" (cnt),		\
+		 "+d" (offset)		\
 		:"o" (bsh->bsh_bam.bs_set_multi_##BWN),	\
 		 "a" (val),		\
-		 "b" (bsh),		\
-		 "0" (cnt),		\
-		 "1" (offset)		\
-		);			\
+		 "b" (bsh)		\
+		:"memory");		\
 }
 
 _BUS_SPACE_SET_MULTI(u_int8_t,1)
@@ -465,14 +449,12 @@
 {	\
 	\
 	__asm __volatile("call *%2"	\
-		:"=c" (cnt),		\
-		 "=d" (offset)		\
+		:"+c" (cnt),		\
+		 "+d" (offset)		\
 		:"o" (bsh->bsh_bam.bs_set_region_##BWN),	\
 		 "a" (val),		\
-		 "b" (bsh),		\
-		 "0" (cnt),		\
-		 "1" (offset)		\
-		);			\
+		 "b" (bsh)		\
+		:"memory");		\
 }
 
 _BUS_SPACE_SET_REGION(u_int8_t,1)
@@ -497,16 +479,13 @@
 		panic("bus_space_copy_region: funcs mismatch (ENOSUPPORT)");\
 	\
 	__asm __volatile("call *%3"	\
-		:"=c" (cnt),		\
-		 "=S" (src),		\
-		 "=D" (dst)		\
+		:"+c" (cnt),		\
+		 "+S" (src),		\
+		 "+D" (dst)		\
 		:"o" (dbsh->bsh_bam.bs_copy_region_##BWN),	\
 		 "a" (sbsh),		\
-		 "b" (dbsh),		\
-		 "0" (cnt),		\
-		 "1" (src),		\
-		 "2" (dst)		\
-		);			\
+		 "b" (dbsh)		\
+		:"edx","memory");	\
 }
 
 _BUS_SPACE_COPY_REGION(1)
--- //depot/vendor/freebsd/src/sys/i386/include/cpufunc.h	2004/04/07 20:52:05
+++ //depot/projects/smpng/sys/i386/include/cpufunc.h	2004/08/25 00:38:37
@@ -38,6 +38,8 @@
 #ifndef _MACHINE_CPUFUNC_H_
 #define _MACHINE_CPUFUNC_H_
 
+#include 
+
 struct region_descriptor;
 
 #define readb(va)	(*(volatile u_int8_t *) (va))
@@ -608,14 +610,16 @@
 	register_t eflags;
 
 	eflags = read_eflags();
-	disable_intr();
+	if (eflags & PSL_I)
+		disable_intr();
 	return (eflags);
 }
 
 static __inline void
 intr_restore(register_t eflags)
 {
-	write_eflags(eflags);
+	if (eflags & PSL_I)
+		enable_intr();
 }
 
 #else /* !(__GNUC__ || __INTEL_COMPILER) */
--- //depot/vendor/freebsd/src/sys/i386/isa/npx.c	2004/11/27 06:55:50
+++ //depot/projects/smpng/sys/i386/isa/npx.c	2005/01/19 21:44:15
@@ -497,8 +497,9 @@
 	register_t savecrit;
 
 	savecrit = intr_disable();
-	if (curthread == PCPU_GET(fpcurthread))
-		npxsave(&PCPU_GET(curpcb)->pcb_save);
+	if (td == PCPU_GET(fpcurthread))
+		/* XXX: npxdrop() instead? */
+		npxsave(&td->td_pcb->pcb_save);
 	intr_restore(savecrit);
 #ifdef NPX_DEBUG
 	if (npx_exists) {
--- //depot/vendor/freebsd/src/sys/i386/linux/linux_sysvec.c	2005/01/29 23:15:43
+++ //depot/projects/smpng/sys/i386/linux/linux_sysvec.c	2005/01/31 22:15:49
@@ -72,11 +72,6 @@
 #include 
 #include 
 
-MODULE_VERSION(linux, 1);
-MODULE_DEPEND(linux, sysvmsg, 1, 1, 1);
-MODULE_DEPEND(linux, sysvsem, 1, 1, 1);
-MODULE_DEPEND(linux, sysvshm, 1, 1, 1);
-
 MALLOC_DEFINE(M_LINUX, "linux", "Linux mode structures");
 
 #if BYTE_ORDER == LITTLE_ENDIAN
--- //depot/vendor/freebsd/src/sys/kern/kern_exec.c	2005/01/29 23:56:25
+++ //depot/projects/smpng/sys/kern/kern_exec.c	2005/01/31 22:15:49
@@ -496,6 +496,16 @@
 	} else
 		oldsigacts = NULL;
 
+	/*
+	 * Ensure that this thread's credentials match the process
+	 * credentials.
+	 */
+	if (td->td_ucred != p->p_ucred) {
+		oldcred = td->td_ucred;
+		td->td_ucred = crhold(p->p_ucred);
+		crfree(oldcred);
+	}
+
 	/* Stop profiling */
 	stopprofclock(p);
--- //depot/vendor/freebsd/src/sys/kern/kern_intr.c	2005/01/06 23:37:37
+++ //depot/projects/smpng/sys/kern/kern_intr.c	2005/01/07 18:05:05
@@ -484,7 +484,7 @@
 	struct intrhand *ih;		/* and our interrupt handler chain */
 	struct thread *td;
 	struct proc *p;
-	int count, warned, storming;
+	int count, warned;
 
 	td = curthread;
 	p = td->td_proc;
@@ -493,7 +493,6 @@
 	    ("%s: ithread and proc linkage out of sync", __func__));
 	count = 0;
 	warned = 0;
-	storming = 0;
 
 	/*
	 * As long as we have interrupts outstanding, go through the
@@ -560,7 +559,7 @@
			 * interrupts exceeds the storm threshold, then
			 * enter storming mode.
			 */
-			if (!storming && intr_storm_threshold != 0 &&
+			if (intr_storm_threshold != 0 &&
 			    count >= intr_storm_threshold) {
 				if (!warned) {
 					printf(
@@ -568,11 +567,8 @@
 					    p->p_comm);
 					warned = 1;
 				}
-				storming = 1;
-			}
-			if (storming)
 				tsleep(&count, td->td_priority, "istorm", 1);
-			else
+			} else
 				count++;
 
 			if (ithd->it_enable != NULL)
@@ -590,7 +586,6 @@
 		if (!ithd->it_need) {
 			TD_SET_IWAIT(td);
 			count = 0;
-			storming = 0;
 			CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
 			mi_switch(SW_VOL, NULL);
 			CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
--- //depot/vendor/freebsd/src/sys/kern/kern_mutex.c	2005/01/05 21:15:36
+++ //depot/projects/smpng/sys/kern/kern_mutex.c	2005/01/05 22:38:38
@@ -59,6 +59,11 @@
 #include 
 #include 
 
+#ifdef __i386__
+#include 
+#include 
+#include 
+#endif
 #include 
 #include 
 #include 
@@ -577,6 +582,9 @@
     int line)
 {
 	int i = 0;
+#ifdef __i386__
+	int apic_hack;
+#endif
 
 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
@@ -587,6 +595,11 @@
 		/* Give interrupts a chance while we spin. */
 		critical_exit();
+#ifdef __i386__
+		apic_hack = (read_eflags() & PSL_I) == 0;
+		if (apic_hack)
+			APIC_IPI_SPINWAIT_ENTER();
+#endif
 		while (m->mtx_lock != MTX_UNOWNED) {
 			if (i++ < 10000000) {
 				cpu_spinwait();
@@ -605,6 +618,10 @@
 			}
 			cpu_spinwait();
 		}
+#ifdef __i386__
+		if (apic_hack)
+			APIC_IPI_SPINWAIT_EXIT();
+#endif
 		critical_enter();
 	}
--- //depot/vendor/freebsd/src/sys/kern/kern_shutdown.c	2004/11/30 06:25:36
+++ //depot/projects/smpng/sys/kern/kern_shutdown.c	2004/12/15 21:41:34
@@ -475,7 +475,7 @@
 }
 
 #ifdef SMP
-static u_int panic_cpu = NOCPU;
+static struct thread *panic_thread = NULL;
 #endif
 
 /*
@@ -496,15 +496,14 @@
 #ifdef SMP
 	/*
	 * We don't want multiple CPU's to panic at the same time, so we
-	 * use panic_cpu as a simple spinlock.  We have to keep checking
-	 * panic_cpu if we are spinning in case the panic on the first
+	 * use panic_thread as a simple spinlock.  We have to keep checking
+	 * panic_thread if we are spinning in case the panic on the first
	 * CPU is canceled.
	 */
-	if (panic_cpu != PCPU_GET(cpuid))
-		while (atomic_cmpset_int(&panic_cpu, NOCPU,
-		    PCPU_GET(cpuid)) == 0)
-			while (panic_cpu != NOCPU)
-				; /* nothing */
+	if (panic_thread != curthread)
+		while (atomic_cmpset_ptr(&panic_thread, NULL, curthread) == 0)
+			while (panic_thread != NULL)
+				cpu_spinwait();
 #endif
 
 	bootopt = RB_AUTOBOOT | RB_DUMP;
@@ -540,7 +539,7 @@
 	/* See if the user aborted the panic, in which case we continue. */
 	if (panicstr == NULL) {
 #ifdef SMP
-		atomic_store_rel_int(&panic_cpu, NOCPU);
+		atomic_store_rel_ptr(&panic_thread, NULL);
 #endif
 		return;
 	}
--- //depot/vendor/freebsd/src/sys/kern/kern_sig.c	2005/01/06 23:37:37
+++ //depot/projects/smpng/sys/kern/kern_sig.c	2005/01/07 18:05:05
@@ -1388,7 +1388,10 @@
 		if ((p = zpfind(uap->pid)) == NULL)
 			return (ESRCH);
 	}
-	error = p_cansignal(td, p, uap->signum);
+	if (p->p_pid != 1 && p->p_flag & P_SYSTEM)
+		error = EPERM;
+	else
+		error = p_cansignal(td, p, uap->signum);
 	if (error == 0 && uap->signum)
 		psignal(p, uap->signum);
 	PROC_UNLOCK(p);
@@ -2182,7 +2185,7 @@
	/*
	 * Don't take default actions on system processes.
	 */
-	if (p->p_pid <= 1) {
+	if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) {
 #ifdef DIAGNOSTIC
		/*
		 * Are you sure you want to ignore SIGSEGV
--- //depot/vendor/freebsd/src/sys/kern/kern_synch.c	2004/12/30 20:30:44
+++ //depot/projects/smpng/sys/kern/kern_synch.c	2004/12/30 20:59:50
@@ -287,7 +287,7 @@
 #endif
 	KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
 	    (td->td_pflags & TDP_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
-	    newtd == NULL),
+	    newtd == NULL) || panicstr,
 	    ("mi_switch: switch in a critical section"));
 	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
 	    ("mi_switch: switch must be voluntary or involuntary"));
--- //depot/vendor/freebsd/src/sys/kern/subr_smp.c	2005/01/06 23:37:37
+++ //depot/projects/smpng/sys/kern/subr_smp.c	2005/01/07 18:05:05
@@ -47,6 +47,7 @@
 #include 
 #include 
+#include 
 #include 
 #include "opt_sched.h"
@@ -107,7 +108,7 @@
 static void (*smp_rv_action_func)(void *arg);
 static void (*smp_rv_teardown_func)(void *arg);
 static void *smp_rv_func_arg;
-static volatile int smp_rv_waiters[2];
+static volatile int smp_rv_waiters[3];
 
 /*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
@@ -240,8 +241,9 @@
 	ipi_selected(map, IPI_STOP);
 
 	i = 0;
-	while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
+	while ((stopped_cpus & map) != map) {
 		/* spin */
+		cpu_spinwait();
 		i++;
 #ifdef DIAGNOSTIC
 		if (i == 100000) {
@@ -281,8 +283,8 @@
 	atomic_store_rel_int(&started_cpus, map);
 
 	/* wait for each to clear its bit */
-	while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
-		; /* nothing */
+	while ((stopped_cpus & map) != 0)
+		cpu_spinwait();
 
 	return 1;
 }
@@ -300,20 +302,29 @@
 smp_rendezvous_action(void)
 {
 
+	/* Ensure we have up-to-date values. */
+	atomic_add_acq_int(&smp_rv_waiters[0], 1);
+	while (smp_rv_waiters[0] < mp_ncpus)
+		cpu_spinwait();
+
 	/* setup function */
 	if (smp_rv_setup_func != NULL)
 		smp_rv_setup_func(smp_rv_func_arg);
+
 	/* spin on entry rendezvous */
-	atomic_add_int(&smp_rv_waiters[0], 1);
-	while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
-		; /* nothing */
+	atomic_add_int(&smp_rv_waiters[1], 1);
+	while (smp_rv_waiters[1] < mp_ncpus)
+		cpu_spinwait();
+
 	/* action function */
 	if (smp_rv_action_func != NULL)
 		smp_rv_action_func(smp_rv_func_arg);
+
 	/* spin on exit rendezvous */
-	atomic_add_int(&smp_rv_waiters[1], 1);
-	while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
-		; /* nothing */
+	atomic_add_int(&smp_rv_waiters[2], 1);
+	while (smp_rv_waiters[2] < mp_ncpus)
+		cpu_spinwait();
+
 	/* teardown function */
 	if (smp_rv_teardown_func != NULL)
 		smp_rv_teardown_func(smp_rv_func_arg);
@@ -344,8 +355,9 @@
 	smp_rv_action_func = action_func;
 	smp_rv_teardown_func = teardown_func;
 	smp_rv_func_arg = arg;
-	smp_rv_waiters[0] = 0;
 	smp_rv_waiters[1] = 0;
+	smp_rv_waiters[2] = 0;
+	atomic_store_rel_int(&smp_rv_waiters[0], 0);
 
 	/* signal other processors, which will enter the IPI with interrupts off */
 	ipi_all_but_self(IPI_RENDEZVOUS);
--- //depot/vendor/freebsd/src/sys/kern/subr_witness.c	2005/01/22 21:15:26
+++ //depot/projects/smpng/sys/kern/subr_witness.c	2005/01/31 22:15:49
@@ -257,6 +257,7 @@
 static struct witness_order_list_entry order_lists[] = {
 	{ "proctree", &lock_class_sx },
 	{ "allproc", &lock_class_sx },
+	{ NULL, NULL },
 	{ "Giant", &lock_class_mtx_sleep },
 	{ "filedesc structure", &lock_class_mtx_sleep },
 	{ "pipe mutex", &lock_class_mtx_sleep },