Index: sys/amd64/amd64/machdep.c =========================================================================== --- sys/amd64/amd64/machdep.c 2011/09/12 17:10:03 #122 +++ sys/amd64/amd64/machdep.c 2011/09/12 17:10:03 @@ -1293,6 +1293,7 @@ u_int64_t msr; char *env; + sched_init_thread0(); thread0.td_kstack = physfree + KERNBASE; bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE); physfree += KSTACK_PAGES * PAGE_SIZE; Index: sys/amd64/amd64/vm_machdep.c =========================================================================== --- sys/amd64/amd64/vm_machdep.c 2011/09/12 17:10:03 #52 +++ sys/amd64/amd64/vm_machdep.c 2011/09/12 17:10:03 @@ -256,8 +256,7 @@ * Initialize machine state (pcb and trap frame) for a new thread about to * upcall. Put enough state in the new thread's PCB to get it to go back * userret(), where we can intercept it again to set the return (upcall) - * Address and stack, along with those from upcals that are from other sources - * such as those generated in thread_userret() itself. + * Address and stack, along with those from upcals that are from other sources. */ void cpu_set_upcall(struct thread *td, struct thread *td0) @@ -304,24 +303,14 @@ } /* - * Set that machine state for performing an upcall that has to - * be done in thread_userret() so that those upcalls generated - * in thread_userret() itself can be done as well. + * Modify the machine state created by cpu_set_upcall() to arrange + * for the new thread to make a specific call as its first act. */ void -cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, +cpu_set_upcall_func(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { - /* - * Do any extra cleaning that needs to be done. - * The thread may have optional components - * that are not present in a fresh thread. - * This may be a recycled thread so make it look - * as though it's newly allocated. - */ - cpu_thread_clean(td); - #ifdef COMPAT_IA32 if (td->td_proc->p_sysent == &ia32_freebsd_sysvec) { /* Index: sys/arm/arm/vm_machdep.c =========================================================================== --- sys/arm/arm/vm_machdep.c 2011/09/12 17:10:03 #36 +++ sys/arm/arm/vm_machdep.c 2011/09/12 17:10:03 @@ -269,8 +269,7 @@ * Initialize machine state (pcb and trap frame) for a new thread about to * upcall. Put enough state in the new thread's PCB to get it to go back * userret(), where we can intercept it again to set the return (upcall) - * Address and stack, along with those from upcals that are from other sources - * such as those generated in thread_userret() itself. + * Address and stack, along with those from upcals that are from other sources. */ void cpu_set_upcall(struct thread *td, struct thread *td0) @@ -296,12 +295,11 @@ } /* - * Set that machine state for performing an upcall that has to - * be done in thread_userret() so that those upcalls generated - * in thread_userret() itself can be done as well. + * Modify the machine state created by cpu_set_upcall() to arrange + * for the new thread to make a specific call as its first act. 
*/ void -cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, +cpu_set_upcall_func(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; Index: sys/arm/at91/kb920x_machdep.c =========================================================================== --- sys/arm/at91/kb920x_machdep.c 2011/09/12 17:10:03 #30 +++ sys/arm/at91/kb920x_machdep.c 2011/09/12 17:10:03 @@ -398,6 +398,7 @@ undefined_handler_address = (u_int)undefinedinstruction_bounce; undefined_init(); + sched_init_thread0(); proc_linkup0(&proc0, &thread0); thread0.td_kstack = kernelstack.pv_va; thread0.td_pcb = (struct pcb *) Index: sys/arm/sa11x0/assabet_machdep.c =========================================================================== --- sys/arm/sa11x0/assabet_machdep.c 2011/09/12 17:10:03 #25 +++ sys/arm/sa11x0/assabet_machdep.c 2011/09/12 17:10:03 @@ -384,6 +384,7 @@ /* Set stack for exception handlers */ + sched_init_thread0(); proc_linkup0(&proc0, &thread0); thread0.td_kstack = kernelstack.pv_va; thread0.td_pcb = (struct pcb *) Index: sys/arm/xscale/i80321/iq31244_machdep.c =========================================================================== --- sys/arm/xscale/i80321/iq31244_machdep.c 2011/09/12 17:10:03 #31 +++ sys/arm/xscale/i80321/iq31244_machdep.c 2011/09/12 17:10:03 @@ -372,7 +372,7 @@ undefined_handler_address = (u_int)undefinedinstruction_bounce; undefined_init(); - proc_linkup0(&proc0, &thread0); + proc_linkup(&proc0, &thread0); thread0.td_kstack = kernelstack.pv_va; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; Index: sys/i386/i386/intr_machdep.c =========================================================================== --- sys/i386/i386/intr_machdep.c 2011/09/12 17:10:03 #36 +++ sys/i386/i386/intr_machdep.c 2011/09/12 17:10:03 @@ -63,11 +63,12 @@ typedef void (*mask_fn)(void *); static int intrcnt_index; -static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct sx intr_table_lock; static struct mtx intrcnt_lock; static STAILQ_HEAD(, pic) pics; +struct intsrc *interrupt_sources[NUM_IO_INTS]; + #ifdef SMP static int assign_cpu; @@ -227,11 +228,10 @@ } void -intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) +intr_execute_handlers(struct intsrc *isrc, u_int vector, struct trapframe *frame) { struct intr_event *ie; struct thread *td; - int vector; td = curthread; @@ -250,7 +250,6 @@ * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ - vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; Index: sys/i386/i386/local_apic.c =========================================================================== --- sys/i386/i386/local_apic.c 2011/09/12 17:10:03 #46 +++ sys/i386/i386/local_apic.c 2011/09/12 17:10:03 @@ -641,10 +641,11 @@ { struct intsrc *isrc; - if (vector == -1) - panic("Couldn't get vector from ISR!"); - isrc = intr_lookup_source(apic_idt_to_irq(vector)); - intr_execute_handlers(isrc, frame); + KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL && + vector <= APIC_IO_INTS + APIC_NUM_IOINTS, + ("Vector %u does not map to an IRQ line", vector)); + isrc = interrupt_sources[ioint_irqs[vector - APIC_IO_INTS]]; + intr_execute_handlers(isrc, vector, frame); } void @@ -877,17 +878,6 @@ mtx_unlock_spin(&icu_lock); } -/* Map an IDT vector (APIC) to an IRQ (interrupt source). 
*/ -u_int -apic_idt_to_irq(u_int vector) -{ - - KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL && - vector <= APIC_IO_INTS + APIC_NUM_IOINTS, - ("Vector %u does not map to an IRQ line", vector)); - return (ioint_irqs[vector - APIC_IO_INTS]); -} - #ifdef DDB /* * Dump data about APIC IDT vector mappings. Index: sys/i386/i386/machdep.c =========================================================================== --- sys/i386/i386/machdep.c 2011/09/12 17:10:03 #208 +++ sys/i386/i386/machdep.c 2011/09/12 17:10:03 @@ -2273,6 +2273,7 @@ int gsel_tss, metadata_missing, x; struct pcpu *pc; + sched_init_thread0(); thread0.td_kstack = proc0kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; Index: sys/i386/i386/trap.c =========================================================================== --- sys/i386/i386/trap.c 2011/09/12 17:10:03 #117 +++ sys/i386/i386/trap.c 2011/09/12 17:10:03 @@ -388,7 +388,6 @@ break; case T_PAGEFLT: /* page fault */ - i = trap_pfault(frame, TRUE, eva); #if defined(I586_CPU) && !defined(NO_F00F_HACK) if (i == -2) { Index: sys/i386/i386/vm_machdep.c =========================================================================== --- sys/i386/i386/vm_machdep.c 2011/09/12 17:10:03 #120 +++ sys/i386/i386/vm_machdep.c 2011/09/12 17:10:03 @@ -379,8 +379,7 @@ * Initialize machine state (pcb and trap frame) for a new thread about to * upcall. Put enough state in the new thread's PCB to get it to go back * userret(), where we can intercept it again to set the return (upcall) - * Address and stack, along with those from upcals that are from other sources - * such as those generated in thread_userret() itself. + * Address and stack, along with those from upcals that are from other sources. */ void cpu_set_upcall(struct thread *td, struct thread *td0) @@ -437,12 +436,11 @@ } /* - * Set that machine state for performing an upcall that has to - * be done in thread_userret() so that those upcalls generated - * in thread_userret() itself can be done as well. + * Modify the machine state created by cpu_set_upcall() to arrange + * for the new thread to make a specific call as its first act. 
*/ void -cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, +cpu_set_upcall_func(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { Index: sys/i386/include/apicvar.h =========================================================================== --- sys/i386/include/apicvar.h 2011/09/12 17:10:03 #25 +++ sys/i386/include/apicvar.h 2011/09/12 17:10:03 @@ -181,7 +181,6 @@ void apic_disable_vector(u_int vector); void apic_enable_vector(u_int vector); void apic_free_vector(u_int vector, u_int irq); -u_int apic_idt_to_irq(u_int vector); void apic_register_enumerator(struct apic_enumerator *enumerator); void *ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase); int ioapic_disable_pin(void *cookie, u_int pin); Index: sys/i386/include/intr_machdep.h =========================================================================== --- sys/i386/include/intr_machdep.h 2011/09/12 17:10:03 #21 +++ sys/i386/include/intr_machdep.h 2011/09/12 17:10:03 @@ -118,6 +118,8 @@ u_int is_handlers; }; +extern struct intsrc *interrupt_sources[NUM_IO_INTS]; + struct trapframe; extern struct mtx icu_lock; @@ -138,7 +140,8 @@ #endif int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol); -void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame); +void intr_execute_handlers(struct intsrc *isrc, u_int vector, + struct trapframe *frame); struct intsrc *intr_lookup_source(int vector); int intr_register_pic(struct pic *pic); int intr_register_source(struct intsrc *isrc); Index: sys/i386/isa/atpic.c =========================================================================== --- sys/i386/isa/atpic.c 2011/09/12 17:10:03 #32 +++ sys/i386/isa/atpic.c 2011/09/12 17:10:03 @@ -592,7 +592,7 @@ if ((isr & IRQ_MASK(7)) == 0) return; } - intr_execute_handlers(isrc, frame); + intr_execute_handlers(isrc, vector, frame); } #ifdef DEV_ISA Index: sys/ia64/ia64/machdep.c =========================================================================== --- sys/ia64/ia64/machdep.c 2011/09/12 17:10:03 #204 +++ sys/ia64/ia64/machdep.c 2011/09/12 17:10:03 @@ -631,6 +631,7 @@ if (boothowto & RB_VERBOSE) bootverbose = 1; + sched_init_thread0(); /* * Setup the PCPU data for the bootstrap processor. It is needed * by printf(). Also, since printf() has critical sections, we Index: sys/ia64/ia64/vm_machdep.c =========================================================================== --- sys/ia64/ia64/vm_machdep.c 2011/09/12 17:10:03 #73 +++ sys/ia64/ia64/vm_machdep.c 2011/09/12 17:10:03 @@ -172,7 +172,7 @@ } void -cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, +cpu_set_upcall_func(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct ia64_fdesc *fd; Index: sys/kern/imgact_elf.c =========================================================================== --- sys/kern/imgact_elf.c 2011/09/12 17:10:03 #92 +++ sys/kern/imgact_elf.c 2011/09/12 17:10:03 @@ -1142,8 +1142,7 @@ /* Write it to the core file. 
*/ return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0, - UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, - td)); + UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, td)); } #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32 Index: sys/kern/init_main.c =========================================================================== --- sys/kern/init_main.c 2011/09/12 17:10:03 #119 +++ sys/kern/init_main.c 2011/09/12 17:10:03 @@ -95,7 +95,7 @@ static struct session session0; static struct pgrp pgrp0; struct proc proc0; -struct thread thread0 __aligned(16); +struct thread *thread0p; struct vmspace vmspace0; struct proc *initproc; @@ -374,7 +374,7 @@ GIANT_REQUIRED; p = &proc0; td = &thread0; - + /* * Initialize magic number and osrel. */ Index: sys/kern/kern_fork.c =========================================================================== --- sys/kern/kern_fork.c 2011/09/12 17:10:03 #177 +++ sys/kern/kern_fork.c 2011/09/12 17:10:03 @@ -788,8 +788,8 @@ p = td->td_proc; KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); - CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", - td, td->td_sched, p->p_pid, td->td_name); + CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", + td, p->p_pid, td->td_name); sched_fork_exit(td); /* Index: sys/kern/kern_intr.c =========================================================================== --- sys/kern/kern_intr.c 2011/09/12 17:10:03 #103 +++ sys/kern/kern_intr.c 2011/09/12 17:10:03 @@ -801,7 +801,7 @@ CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, td->td_name); TD_CLR_IWAIT(td); - sched_add(td, SRQ_INTR); + sched_run_ithread(td); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", __func__, p->p_pid, td->td_name, it->it_need, td->td_state); @@ -955,7 +955,7 @@ CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, td->td_name); TD_CLR_IWAIT(td); - sched_add(td, SRQ_INTR); + sched_run_ithread(td); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", __func__, p->p_pid, td->td_name, it->it_need, td->td_state); Index: sys/kern/kern_kthread.c =========================================================================== --- sys/kern/kern_kthread.c 2011/09/12 17:10:03 #31 +++ sys/kern/kern_kthread.c 2011/09/12 17:10:03 @@ -295,7 +295,7 @@ thread_link(newtd, p); thread_lock(oldtd); /* let the scheduler know about these things. */ - sched_fork_thread(oldtd, newtd); + sched_fork(oldtd, newtd); TD_SET_CAN_RUN(newtd); thread_unlock(oldtd); PROC_UNLOCK(p); Index: sys/kern/kern_proc.c =========================================================================== --- sys/kern/kern_proc.c 2011/09/12 17:10:03 #169 +++ sys/kern/kern_proc.c 2011/09/12 17:10:03 @@ -682,7 +682,6 @@ kp->ki_structsize = sizeof(*kp); kp->ki_paddr = p; - kp->ki_addr =/* p->p_addr; */0; /* XXX */ kp->ki_args = p->p_args; kp->ki_textvp = p->p_textvp; #ifdef KTRACE @@ -831,7 +830,7 @@ bzero(kp->ki_lockname, sizeof(kp->ki_lockname)); } - if (p->p_state == PRS_NORMAL) { /* approximate. 
*/ + if (p->p_state == PRS_NORMAL) { if (TD_ON_RUNQ(td) || TD_CAN_RUN(td) || TD_IS_RUNNING(td)) { Index: sys/kern/kern_sig.c =========================================================================== --- sys/kern/kern_sig.c 2011/09/12 17:10:03 #236 +++ sys/kern/kern_sig.c 2011/09/12 17:10:03 @@ -1839,8 +1839,6 @@ ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)], &td->td_sigmask, code); #endif - (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], - ksi, &td->td_sigmask); SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); if (!SIGISMEMBER(ps->ps_signodefer, sig)) SIGADDSET(td->td_sigmask, sig); Index: sys/kern/kern_switch.c =========================================================================== --- sys/kern/kern_switch.c 2011/09/12 17:10:03 #130 +++ sys/kern/kern_switch.c 2011/09/12 17:10:03 @@ -24,7 +24,6 @@ * SUCH DAMAGE. */ - #include __FBSDID("$FreeBSD: src/sys/kern/kern_switch.c,v 1.145 2008/05/12 06:42:06 julian Exp $"); @@ -140,7 +139,6 @@ retry: td = sched_choose(); - /* * If we are in panic, only allow system threads, * plus the one we are running in, to be run. @@ -323,11 +321,10 @@ rqh = &rq->rq_queues[pri]; CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p", td, td->td_priority, pri, rqh); - if (flags & SRQ_PREEMPTED) { + if (flags & SRQ_PREEMPTED) TAILQ_INSERT_HEAD(rqh, td, td_runq); - } else { + else TAILQ_INSERT_TAIL(rqh, td, td_runq); - } } void Index: sys/kern/kern_synch.c =========================================================================== --- sys/kern/kern_synch.c 2011/09/12 17:10:03 #162 +++ sys/kern/kern_synch.c 2011/09/12 17:10:03 @@ -406,8 +406,8 @@ td->td_generation++; /* bump preempt-detect counter */ PCPU_INC(cnt.v_swtch); PCPU_SET(switchticks, ticks); - CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)", - td->td_tid, td->td_sched, p->p_pid, td->td_name); + CTR3(KTR_PROC, "mi_switch: old thread %ld (pid %ld, %s)", + td->td_tid, p->p_pid, td->td_name); #if (KTR_COMPILE & KTR_SCHED) != 0 if (TD_IS_IDLETHREAD(td)) CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle", @@ -427,8 +427,8 @@ CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d", td, td->td_name, td->td_priority); - CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)", - td->td_tid, td->td_sched, p->p_pid, td->td_name); + CTR3(KTR_PROC, "mi_switch: new thread %ld (pid %ld, %s)", + td->td_tid, p->p_pid, td->td_name); /* * If the last thread was exiting, finish cleaning it up. Index: sys/kern/kern_thr.c =========================================================================== --- sys/kern/kern_thr.c 2011/09/12 17:10:03 #68 +++ sys/kern/kern_thr.c 2011/09/12 17:10:03 @@ -76,7 +76,6 @@ #endif extern int max_threads_per_proc; - static int create_thread(struct thread *td, mcontext_t *ctx, void (*start_func)(void *), void *arg, char *stack_base, size_t stack_size, @@ -172,7 +171,6 @@ return (EINVAL); } } - /* Initialize our td */ newtd = thread_alloc(); if (newtd == NULL) @@ -216,7 +214,7 @@ stack.ss_sp = stack_base; stack.ss_size = stack_size; /* Set upcall address to user thread entry function. */ - cpu_set_upcall_kse(newtd, start_func, arg, &stack); + cpu_set_upcall_func(newtd, start_func, arg, &stack); /* Setup user TLS address and TLS pointer register. */ error = cpu_set_user_tls(newtd, tls_base); if (error != 0) { @@ -233,7 +231,7 @@ bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name)); thread_lock(td); /* let the scheduler know about these things. 
*/ - sched_fork_thread(td, newtd); + sched_fork(td, newtd); thread_unlock(td); if (P_SHOULDSTOP(p)) newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK; Index: sys/kern/kern_thread.c =========================================================================== --- sys/kern/kern_thread.c 2011/09/12 17:10:03 #272 +++ sys/kern/kern_thread.c 2011/09/12 17:10:03 @@ -68,12 +68,19 @@ SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, &max_threads_hits, 0, ""); +/* + * I'm unsure if this is safe to remove. thread_free might not be able + * to be called from the contexts that thread_stash is. + */ +#define ZOMBIES 1 +#ifdef ZOMBIES TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); static struct mtx zombie_lock; MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); static void thread_zombie(struct thread *); - +static void thread_reap(void); +#endif struct mtx tid_lock; static struct unrhdr *tid_unrhdr; @@ -156,7 +163,6 @@ td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); EVENTHANDLER_INVOKE(thread_init, td); - td->td_sched = (struct td_sched *)&td[1]; umtx_thread_init(td); td->td_kstack = 0; return (0); @@ -224,6 +230,7 @@ 16 - 1, 0); } +#ifdef ZOMBIES /* * Place an unused thread on the zombie list. * Use the slpq as that must be unused by now. @@ -235,6 +242,7 @@ TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); mtx_unlock_spin(&zombie_lock); } +#endif /* * Release a thread that has exited after cpu_throw(). @@ -243,13 +251,18 @@ thread_stash(struct thread *td) { atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); +#ifdef ZOMBIES thread_zombie(td); +#else + thread_free(td); /* unsafe here? */ +#endif } +#ifdef ZOMBIES /* - * Reap zombie resources. + * Reap zombie threads. */ -void +static void thread_reap(void) { struct thread *td_first, *td_next; @@ -273,6 +286,7 @@ } } } +#endif /* * Allocate a thread. @@ -282,7 +296,9 @@ { struct thread *td; +#ifdef ZOMBIES thread_reap(); /* check if any zombies to get */ +#endif td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); @@ -294,7 +310,6 @@ return (td); } - /* * Deallocate a thread. */ @@ -326,7 +341,6 @@ { uint64_t new_switchtime; struct thread *td; - struct thread *td2; struct proc *p; td = curthread; @@ -372,8 +386,7 @@ if (p->p_flag & P_HADTHREADS) { if (p->p_numthreads > 1) { thread_unlink(td); - td2 = FIRST_THREAD_IN_PROC(p); - sched_exit_thread(td2, td); + sched_exit(p, td); /* * The test below is NOT true if we are the @@ -387,7 +400,6 @@ thread_unlock(p->p_singlethread); } } - atomic_add_int(&td->td_proc->p_exitthreads, 1); PCPU_SET(deadthread, td); } else { @@ -431,13 +443,18 @@ td->td_cpuset = NULL; cpu_thread_clean(td); crfree(td->td_ucred); +#ifdef ZOMBIES thread_reap(); /* check for zombie threads etc. */ +#endif } /* * Link a thread to a process. * set up anything that needs to be initialized for it to * be used by the process. + * + * Called from: + * proc_linkup() */ void thread_link(struct thread *td, struct proc *p) Index: sys/kern/p1003_1b.c =========================================================================== --- sys/kern/p1003_1b.c 2011/09/12 17:10:03 #8 +++ sys/kern/p1003_1b.c 2011/09/12 17:10:03 @@ -156,6 +156,7 @@ if (targetp == NULL) { return (ESRCH); } + /* XXX: This doesn't work well for threads. */ targettd = FIRST_THREAD_IN_PROC(targetp); } @@ -223,6 +224,7 @@ e = ESRCH; goto done2; } + /* XXX: This doesn't work well for threads. 
*/ targettd = FIRST_THREAD_IN_PROC(targetp); } Index: sys/kern/sched_4bsd.c =========================================================================== --- sys/kern/sched_4bsd.c 2011/09/12 17:10:03 #126 +++ sys/kern/sched_4bsd.c 2011/09/12 17:10:03 @@ -101,7 +101,14 @@ #define SKE_RUNQ_PCPU(ts) \ ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq) -static struct td_sched td_sched0; +#define TD_TO_TS(td) ((struct td_sched *)(&(td)[1])) + +/* Packed structure to match the layout of the uma thread zone */ +static struct { + struct thread initial_thread; + struct td_sched initial_sched; +} sched0 __aligned(16); + struct mtx sched_lock; static int sched_tdcnt; /* Total runnable threads in the system. */ @@ -445,7 +452,7 @@ FOREACH_THREAD_IN_PROC(p, td) { awake = 0; thread_lock(td); - ts = td->td_sched; + ts = TD_TO_TS(td); /* * Increment sleep time (if sleeping). We * ignore overflow, as above. @@ -550,7 +557,7 @@ fixpt_t loadfac; unsigned int newcpu; - ts = td->td_sched; + ts = TD_TO_TS(td); loadfac = loadfactor(averunnable.ldavg[0]); if (ts->ts_slptime > 5 * loadfac) td->td_estcpu = 0; @@ -629,7 +636,6 @@ * Set up the scheduler specific parts of proc0. */ proc0.p_sched = NULL; /* XXX */ - thread0.td_sched = &td_sched0; thread0.td_lock = &sched_lock; mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); } @@ -672,7 +678,7 @@ struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); - ts = td->td_sched; + ts = TD_TO_TS(td); ts->ts_cpticks++; td->td_estcpu = ESTCPULIM(td->td_estcpu + 1); @@ -694,21 +700,13 @@ * charge childs scheduling cpu usage to parent. */ void -sched_exit(struct proc *p, struct thread *td) +sched_exit(struct proc *p, struct thread *child) { + struct thread *td; CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", - td, td->td_name, td->td_priority); - PROC_LOCK_ASSERT(p, MA_OWNED); - sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); -} - -void -sched_exit_thread(struct thread *td, struct thread *child) -{ - - CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", child, child->td_name, child->td_priority); + td = FIRST_THREAD_IN_PROC(p); thread_lock(td); td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu); thread_unlock(td); @@ -721,12 +719,6 @@ void sched_fork(struct thread *td, struct thread *childtd) { - sched_fork_thread(td, childtd); -} - -void -sched_fork_thread(struct thread *td, struct thread *childtd) -{ struct td_sched *ts; childtd->td_estcpu = td->td_estcpu; @@ -887,7 +879,7 @@ THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_slptick = ticks; - td->td_sched->ts_slptime = 0; + TD_TO_TS(td)->ts_slptime = 0; if (pri) sched_prio(td, pri); if (TD_IS_SUSPENDED(td) || pri <= PSOCK) @@ -900,7 +892,7 @@ struct td_sched *ts; struct proc *p; - ts = td->td_sched; + ts = TD_TO_TS(td); p = td->td_proc; THREAD_LOCK_ASSERT(td, MA_OWNED); @@ -1018,7 +1010,7 @@ struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); - ts = td->td_sched; + ts = TD_TO_TS(td); td->td_flags &= ~TDF_CANSWAP; if (ts->ts_slptime > 1) { updatepri(td); @@ -1161,7 +1153,7 @@ int cpu; int single_cpu = 0; - ts = td->td_sched; + ts = TD_TO_TS(td); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("sched_add: trying to run inhibited thread")); @@ -1230,7 +1222,7 @@ #else /* SMP */ { struct td_sched *ts; - ts = td->td_sched; + ts = TD_TO_TS(td); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("sched_add: trying to run inhibited thread")); @@ -1276,11 +1268,17 @@ #endif /* SMP */ void +sched_run_ithread(struct thread *td) +{ + sched_add(td, SRQ_INTR); +} + +void sched_rem(struct 
thread *td) { struct td_sched *ts; - ts = td->td_sched; + ts = TD_TO_TS(td); KASSERT(td->td_flags & TDF_INMEM, ("sched_rem: thread swapped out")); KASSERT(TD_ON_RUNQ(td), @@ -1383,7 +1381,7 @@ KASSERT(TD_IS_RUNNING(td), ("sched_bind: cannot bind non-running thread")); - ts = td->td_sched; + ts = TD_TO_TS(td); td->td_flags |= TDF_BOUND; #ifdef SMP @@ -1435,13 +1433,20 @@ return (sizeof(struct thread) + sizeof(struct td_sched)); } +/* + * Early boot support. Make thread0 a viable entity. + */ +void +sched_init_thread0(void) +{ + + thread0p = &sched0.initial_thread; +} + fixpt_t sched_pctcpu(struct thread *td) { - struct td_sched *ts; - - ts = td->td_sched; - return (ts->ts_pctcpu); + return (TD_TO_TS(td)->ts_pctcpu); } void @@ -1516,3 +1521,22 @@ sched_affinity(struct thread *td) { } + +#include + +/* + * thread is being either created or recycled. + * Fix up the per-scheduler resources associated with it. + * Called from: + * sched_fork() + * thread_dtor() (*may go away) + * thread_init() (*may go away) + */ +void +sched_newthread(struct thread *td) +{ + struct td_sched *ts; + + ts = TD_TO_TS(td); + bzero(ts, sizeof(*ts)); +} Index: sys/kern/sched_ule.c =========================================================================== --- sys/kern/sched_ule.c 2011/09/12 17:10:03 #242 +++ sys/kern/sched_ule.c 2011/09/12 17:10:03 @@ -105,12 +105,17 @@ #define TSF_BOUND 0x0001 /* Thread can not migrate. */ #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */ -static struct td_sched td_sched0; +#define TD_TO_TS(td) ((struct td_sched *)(&(td)[1])) #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) #define THREAD_CAN_SCHED(td, cpu) \ CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) +static struct { + struct thread initial_thread; + struct td_sched initial_sched; +} sched0 __aligned(16); + /* * Cpu percentage computation macros and defines. * @@ -267,7 +272,7 @@ static int sched_interact_score(struct thread *); static void sched_interact_update(struct thread *); static void sched_interact_fork(struct thread *); -static void sched_pctcpu_update(struct td_sched *); +static void sched_pctcpu_update(struct thread *); /* Operations on per processor queues */ static struct thread *tdq_choose(struct tdq *); @@ -408,7 +413,7 @@ THREAD_LOCK_ASSERT(td, MA_OWNED); pri = td->td_priority; - ts = td->td_sched; + ts = TD_TO_TS(td); TD_SET_RUNQ(td); if (THREAD_CAN_MIGRATE(td)) { tdq->tdq_transferable++; @@ -454,9 +459,8 @@ { struct td_sched *ts; - ts = td->td_sched; + ts = TD_TO_TS(td); TDQ_LOCK_ASSERT(tdq, MA_OWNED); - KASSERT(ts->ts_runq != NULL, ("tdq_runq_remove: thread %p null ts_runq", td)); if (ts->ts_flags & TSF_XFERABLE) { tdq->tdq_transferable--; @@ -505,6 +509,7 @@ if ((td->td_proc->p_flag & P_NOLOAD) == 0) tdq->tdq_sysload--; CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); + TD_TO_TS(td)->ts_runq = NULL; } /* @@ -851,7 +856,6 @@ static int tdq_move(struct tdq *from, struct tdq *to) { - struct td_sched *ts; struct thread *td; struct tdq *tdq; int cpu; @@ -864,7 +868,6 @@ td = tdq_steal(tdq, cpu); if (td == NULL) return (0); - ts = td->td_sched; /* * Although the run queue is locked the thread may be blocked. Lock * it to clear this and acquire the run-queue lock. @@ -873,7 +876,7 @@ /* Drop recursive lock on from acquired via thread_lock(). 
*/ TDQ_UNLOCK(from); sched_rem(td); - ts->ts_cpu = cpu; + TD_TO_TS(td)->ts_cpu = cpu; td->td_lock = TDQ_LOCKPTR(to); tdq_add(to, td, SRQ_YIELDING); return (1); } @@ -948,7 +951,7 @@ if (tdq->tdq_ipipending) return; - cpu = td->td_sched->ts_cpu; + cpu = TD_TO_TS(td)->ts_cpu; pri = td->td_priority; cpri = pcpu_find(cpu)->pc_curthread->td_priority; if (!sched_shouldpreempt(pri, cpri, 1)) @@ -1079,6 +1082,8 @@ THREAD_LOCK_ASSERT(td, MA_OWNED); tdq = TDQ_CPU(cpu); td->td_sched->ts_cpu = cpu; + ts = TD_TO_TS(td); + ts->ts_cpu = cpu; /* * If the lock matches just return the queue. */ @@ -1125,9 +1130,9 @@ int cpu; self = PCPU_GET(cpuid); - ts = td->td_sched; if (smp_started == 0) return (self); + ts = TD_TO_TS(td); /* * Don't migrate a running thread from sched_switch(). */ @@ -1342,7 +1347,7 @@ struct td_sched *ts; int div; - ts = td->td_sched; + ts = TD_TO_TS(td); /* * The score is only needed if this is likely to be an interactive * task. Don't go through the expense of computing it if there's @@ -1379,6 +1384,7 @@ static void sched_priority(struct thread *td) { + struct td_sched *ts; int score; int pri; @@ -1407,15 +1413,15 @@ pri, score)); } else { pri = SCHED_PRI_MIN; - if (td->td_sched->ts_ticks) - pri += SCHED_PRI_TICKS(td->td_sched); + ts = TD_TO_TS(td); + if (ts->ts_ticks) + pri += SCHED_PRI_TICKS(ts); pri += SCHED_PRI_NICE(td->td_proc->p_nice); KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, ("sched_priority: invalid priority %d: nice %d, " "ticks %d ftick %d ltick %d tick pri %d", - pri, td->td_proc->p_nice, td->td_sched->ts_ticks, - td->td_sched->ts_ftick, td->td_sched->ts_ltick, - SCHED_PRI_TICKS(td->td_sched))); + pri, td->td_proc->p_nice, ts->ts_ticks, + ts->ts_ftick, ts->ts_ltick, SCHED_PRI_TICKS(ts))); } sched_user_prio(td, pri); @@ -1433,7 +1439,7 @@ struct td_sched *ts; u_int sum; - ts = td->td_sched; + ts = TD_TO_TS(td); sum = ts->ts_runtime + ts->ts_slptime; if (sum < SCHED_SLP_RUN_MAX) return; @@ -1475,14 +1481,16 @@ static void sched_interact_fork(struct thread *td) { + struct td_sched *ts; int ratio; int sum; - sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; + ts = TD_TO_TS(td); + sum = ts->ts_runtime + ts->ts_slptime; if (sum > SCHED_SLP_RUN_FORK) { ratio = sum / SCHED_SLP_RUN_FORK; - td->td_sched->ts_runtime /= ratio; - td->td_sched->ts_slptime /= ratio; + ts->ts_runtime /= ratio; + ts->ts_slptime /= ratio; } } @@ -1497,10 +1505,9 @@ * Set up the scheduler specific parts of proc0. */ proc0.p_sched = NULL; /* XXX */ - thread0.td_sched = &td_sched0; - td_sched0.ts_ltick = ticks; - td_sched0.ts_ftick = ticks; - td_sched0.ts_slice = sched_slice; + TD_TO_TS(&thread0)->ts_ltick = ticks; + TD_TO_TS(&thread0)->ts_ftick = ticks; + TD_TO_TS(&thread0)->ts_slice = sched_slice; } /* @@ -1523,8 +1530,9 @@ * mechanism since it happens with less regular and frequent events. 
*/ static void -sched_pctcpu_update(struct td_sched *ts) +sched_pctcpu_update(struct thread *td) { + struct td_sched *ts = TD_TO_TS(td); if (ts->ts_ticks == 0) return; @@ -1558,7 +1566,7 @@ CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", td, td->td_name, td->td_priority, prio, curthread, curthread->td_name); - ts = td->td_sched; + ts = TD_TO_TS(td); THREAD_LOCK_ASSERT(td, MA_OWNED); if (td->td_priority == prio) return; @@ -1725,7 +1733,7 @@ { struct tdq *tdn; - tdn = TDQ_CPU(td->td_sched->ts_cpu); + tdn = TDQ_CPU(TD_TO_TS(td)->ts_cpu); #ifdef SMP tdq_load_rem(tdq, td); /* @@ -1783,7 +1791,7 @@ cpuid = PCPU_GET(cpuid); tdq = TDQ_CPU(cpuid); - ts = td->td_sched; + ts = TD_TO_TS(td); mtx = td->td_lock; ts->ts_rltick = ticks; td->td_lastcpu = td->td_oncpu; @@ -1913,7 +1921,7 @@ int slptick; THREAD_LOCK_ASSERT(td, MA_OWNED); - ts = td->td_sched; + ts = TD_TO_TS(td); td->td_flags &= ~TDF_CANSWAP; /* * If we slept for more than a tick update our interactivity and @@ -1927,7 +1935,7 @@ hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; ts->ts_slptime += hzticks; sched_interact_update(td); - sched_pctcpu_update(ts); + sched_pctcpu_update(td); } /* Reset the slice value after we sleep. */ ts->ts_slice = sched_slice; @@ -1941,24 +1949,6 @@ void sched_fork(struct thread *td, struct thread *child) { - THREAD_LOCK_ASSERT(td, MA_OWNED); - sched_fork_thread(td, child); - /* - * Penalize the parent and child for forking. - */ - sched_interact_fork(child); - sched_priority(child); - td->td_sched->ts_runtime += tickincr; - sched_interact_update(td); - sched_priority(td); -} - -/* - * Fork a new thread, may be within the same process. - */ -void -sched_fork_thread(struct thread *td, struct thread *child) -{ struct td_sched *ts; struct td_sched *ts2; @@ -1966,8 +1956,8 @@ /* * Initialize child. */ - ts = td->td_sched; - ts2 = child->td_sched; + ts = TD_TO_TS(td); + ts2 = TD_TO_TS(child); child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); child->td_cpuset = cpuset_ref(td->td_cpuset); ts2->ts_cpu = ts->ts_cpu; @@ -1986,6 +1976,14 @@ ts2->ts_slptime = ts->ts_slptime; ts2->ts_runtime = ts->ts_runtime; ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ + /* + * Penalize the parent and child for forking. + */ + sched_interact_fork(child); + sched_priority(child); + TD_TO_TS(td)->ts_runtime += tickincr; + sched_interact_update(td); + sched_priority(td); } /* @@ -2003,32 +2001,20 @@ /* * Return some of the child's priority and interactivity to the parent. - */ -void -sched_exit(struct proc *p, struct thread *child) -{ - struct thread *td; - - CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", - child, child->td_name, child->td_priority); - - PROC_LOCK_ASSERT(p, MA_OWNED); - td = FIRST_THREAD_IN_PROC(p); - sched_exit_thread(td, child); -} - -/* + * * Penalize another thread for the time spent on this one. This helps to * worsen the priority and interactivity of processes which schedule batch * jobs such as make. This has little effect on the make process itself but * causes new processes spawned by it to receive worse scores immediately. */ void -sched_exit_thread(struct thread *td, struct thread *child) +sched_exit(struct proc *p, struct thread *child) { + struct thread *td; - CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", + CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", child, child->td_name, child->td_priority); + td = FIRST_THREAD_IN_PROC(p); /* * Give the child's runtime to the parent without returning the @@ -2036,7 +2022,7 @@ * launch expensive things to mark their children as expensive. 
*/ thread_lock(td); - td->td_sched->ts_runtime += child->td_sched->ts_runtime; + TD_TO_TS(td)->ts_runtime += TD_TO_TS(child)->ts_runtime; sched_interact_update(td); sched_priority(td); thread_unlock(td); @@ -2129,7 +2115,7 @@ if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) tdq->tdq_ridx = tdq->tdq_idx; } - ts = td->td_sched; + ts = TD_TO_TS(td); if (td->td_pri_class & PRI_FIFO_BIT) return; if (td->td_pri_class == PRI_TIMESHARE) { @@ -2137,7 +2123,7 @@ * We used a tick; charge it to the thread so * that we can compute our interactivity. */ - td->td_sched->ts_runtime += tickincr; + ts->ts_runtime += tickincr; sched_interact_update(td); sched_priority(td); } @@ -2162,7 +2148,7 @@ { struct td_sched *ts; - ts = curthread->td_sched; + ts = TD_TO_TS(curthread); /* Adjust ticks for pctcpu */ ts->ts_ticks += 1 << SCHED_TICK_SHIFT; ts->ts_ltick = ticks; @@ -2171,7 +2157,7 @@ * second. */ if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) - sched_pctcpu_update(ts); + sched_pctcpu_update(curthread); } /* @@ -2213,7 +2199,7 @@ TDQ_LOCK_ASSERT(tdq, MA_OWNED); td = tdq_choose(tdq); if (td) { - td->td_sched->ts_ltick = ticks; + TD_TO_TS(td)->ts_ltick = ticks; tdq_runq_rem(tdq, td); tdq->tdq_lowpri = td->td_priority; return (td); @@ -2317,10 +2303,16 @@ sched_setpreempt(td); } +void +sched_run_ithread(struct thread *td) +{ + sched_add(td, SRQ_INTR); +} + /* * Remove a thread from a run-queue without running it. This is used * when we're stealing a thread from a remote queue. Otherwise all threads - * exit by calling sched_exit_thread() and sched_throw() themselves. + * exit by calling sched_exit() and sched_throw() themselves. */ void sched_rem(struct thread *td) @@ -2330,7 +2322,7 @@ CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", td, td->td_name, td->td_priority, curthread, curthread->td_name); - tdq = TDQ_CPU(td->td_sched->ts_cpu); + tdq = TDQ_CPU(TD_TO_TS(td)->ts_cpu); TDQ_LOCK_ASSERT(tdq, MA_OWNED); MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); KASSERT(TD_ON_RUNQ(td), @@ -2352,7 +2344,7 @@ struct td_sched *ts; pctcpu = 0; - ts = td->td_sched; + ts = TD_TO_TS(td); if (ts == NULL) return (0); @@ -2360,7 +2352,7 @@ if (ts->ts_ticks) { int rtick; - sched_pctcpu_update(ts); + sched_pctcpu_update(td); /* How many rtick per second ? */ rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; @@ -2411,7 +2403,7 @@ struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); - ts = td->td_sched; + ts = TD_TO_TS(td); if (ts->ts_flags & TSF_BOUND) sched_unbind(td); ts->ts_flags |= TSF_BOUND; @@ -2432,7 +2424,7 @@ struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); - ts = td->td_sched; + ts = TD_TO_TS(td); if ((ts->ts_flags & TSF_BOUND) == 0) return; ts->ts_flags &= ~TSF_BOUND; @@ -2443,7 +2435,7 @@ sched_is_bound(struct thread *td) { THREAD_LOCK_ASSERT(td, MA_OWNED); - return (td->td_sched->ts_flags & TSF_BOUND); + return (TD_TO_TS(td)->ts_flags & TSF_BOUND); } /* @@ -2489,6 +2481,16 @@ } /* + * Early boot support. Make thread0 a viable entity. + */ +void +sched_init_thread0(void) +{ + + thread0p = &sched0.initial_thread; +} + +/* * The actual idle process. */ void @@ -2585,7 +2587,7 @@ */ cpuid = PCPU_GET(cpuid); tdq = TDQ_CPU(cpuid); - ts = td->td_sched; + ts = TD_TO_TS(td); if (TD_IS_IDLETHREAD(td)) td->td_lock = TDQ_LOCKPTR(tdq); MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); @@ -2629,3 +2631,22 @@ /* ps compat. All cpu percentages from ULE are weighted. 
*/ static int ccpu = 0; SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); + +#include + +/* + * thread is being either created or recycled. + * Fix up the per-scheduler resources associated with it. + * Called from: + * sched_fork() + * thread_dtor() (*may go away) + * thread_init() (*may go away) + */ +void +sched_newthread(struct thread *td) +{ + struct td_sched *ts; + + ts = TD_TO_TS(td); + bzero(ts, sizeof(*ts)); +} Index: sys/kern/subr_trap.c =========================================================================== --- sys/kern/subr_trap.c 2011/09/12 17:10:03 #109 +++ sys/kern/subr_trap.c 2011/09/12 17:10:03 @@ -109,9 +109,8 @@ /* * Charge system time if profiling. */ - if (p->p_flag & P_PROFIL) { + if (p->p_flag & P_PROFIL) addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio); - } /* * Let the scheduler adjust our priority etc. */ Index: sys/pc98/pc98/machdep.c =========================================================================== --- sys/pc98/pc98/machdep.c 2011/09/12 17:10:03 #47 +++ sys/pc98/pc98/machdep.c 2011/09/12 17:10:03 @@ -1931,6 +1931,7 @@ int gsel_tss, metadata_missing, x; struct pcpu *pc; + sched_init_thread0(); thread0.td_kstack = proc0kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; Index: sys/sparc64/sparc64/machdep.c =========================================================================== --- sys/sparc64/sparc64/machdep.c 2011/09/12 17:10:03 #139 +++ sys/sparc64/sparc64/machdep.c 2011/09/12 17:10:03 @@ -399,6 +399,7 @@ /* * Initialize proc0 stuff (p_contested needs to be done early). */ + sched_init_thread0(); proc_linkup0(&proc0, &thread0); proc0.p_md.md_sigtramp = NULL; proc0.p_md.md_utrap = NULL; Index: sys/sparc64/sparc64/vm_machdep.c =========================================================================== --- sys/sparc64/sparc64/vm_machdep.c 2011/09/12 17:10:03 #76 +++ sys/sparc64/sparc64/vm_machdep.c 2011/09/12 17:10:03 @@ -182,7 +182,7 @@ } void -cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, +cpu_set_upcall_func(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; Index: sys/sun4v/sun4v/machdep.c =========================================================================== --- sys/sun4v/sun4v/machdep.c 2011/09/12 17:10:03 #18 +++ sys/sun4v/sun4v/machdep.c 2011/09/12 17:10:03 @@ -363,7 +363,7 @@ /* * Initialize proc0 stuff (p_contested needs to be done early). */ - + sched_init_thread0(); proc_linkup0(&proc0, &thread0); proc0.p_md.md_sigtramp = NULL; proc0.p_md.md_utrap = NULL; Index: sys/sun4v/sun4v/vm_machdep.c =========================================================================== --- sys/sun4v/sun4v/vm_machdep.c 2011/09/12 17:10:03 #8 +++ sys/sun4v/sun4v/vm_machdep.c 2011/09/12 17:10:03 @@ -166,7 +166,7 @@ } void -cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, +cpu_set_upcall_func(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; Index: sys/sys/proc.h =========================================================================== --- sys/sys/proc.h 2011/09/12 17:10:03 #343 +++ sys/sys/proc.h 2011/09/12 17:10:03 @@ -153,7 +153,6 @@ * for write access. */ struct kaudit_record; -struct td_sched; struct nlminfo; struct kaioinfo; struct p_sched; @@ -179,6 +178,7 @@ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. 
*/ + TAILQ_ENTRY(thread) td_procq; /* (j/z) Run queue. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ @@ -201,6 +201,7 @@ u_char td_oncpu; /* (t) Which cpu we are on. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ + u_char td_rqindex; short td_locks; /* (k) Count of non-spin locks. */ short td_rw_rlocks; /* (k) Count of rwlock read locks. */ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */ @@ -267,7 +268,6 @@ int td_altkstack_pages; /* (a) Size of alternate kstack. */ volatile u_int td_critnest; /* (k*) Critical section nest level. */ struct mdthread td_md; /* (k) Any machine-dependent fields. */ - struct td_sched *td_sched; /* (*) Scheduler-specific data. */ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */ int td_syscalls; /* per-thread syscall count (used by NFS :)) */ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */ @@ -752,7 +752,8 @@ extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct proc proc0; /* Process slot for swapper. */ -extern struct thread thread0; /* Primary thread in proc0. */ +extern struct thread *thread0p; /* Primary thread in proc0. */ +#define thread0 (*thread0p) /* API Compatability */ extern struct vmspace vmspace0; /* VM space for proc0. */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int lastpid; @@ -829,7 +830,7 @@ void cpu_set_fork_handler(struct thread *, void (*)(void *), void *); void cpu_set_upcall(struct thread *td, struct thread *td0); -void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *, +void cpu_set_upcall_func(struct thread *, void (*)(void *), void *, stack_t *); int cpu_set_user_tls(struct thread *, void *tls_base); void cpu_thread_alloc(struct thread *); @@ -842,7 +843,6 @@ void thread_exit(void) __dead2; void thread_free(struct thread *td); void thread_link(struct thread *td, struct proc *p); -void thread_reap(void); int thread_single(int how); void thread_single_end(void); void thread_stash(struct thread *td); Index: sys/sys/sched.h =========================================================================== --- sys/sys/sched.h 2011/09/12 17:10:03 #39 +++ sys/sys/sched.h 2011/09/12 17:10:03 @@ -89,8 +89,6 @@ * Threads are switched in and out, block on resources, have temporary * priorities inherited from their procs, and use up cpu time. */ -void sched_exit_thread(struct thread *td, struct thread *child); -void sched_fork_thread(struct thread *td, struct thread *child); void sched_lend_prio(struct thread *td, u_char prio); void sched_lend_user_prio(struct thread *td, u_char pri); fixpt_t sched_pctcpu(struct thread *td); @@ -115,6 +113,7 @@ void sched_relinquish(struct thread *td); struct thread *sched_choose(void); void sched_idletd(void *); +void sched_run_ithread(struct thread *td); /* * Binding makes cpu affinity permanent while pinning is used to temporarily @@ -174,6 +173,8 @@ * Fixup scheduler state for proc0 and thread0 */ void schedinit(void); +void sched_init_thread0(void); + #endif /* _KERNEL */ /* POSIX 1003.1b Process Scheduling */
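
Note on TD_TO_TS(): both schedulers now reach their per-thread data by assuming the td_sched area is co-allocated immediately behind struct thread in the same thread-zone item, which is why sched_sizeof_thread() still reports sizeof(struct thread) + sizeof(struct td_sched) and why thread_init() no longer stores a td_sched pointer. The standalone sketch below only illustrates that pointer arithmetic; the struct members, the malloc-based allocation and the printf are placeholders for illustration, not the kernel definitions.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Placeholder stand-ins for the kernel structures. */
	struct thread {
		int	td_tid;			/* illustrative member only */
	};

	struct td_sched {
		int	ts_slice;		/* illustrative member only */
	};

	/* Same shape as the macro added to sched_4bsd.c and sched_ule.c:
	 * the scheduler data starts right after the thread itself. */
	#define TD_TO_TS(td)	((struct td_sched *)(&(td)[1]))

	/* Mirrors sched_sizeof_thread(): one zone item holds both pieces. */
	static size_t
	sched_sizeof_thread(void)
	{
		return (sizeof(struct thread) + sizeof(struct td_sched));
	}

	int
	main(void)
	{
		struct thread *td;
		struct td_sched *ts;

		/* The thread zone would hand out items of this size. */
		td = malloc(sched_sizeof_thread());
		if (td == NULL)
			return (1);
		memset(td, 0, sched_sizeof_thread());

		ts = TD_TO_TS(td);
		ts->ts_slice = 10;
		printf("td %p ts %p offset %lu\n", (void *)td, (void *)ts,
		    (unsigned long)((char *)ts - (char *)td));
		free(td);
		return (0);
	}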
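
Note on thread0 becoming a pointer: since the scheduler now owns the thread/td_sched co-allocation, proc0's thread can no longer be a plain statically-declared struct thread. Each scheduler packs an initial_thread plus initial_sched pair (sched0) and publishes it through thread0p in sched_init_thread0(), with proc.h mapping thread0 onto *thread0p for source compatibility; that is why the machdep.c hunks add a sched_init_thread0() call before anything touches thread0. The fragment below restates that pattern with the same trivial placeholder structs as the previous sketch, where no padding exists between the pair; whether the static pair exactly matches the UMA item layout is what the __aligned(16) in the patch is about.

	#include <stdio.h>

	struct thread {			/* placeholders, as in the previous sketch */
		int	td_tid;
	};
	struct td_sched {
		int	ts_slice;
	};

	#define TD_TO_TS(td)	((struct td_sched *)(&(td)[1]))

	/* Packed pair so that TD_TO_TS(&sched0.initial_thread) lands on
	 * initial_sched, matching the layout of a normal thread-zone item. */
	static struct {
		struct thread	initial_thread;
		struct td_sched	initial_sched;
	} sched0;

	struct thread *thread0p;
	#define thread0	(*thread0p)	/* API compatibility, as in sys/proc.h */

	/* Early boot hook: make thread0 usable before anything references it. */
	static void
	sched_init_thread0(void)
	{

		thread0p = &sched0.initial_thread;
	}

	int
	main(void)
	{

		sched_init_thread0();	/* the machdep.c hunks add this call early */
		thread0.td_tid = 100000;
		TD_TO_TS(&thread0)->ts_slice = 1;
		printf("thread0 tid %d, slice %d\n", thread0.td_tid,
		    TD_TO_TS(&thread0)->ts_slice);
		return (0);
	}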
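
Note on the i386 interrupt path: intr_execute_handlers() now takes the IDT vector from its callers (atpic.c already knows it, and lapic_handle_intr() gets it from the trap frame), so the vector-to-IRQ translation that apic_idt_to_irq() used to perform collapses into two array lookups against ioint_irqs[] and the newly exported interrupt_sources[], and the pic_vector() callback for the clkintr_pending check is no longer needed. The fragment below is only a toy restatement of that lookup shape; the table sizes, the clk_intsrc source and the simplified signatures (no trapframe) are invented for illustration and are not the real definitions from apicvar.h or intr_machdep.h.

	#include <stdio.h>

	/* Made-up sizes standing in for APIC_IO_INTS, APIC_NUM_IOINTS and
	 * NUM_IO_INTS; the real values live in the headers above. */
	#define APIC_IO_INTS	48
	#define APIC_NUM_IOINTS	8
	#define NUM_IO_INTS	16

	struct intsrc {
		int	is_irq;		/* placeholder member */
	};

	/* vector -> IRQ, as the I/O APIC vector allocator would fill it in. */
	static unsigned int ioint_irqs[APIC_NUM_IOINTS];
	/* IRQ -> source: the table the patch exports from intr_machdep.c. */
	static struct intsrc *interrupt_sources[NUM_IO_INTS];

	static struct intsrc clk_intsrc = { 0 };	/* e.g. IRQ 0, the clock */

	static void
	intr_execute_handlers(struct intsrc *isrc, unsigned int vector)
	{

		/* The vector arrives from the caller, so no pic_vector()
		 * callback is needed to recognize the clock interrupt. */
		printf("dispatching IRQ %d (vector %u)\n", isrc->is_irq, vector);
	}

	static void
	lapic_handle_intr(unsigned int vector)
	{
		struct intsrc *isrc;

		/* Same two-table shape as the new lapic_handle_intr(). */
		isrc = interrupt_sources[ioint_irqs[vector - APIC_IO_INTS]];
		intr_execute_handlers(isrc, vector);
	}

	int
	main(void)
	{

		ioint_irqs[0] = 0;		/* pretend vector 48 was given to IRQ 0 */
		interrupt_sources[0] = &clk_intsrc;
		lapic_handle_intr(APIC_IO_INTS);
		return (0);
	}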