? amd64.diff
? idle.diff
? idle2.diff
? mwait.diff
? schedstat.diff
? sys.diff
? amd64/amd64/machdep.diff
? i386/i386/machdep.diff
Index: amd64/amd64/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/amd64/amd64/machdep.c,v
retrieving revision 1.683
diff -u -r1.683 machdep.c
--- amd64/amd64/machdep.c	16 Mar 2008 10:58:02 -0000	1.683
+++ amd64/amd64/machdep.c	25 Apr 2008 05:01:45 -0000
@@ -53,6 +53,7 @@
 #include "opt_maxmem.h"
 #include "opt_msgbuf.h"
 #include "opt_perfmon.h"
+#include "opt_sched.h"
 
 #include
 #include
@@ -528,62 +529,192 @@
 	__asm__ ("hlt");
 }
 
-/*
- * Hook to idle the CPU when possible.  In the SMP case we default to
- * off because a halted cpu will not currently pick up a new thread in the
- * run queue until the next timer tick.  If turned on this will result in
- * approximately a 4.2% loss in real time performance in buildworld tests
- * (but improves user and sys times oddly enough), and saves approximately
- * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
- *
- * XXX we need to have a cpu mask of idle cpus and generate an IPI or
- * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
- * Then we can have our cake and eat it too.
- *
- * XXX I'm turning it on for SMP as well by default for now.  It seems to
- * help lock contention somewhat, and this is critical for HTT. -Peter
- */
-static int	cpu_idle_hlt = 1;
-TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
-SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
-    &cpu_idle_hlt, 0, "Idle loop HLT enable");
+void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */
 
 static void
-cpu_idle_default(void)
+cpu_idle_hlt(int busy)
 {
 	/*
-	 * we must absolutely guarentee that hlt is the
-	 * absolute next instruction after sti or we
-	 * introduce a timing window.
+	 * we must absolutely guarentee that hlt is the next instruction
+	 * after sti or we introduce a timing window.
 	 */
-	__asm __volatile("sti; hlt");
+	disable_intr();
+	if (sched_runnable())
+		enable_intr();
+	else
+		__asm __volatile("sti; hlt");
 }
 
-/*
- * Note that we have to be careful here to avoid a race between checking
- * sched_runnable() and actually halting.  If we don't do this, we may waste
- * the time between calling hlt and the next interrupt even though there
- * is a runnable process.
- */
-void
-cpu_idle(void)
+static void
+cpu_idle_acpi(int busy)
 {
+	disable_intr();
+	if (sched_runnable())
+		enable_intr();
+	else if (cpu_idle_hook)
+		cpu_idle_hook();
+	else
+		__asm __volatile("sti; hlt");
+}
 
+static void
+cpu_idle_spin(int busy)
+{
+	return;
+}
+
+void (*cpu_idle_fn)(int) = cpu_idle_acpi;
+
+void
+cpu_idle(int busy)
+{
 #ifdef SMP
 	if (mp_grab_cpu_hlt())
 		return;
 #endif
-	if (cpu_idle_hlt) {
-		disable_intr();
-		if (sched_runnable())
-			enable_intr();
-		else
-			(*cpu_idle_hook)();
+	cpu_idle_fn(busy);
+}
+
+/*
+ * mwait cpu power states.  Lower 4 bits are sub-states.
+ */
+#define	MWAIT_C0	0xf0
+#define	MWAIT_C1	0x00
+#define	MWAIT_C2	0x10
+#define	MWAIT_C3	0x20
+#define	MWAIT_C4	0x30
+
+#define	MWAIT_DISABLED	0x0
+#define	MWAIT_WOKEN	0x1
+#define	MWAIT_WAITING	0x2
+
+static void
+cpu_idle_mwait(int busy)
+{
+	int *mwait;
+
+	mwait = (int *)PCPU_PTR(monitorbuf);
+	*mwait = MWAIT_WAITING;
+	if (sched_runnable())
+		return;
+	cpu_monitor(mwait, 0, 0);
+	if (*mwait == MWAIT_WAITING)
+		cpu_mwait(0, MWAIT_C1);
+}
+
+static void
+cpu_idle_mwait_hlt(int busy)
+{
+	int *mwait;
+
+	mwait = (int *)PCPU_PTR(monitorbuf);
+	if (busy == 0) {
+		*mwait = MWAIT_DISABLED;
+		cpu_idle_hlt(busy);
+		return;
 	}
+	*mwait = MWAIT_WAITING;
+	if (sched_runnable())
+		return;
+	cpu_monitor(mwait, 0, 0);
+	if (*mwait == MWAIT_WAITING)
+		cpu_mwait(0, MWAIT_C1);
 }
 
-/* Other subsystems (e.g., ACPI) can hook this later. */
-void (*cpu_idle_hook)(void) = cpu_idle_default;
+int
+cpu_idle_wakeup(int cpu)
+{
+	struct pcpu *pcpu;
+	int *mwait;
+
+	if (cpu_idle_fn == cpu_idle_spin)
+		return (1);
+	if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
+		return (0);
+	pcpu = pcpu_find(cpu);
+	mwait = (int *)pcpu->pc_monitorbuf;
+	/*
+	 * This doesn't need to be atomic since missing the race will
+	 * simply result in unnecessary IPIs.
+	 */
+	if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
+		return (0);
+	*mwait = MWAIT_WOKEN;
+
+	return (1);
+}
+
+/*
+ * Ordered by speed/power consumption.
+ */
+struct {
+	void	*id_fn;
+	char	*id_name;
+} idle_tbl[] = {
+	{ cpu_idle_spin, "spin" },
+	{ cpu_idle_mwait, "mwait" },
+	{ cpu_idle_mwait_hlt, "mwait_hlt" },
+	{ cpu_idle_hlt, "hlt" },
+	{ cpu_idle_acpi, "acpi" },
+	{ NULL, NULL }
+};
+
+static int
+idle_sysctl_available(SYSCTL_HANDLER_ARGS)
+{
+	char *avail, *p;
+	int error;
+	int i;
+
+	avail = malloc(256, M_TEMP, M_WAITOK);
+	p = avail;
+	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
+		if (strstr(idle_tbl[i].id_name, "mwait") &&
+		    (cpu_feature2 & CPUID2_MON) == 0)
+			continue;
+		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
+	}
+	error = sysctl_handle_string(oidp, avail, 0, req);
+	free(avail, M_TEMP);
+	return (error);
+}
+
+static int
+idle_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	char buf[16];
+	int error;
+	char *p;
+	int i;
+
+	p = "unknown";
+	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
+		if (idle_tbl[i].id_fn == cpu_idle_fn) {
+			p = idle_tbl[i].id_name;
+			break;
+		}
+	}
+	strncpy(buf, p, sizeof(buf));
+	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
+		if (strstr(idle_tbl[i].id_name, "mwait") &&
+		    (cpu_feature2 & CPUID2_MON) == 0)
+			continue;
+		if (strcmp(idle_tbl[i].id_name, buf))
+			continue;
+		cpu_idle_fn = idle_tbl[i].id_fn;
+		return (0);
+	}
+	return (EINVAL);
+}
+
+SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
+    0, 0, idle_sysctl_available, "A", "list of available idle functions");
+
+SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
+    idle_sysctl, "A", "currently selected idle function");
 
 /*
  * Clear registers on exec
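[Editor's note on the mwait idle methods above: MONITOR arms the CPU on the
cache line holding the per-CPU flag, and MWAIT then stalls until an interrupt
arrives or another CPU stores to that line.  That is why cpu_idle_wakeup()
can rouse an mwait-idle CPU with a plain store of MWAIT_WOKEN instead of an
IPI.  The fragment below is a hedged, illustrative sketch of that handshake,
not part of the patch; waiter()/waker() and the flag values are invented for
the example, and cpu_monitor()/cpu_mwait() are assumed to be the
machine/cpufunc.h wrappers the patch itself calls.]

/* Sketch of the MONITOR/MWAIT handshake relied on by cpu_idle_mwait(). */
static int flag = 2;			/* 2 = waiting, 1 = woken */

static void
waiter(void)				/* runs on the idling CPU */
{
	cpu_monitor(&flag, 0, 0);	/* arm the monitor on flag's line */
	if (flag == 2)			/* re-check after arming: closes the
					   race against a concurrent store */
		cpu_mwait(0, 0);	/* doze until a store or an interrupt */
}

static void
waker(void)				/* runs on any other CPU */
{
	flag = 1;			/* a plain store ends the MWAIT */
}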
Index: amd64/conf/GENERIC
===================================================================
RCS file: /home/ncvs/src/sys/amd64/conf/GENERIC,v
retrieving revision 1.495
diff -u -r1.495 GENERIC
--- amd64/conf/GENERIC	27 Mar 2008 11:54:17 -0000	1.495
+++ amd64/conf/GENERIC	25 Apr 2008 05:01:45 -0000
@@ -27,6 +27,7 @@
 makeoptions	DEBUG=-g		# Build kernel with gdb(1) debug symbols
 
 options 	SCHED_ULE		# ULE scheduler
+options 	SCHED_STATS
 options 	PREEMPTION		# Enable kernel thread preemption
 options 	INET			# InterNETworking
 options 	INET6			# IPv6 communications protocols
Index: amd64/include/pcpu.h
===================================================================
RCS file: /home/ncvs/src/sys/amd64/include/pcpu.h,v
retrieving revision 1.48
diff -u -r1.48 pcpu.h
--- amd64/include/pcpu.h	4 Jun 2007 21:38:45 -0000	1.48
+++ amd64/include/pcpu.h	25 Apr 2008 05:01:45 -0000
@@ -43,6 +43,7 @@
  * other processors"
  */
 #define	PCPU_MD_FIELDS						\
+	char	pc_monitorbuf[128] __aligned(128); /* cache line */ \
 	struct	pcpu *pc_prvspace;	/* Self-reference */	\
 	struct	pmap *pc_curpmap;				\
 	struct	amd64tss *pc_tssp;				\
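[Editor's note: pc_monitorbuf is the word cpu_idle_mwait() arms with MONITOR
and cpu_idle_wakeup() writes into.  It is declared as a full 128-byte,
128-byte-aligned buffer so the monitored line is not shared with other
per-CPU fields; a store to a neighbouring field would otherwise end the
MWAIT spuriously.  A hypothetical, stand-alone illustration of the same
layout idea follows; the struct and field names are invented, not from the
patch.]

/* Give the monitored flag a cache-line-sized region of its own. */
struct idle_monitor {
	int	state;				/* word armed with MONITOR */
	char	pad[128 - sizeof(int)];		/* keep other data off the line */
} __attribute__((aligned(128)));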
Index: arm/arm/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/arm/arm/machdep.c,v
retrieving revision 1.30
diff -u -r1.30 machdep.c
--- arm/arm/machdep.c	3 Apr 2008 16:44:49 -0000	1.30
+++ arm/arm/machdep.c	25 Apr 2008 05:01:45 -0000
@@ -326,12 +326,19 @@
 }
 
 void
-cpu_idle(void)
+cpu_idle(int busy)
 {
 	cpu_sleep(0);
 }
 
 int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
+int
 fill_regs(struct thread *td, struct reg *regs)
 {
 	struct trapframe *tf = td->td_frame;
Index: i386/i386/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/machdep.c,v
retrieving revision 1.668
diff -u -r1.668 machdep.c
--- i386/i386/machdep.c	16 Mar 2008 10:58:03 -0000	1.668
+++ i386/i386/machdep.c	25 Apr 2008 05:01:48 -0000
@@ -1129,63 +1129,192 @@
 	__asm__ ("hlt");
 }
 
-/*
- * Hook to idle the CPU when possible.  In the SMP case we default to
- * off because a halted cpu will not currently pick up a new thread in the
- * run queue until the next timer tick.  If turned on this will result in
- * approximately a 4.2% loss in real time performance in buildworld tests
- * (but improves user and sys times oddly enough), and saves approximately
- * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
- *
- * XXX we need to have a cpu mask of idle cpus and generate an IPI or
- * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
- * Then we can have our cake and eat it too.
- *
- * XXX I'm turning it on for SMP as well by default for now.  It seems to
- * help lock contention somewhat, and this is critical for HTT. -Peter
- */
-static int	cpu_idle_hlt = 1;
-TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
-SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
-    &cpu_idle_hlt, 0, "Idle loop HLT enable");
+void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */
 
 static void
-cpu_idle_default(void)
+cpu_idle_hlt(int busy)
 {
 	/*
-	 * we must absolutely guarentee that hlt is the
-	 * absolute next instruction after sti or we
-	 * introduce a timing window.
+	 * we must absolutely guarentee that hlt is the next instruction
+	 * after sti or we introduce a timing window.
 	 */
-	__asm __volatile("sti; hlt");
+	disable_intr();
+	if (sched_runnable())
+		enable_intr();
+	else
+		__asm __volatile("sti; hlt");
 }
 
-/*
- * Note that we have to be careful here to avoid a race between checking
- * sched_runnable() and actually halting.  If we don't do this, we may waste
- * the time between calling hlt and the next interrupt even though there
- * is a runnable process.
- */
-void
-cpu_idle(void)
+static void
+cpu_idle_acpi(int busy)
 {
+	disable_intr();
+	if (sched_runnable())
+		enable_intr();
+	else if (cpu_idle_hook)
+		cpu_idle_hook();
+	else
+		__asm __volatile("sti; hlt");
+}
 
+static void
+cpu_idle_spin(int busy)
+{
+	return;
+}
+
+void (*cpu_idle_fn)(int) = cpu_idle_acpi;
+
+void
+cpu_idle(int busy)
+{
 #ifdef SMP
 	if (mp_grab_cpu_hlt())
 		return;
 #endif
+	cpu_idle_fn(busy);
+}
+
+/*
+ * mwait cpu power states.  Lower 4 bits are sub-states.
+ */
+#define	MWAIT_C0	0xf0
+#define	MWAIT_C1	0x00
+#define	MWAIT_C2	0x10
+#define	MWAIT_C3	0x20
+#define	MWAIT_C4	0x30
+
+#define	MWAIT_DISABLED	0x0
+#define	MWAIT_WOKEN	0x1
+#define	MWAIT_WAITING	0x2
+
+static void
+cpu_idle_mwait(int busy)
+{
+	int *mwait;
+
+	mwait = (int *)PCPU_PTR(monitorbuf);
+	*mwait = MWAIT_WAITING;
+	if (sched_runnable())
+		return;
+	cpu_monitor(mwait, 0, 0);
+	if (*mwait == MWAIT_WAITING)
+		cpu_mwait(0, MWAIT_C1);
+}
+
+static void
+cpu_idle_mwait_hlt(int busy)
+{
+	int *mwait;
+
+	mwait = (int *)PCPU_PTR(monitorbuf);
+	if (busy == 0) {
+		*mwait = MWAIT_DISABLED;
+		cpu_idle_hlt(busy);
+		return;
+	}
+	*mwait = MWAIT_WAITING;
+	if (sched_runnable())
+		return;
+	cpu_monitor(mwait, 0, 0);
+	if (*mwait == MWAIT_WAITING)
+		cpu_mwait(0, MWAIT_C1);
+}
 
-	if (cpu_idle_hlt) {
-		disable_intr();
-		if (sched_runnable())
-			enable_intr();
-		else
-			(*cpu_idle_hook)();
+int
+cpu_idle_wakeup(int cpu)
+{
+	struct pcpu *pcpu;
+	int *mwait;
+
+	if (cpu_idle_fn == cpu_idle_spin)
+		return (1);
+	if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
+		return (0);
+	pcpu = pcpu_find(cpu);
+	mwait = (int *)pcpu->pc_monitorbuf;
+	/*
+	 * This doesn't need to be atomic since missing the race will
+	 * simply result in unnecessary IPIs.
+	 */
+	if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
+		return (0);
+	*mwait = MWAIT_WOKEN;
+
+	return (1);
+}
+
+/*
+ * Ordered by speed/power consumption.
+ */
+struct {
+	void	*id_fn;
+	char	*id_name;
+} idle_tbl[] = {
+	{ cpu_idle_spin, "spin" },
+	{ cpu_idle_mwait, "mwait" },
+	{ cpu_idle_mwait_hlt, "mwait_hlt" },
+	{ cpu_idle_hlt, "hlt" },
+	{ cpu_idle_acpi, "acpi" },
+	{ NULL, NULL }
+};
+
+static int
+idle_sysctl_available(SYSCTL_HANDLER_ARGS)
+{
+	char *avail, *p;
+	int error;
+	int i;
+
+	avail = malloc(256, M_TEMP, M_WAITOK);
+	p = avail;
+	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
+		if (strstr(idle_tbl[i].id_name, "mwait") &&
+		    (cpu_feature2 & CPUID2_MON) == 0)
+			continue;
+		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
+	}
+	error = sysctl_handle_string(oidp, avail, 0, req);
+	free(avail, M_TEMP);
+	return (error);
+}
+
+static int
+idle_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	char buf[16];
+	int error;
+	char *p;
+	int i;
+
+	p = "unknown";
+	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
+		if (idle_tbl[i].id_fn == cpu_idle_fn) {
+			p = idle_tbl[i].id_name;
+			break;
+		}
+	}
+	strncpy(buf, p, sizeof(buf));
+	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
+		if (strstr(idle_tbl[i].id_name, "mwait") &&
+		    (cpu_feature2 & CPUID2_MON) == 0)
+			continue;
+		if (strcmp(idle_tbl[i].id_name, buf))
+			continue;
+		cpu_idle_fn = idle_tbl[i].id_fn;
+		return (0);
 	}
+	return (EINVAL);
 }
 
-/* Other subsystems (e.g., ACPI) can hook this later. */
-void (*cpu_idle_hook)(void) = cpu_idle_default;
+SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
+    0, 0, idle_sysctl_available, "A", "list of available idle functions");
+
+SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
+    idle_sysctl, "A", "currently selected idle function");
 
 /*
  * Clear registers on exec
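[Editor's note: with this change the idle policy is selected at run time
through the new machdep.idle and machdep.idle_available sysctls rather than
the old machdep.cpu_idle_hlt toggle.  A small userland sketch using
sysctlbyname(3) follows; it is illustrative only and simply assumes the OIDs
registered above.]

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char avail[256], cur[16];
	size_t len;

	len = sizeof(avail);
	if (sysctlbyname("machdep.idle_available", avail, &len, NULL, 0) == 0)
		printf("available: %s\n", avail);

	len = sizeof(cur);
	if (sysctlbyname("machdep.idle", cur, &len, NULL, 0) == 0)
		printf("current: %s\n", cur);

	/* Select the mwait_hlt method; fails with EINVAL if not supported. */
	if (sysctlbyname("machdep.idle", NULL, NULL, "mwait_hlt",
	    strlen("mwait_hlt") + 1) != 0)
		perror("machdep.idle");
	return (0);
}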
Index: i386/include/pcpu.h
===================================================================
RCS file: /home/ncvs/src/sys/i386/include/pcpu.h,v
retrieving revision 1.50
diff -u -r1.50 pcpu.h
--- i386/include/pcpu.h	4 Jun 2007 21:38:46 -0000	1.50
+++ i386/include/pcpu.h	25 Apr 2008 05:01:48 -0000
@@ -46,6 +46,7 @@
  * other processors"
  */
 #define	PCPU_MD_FIELDS						\
+	char	pc_monitorbuf[128] __aligned(128); /* cache line */ \
 	struct	pcpu *pc_prvspace;	/* Self-reference */	\
 	struct	pmap *pc_curpmap;				\
 	struct	i386tss pc_common_tss;				\
Index: ia64/ia64/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia64/machdep.c,v
retrieving revision 1.236
diff -u -r1.236 machdep.c
--- ia64/ia64/machdep.c	15 Apr 2008 05:02:42 -0000	1.236
+++ ia64/ia64/machdep.c	25 Apr 2008 05:01:48 -0000
@@ -336,7 +336,7 @@
 }
 
 static void
-cpu_idle_default(void)
+cpu_idle_default(int busy)
 {
 	struct ia64_pal_result res;
 
@@ -349,6 +349,13 @@
 	(*cpu_idle_hook)();
 }
 
+int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
 /* Other subsystems (e.g., ACPI) can hook this later. */
 void (*cpu_idle_hook)(void) = cpu_idle_default;
Index: kern/sched_4bsd.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_4bsd.c,v
retrieving revision 1.124
diff -u -r1.124 sched_4bsd.c
--- kern/sched_4bsd.c	17 Apr 2008 04:20:10 -0000	1.124
+++ kern/sched_4bsd.c	25 Apr 2008 05:01:48 -0000
@@ -1443,7 +1443,7 @@
 	mtx_assert(&Giant, MA_NOTOWNED);
 
 	while (sched_runnable() == 0)
-		cpu_idle();
+		cpu_idle(0);
 
 	mtx_lock_spin(&sched_lock);
 	mi_switch(SW_VOL | SWT_IDLE, NULL);
Index: kern/sched_ule.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/sched_ule.c,v
retrieving revision 1.243
diff -u -r1.243 sched_ule.c
--- kern/sched_ule.c	17 Apr 2008 09:56:01 -0000	1.243
+++ kern/sched_ule.c	25 Apr 2008 05:01:48 -0000
@@ -954,6 +954,12 @@
 	 */
 	if (tdq->tdq_idlestate == TDQ_RUNNING)
 		return;
+	/*
+	 * If the MD code has an idle wakeup routine try that before
+	 * falling back to IPI.
+	 */
+	if (cpu_idle_wakeup(cpu))
+		return;
 	}
 	tdq->tdq_ipipending = 1;
 	ipi_selected(1 << cpu, IPI_PREEMPT);
@@ -2095,10 +2101,7 @@
 	 * If there is some activity seed it to reflect that.
 	 */
 	tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
-	if (tdq->tdq_load)
-		tdq->tdq_switchcnt = 2;
-	else
-		tdq->tdq_switchcnt = 0;
+	tdq->tdq_switchcnt = tdq->tdq_load;
 	/*
 	 * Advance the insert index once for each tick to ensure that all
 	 * threads get a chance to run.
@@ -2507,9 +2510,10 @@
 	 * tdq_notify().
 	 */
 	if (tdq->tdq_load == 0) {
+		switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
 		tdq->tdq_idlestate = TDQ_IDLE;
 		if (tdq->tdq_load == 0)
-			cpu_idle();
+			cpu_idle(switchcnt > 1);
 	}
 	if (tdq->tdq_load) {
 		thread_lock(td);
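[Editor's note: the sched_ule.c hunks above define how the new interface is
driven.  tdq_notify() first tries cpu_idle_wakeup() and only sends an IPI
when that fails, and the idle loop derives the "busy" hint from the sum of
the current and previous switch counters: more than one recent context
switch means cpu_idle(1), so an mwait-capable CPU keeps its monitor armed,
while a quiet CPU calls cpu_idle(0) and may simply halt.  Below is a
condensed, hedged sketch of the wakeup side; locking and the surrounding
scheduler state are elided and notify_idle_cpu() is an invented name.]

/* How a thread queued on a remote, idle CPU gets that CPU's attention. */
static void
notify_idle_cpu(int cpu)
{
	/*
	 * Cheap path: cpu_idle_wakeup() stores MWAIT_WOKEN into the target's
	 * monitored word, which ends its MWAIT without an interrupt.  It
	 * returns 0 when the target is not mwait-idle (for example mwait_hlt
	 * with busy == 0 advertises MWAIT_DISABLED), so fall back to an IPI.
	 */
	if (cpu_idle_wakeup(cpu))
		return;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}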
Index: mips/mips/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/mips/mips/machdep.c,v
retrieving revision 1.2
diff -u -r1.2 machdep.c
--- mips/mips/machdep.c	15 Apr 2008 02:50:07 -0000	1.2
+++ mips/mips/machdep.c	25 Apr 2008 05:01:49 -0000
@@ -527,7 +527,7 @@
  * call platform specific code to halt (until next interrupt) for the idle loop
  */
 void
-cpu_idle(void)
+cpu_idle(int busy)
 {
 	if (mips_cp0_status_read() & SR_INT_ENAB)
 		__asm __volatile ("wait");
@@ -535,6 +535,13 @@
 		panic("ints disabled in idleproc!");
 }
 
+int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
 void
 dumpsys(struct dumperinfo *di __unused)
 {
Index: pc98/pc98/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/pc98/pc98/machdep.c,v
retrieving revision 1.403
diff -u -r1.403 machdep.c
--- pc98/pc98/machdep.c	16 Mar 2008 10:58:08 -0000	1.403
+++ pc98/pc98/machdep.c	25 Apr 2008 05:01:50 -0000
@@ -1134,7 +1134,7 @@
  * is a runnable process.
  */
 void
-cpu_idle(void)
+cpu_idle(int busy)
 {
 
 #ifdef SMP
@@ -1151,6 +1151,13 @@
 	}
 }
 
+int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
 /* Other subsystems (e.g., ACPI) can hook this later. */
 void (*cpu_idle_hook)(void) = cpu_idle_default;
Index: powerpc/aim/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/powerpc/aim/machdep.c,v
retrieving revision 1.112
diff -u -r1.112 machdep.c
--- powerpc/aim/machdep.c	16 Apr 2008 23:28:12 -0000	1.112
+++ powerpc/aim/machdep.c	25 Apr 2008 05:01:50 -0000
@@ -731,7 +731,7 @@
 }
 
 void
-cpu_idle(void)
+cpu_idle(int busy)
 {
 	uint32_t msr;
 
@@ -751,6 +751,13 @@
 	}
 }
 
+int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
 /*
  * Set set up registers on exec.
  */
Index: powerpc/booke/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/powerpc/booke/machdep.c,v
retrieving revision 1.4
diff -u -r1.4 machdep.c
--- powerpc/booke/machdep.c	16 Mar 2008 10:58:08 -0000	1.4
+++ powerpc/booke/machdep.c	25 Apr 2008 05:01:50 -0000
@@ -696,7 +696,7 @@
  * Set Wait state enable.
  */
 void
-cpu_idle (void)
+cpu_idle (int busy)
 {
 	register_t msr;
 
@@ -723,6 +723,13 @@
 #endif
 }
 
+int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
 void
 spinlock_enter(void)
 {
Index: sparc64/sparc64/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/machdep.c,v
retrieving revision 1.140
diff -u -r1.140 machdep.c
--- sparc64/sparc64/machdep.c	25 Dec 2007 17:52:01 -0000	1.140
+++ sparc64/sparc64/machdep.c	25 Apr 2008 05:01:50 -0000
@@ -750,12 +750,19 @@
 }
 
 void
-cpu_idle(void)
+cpu_idle(int busy)
 {
 	/* Insert code to halt (until next interrupt) for the idle loop */
 }
 
 int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
+int
 ptrace_set_pc(struct thread *td, u_long addr)
 {
Index: sun4v/sun4v/machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/sun4v/sun4v/machdep.c,v
retrieving revision 1.17
diff -u -r1.17 machdep.c
--- sun4v/sun4v/machdep.c	25 Dec 2007 17:52:01 -0000	1.17
+++ sun4v/sun4v/machdep.c	25 Apr 2008 05:01:50 -0000
@@ -819,7 +819,7 @@
 }
 
 void
-cpu_idle(void)
+cpu_idle(int busy)
 {
 
 	if (rdpr(pil) != 0)
@@ -832,6 +832,13 @@
 }
 
 int
+cpu_idle_wakeup(int cpu)
+{
+
+	return (0);
+}
+
+int
 ptrace_set_pc(struct thread *td, u_long addr)
 {
Index: sys/proc.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/proc.h,v
retrieving revision 1.511
diff -u -r1.511 proc.h
--- sys/proc.h	17 Apr 2008 04:20:10 -0000	1.511
+++ sys/proc.h	25 Apr 2008 05:01:50 -0000
@@ -808,7 +808,8 @@
 void	sleepinit(void);
 void	stopevent(struct proc *, u_int, u_int);
 void	threadinit(void);
-void	cpu_idle(void);
+void	cpu_idle(int);
+int	cpu_idle_wakeup(int);
 extern	void (*cpu_idle_hook)(void);	/* Hook to machdep CPU idler. */
 void	cpu_switch(struct thread *, struct thread *, struct mtx *);
 void	cpu_throw(struct thread *, struct thread *) __dead2;