Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c	(revision 221802)
+++ sys/kern/kern_clocksource.c	(working copy)
@@ -59,7 +59,7 @@
 
 #ifdef KDTRACE_HOOKS
 #include <sys/dtrace_bsd.h>
-cyclic_clock_func_t	cyclic_clock_func[MAXCPU];
+cyclic_clock_func_t	cyclic_clock_func = NULL;
 #endif
 
 int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */
@@ -131,6 +131,9 @@
 	struct bintime	nexthard;	/* Next hardlock() event. */
 	struct bintime	nextstat;	/* Next statclock() event. */
 	struct bintime	nextprof;	/* Next profclock() event. */
+#ifdef KDTRACE_HOOKS
+	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
+#endif
 	int		ipi;		/* This CPU needs IPI. */
 	int		idle;		/* This CPU is in idle mode. */
 };
@@ -193,17 +196,10 @@
 		usermode = TRAPF_USERMODE(frame);
 		pc = TRAPF_PC(frame);
 	}
-#ifdef KDTRACE_HOOKS
-	/*
-	 * If the DTrace hooks are configured and a callback function
-	 * has been registered, then call it to process the high speed
-	 * timers.
-	 */
-	if (!fake && cyclic_clock_func[curcpu] != NULL)
-		(*cyclic_clock_func[curcpu])(frame);
-#endif
+
 	runs = 0;
 	state = DPCPU_PTR(timerstate);
+
 	while (bintime_cmp(now, &state->nexthard, >=)) {
 		bintime_add(&state->nexthard, &hardperiod);
 		runs++;
@@ -227,6 +223,16 @@
 		}
 	} else
 		state->nextprof = state->nextstat;
+
+#ifdef KDTRACE_HOOKS
+	if (fake == 0 && cyclic_clock_func != NULL &&
+	    state->nextcyc.sec != -1 &&
+	    bintime_cmp(now, &state->nextcyc, >=)) {
+		state->nextcyc.sec = -1;
+		(*cyclic_clock_func)(frame);
+	}
+#endif
+
 	getnextcpuevent(&t, 0);
 	if (fake == 2) {
 		state->nextevent = t;
@@ -266,10 +272,11 @@
 	} else { /* If CPU is active - handle all types of events. */
 		if (bintime_cmp(event, &state->nextstat, >))
 			*event = state->nextstat;
-		if (profiling &&
-		    bintime_cmp(event, &state->nextprof, >))
+		if (profiling && bintime_cmp(event, &state->nextprof, >))
 			*event = state->nextprof;
 	}
+	if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
+		*event = state->nextcyc;
 }
 
 /*
@@ -593,6 +600,7 @@
 	CPU_FOREACH(cpu) {
 		state = DPCPU_ID_PTR(cpu, timerstate);
 		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
+		state->nextcyc.sec = -1;
 	}
 #ifdef SMP
 	callout_new_inserted = cpu_new_callout;
@@ -787,6 +795,43 @@
 	spinlock_exit();
 }
 
+#ifdef KDTRACE_HOOKS
+void
+clocksource_cyc_set(const struct bintime *t)
+{
+	struct bintime now;
+	struct pcpu_state *state;
+
+	state = DPCPU_PTR(timerstate);
+	if (periodic)
+		now = state->now;
+	else
+		binuptime(&now);
+
+	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
+	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
+	    (unsigned int)(now.frac & 0xffffffff));
+	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
+	    curcpu, t->sec, (unsigned int)(t->frac >> 32),
+	    (unsigned int)(t->frac & 0xffffffff));
+
+	ET_HW_LOCK(state);
+	if (bintime_cmp(t, &state->nextcyc, ==)) {
+		ET_HW_UNLOCK(state);
+		return;
+	}
+	state->nextcyc = *t;
+	if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
+		ET_HW_UNLOCK(state);
+		return;
+	}
+	state->nextevent = state->nextcyc;
+	if (!periodic)
+		loadtimer(&now, 0);
+	ET_HW_UNLOCK(state);
+}
+#endif
+
 #ifdef SMP
 static void
 cpu_new_callout(int cpu, int ticks)
Index: sys/cddl/dev/cyclic/i386/cyclic_machdep.c
===================================================================
--- sys/cddl/dev/cyclic/i386/cyclic_machdep.c	(revision 221802)
+++ sys/cddl/dev/cyclic/i386/cyclic_machdep.c	(working copy)
@@ -30,6 +30,7 @@
 static void disable(cyb_arg_t);
 static void reprogram(cyb_arg_t, hrtime_t);
 static void xcall(cyb_arg_t, cpu_t *, cyc_func_t, void *);
+static void cyclic_clock(struct trapframe *frame);
 
 static cyc_backend_t be = {
 	NULL,		/* cyb_configure */
@@ -45,6 +46,7 @@
 cyclic_ap_start(void *dummy)
 {
 	/* Initialise the rest of the CPUs. */
+	cyclic_clock_func = cyclic_clock;
 	cyclic_mp_init();
 }
 
@@ -63,18 +65,10 @@
 static void
 cyclic_machdep_uninit(void)
 {
-	int i;
-
-	for (i = 0; i <= mp_maxid; i++)
-		/* Reset the cyclic clock callback hook. */
-		cyclic_clock_func[i] = NULL;
-
 	/* De-register the cyclic backend. */
 	cyclic_uninit();
 }
 
-static hrtime_t exp_due[MAXCPU];
-
 /*
  * This function is the one registered by the machine dependent
  * initialiser as the callback for high speed timer events.
  */
@@ -84,7 +78,7 @@
 {
 	cpu_t *c = &solaris_cpu[curcpu];
 
-	if (c->cpu_cyclic != NULL && gethrtime() >= exp_due[curcpu]) {
+	if (c->cpu_cyclic != NULL) {
 		if (TRAPF_USERMODE(frame)) {
 			c->cpu_profile_pc = 0;
 			c->cpu_profile_upc = TRAPF_PC(frame);
@@ -102,26 +96,34 @@
 	}
 }
 
-static void enable(cyb_arg_t arg)
+static void
+enable(cyb_arg_t arg __unused)
 {
-	/* Register the cyclic clock callback function. */
-	cyclic_clock_func[curcpu] = cyclic_clock;
+
 }
 
-static void disable(cyb_arg_t arg)
+static void
+disable(cyb_arg_t arg __unused)
 {
-	/* Reset the cyclic clock callback function. */
-	cyclic_clock_func[curcpu] = NULL;
+
 }
 
-static void reprogram(cyb_arg_t arg, hrtime_t exp)
+static void
+reprogram(cyb_arg_t arg __unused, hrtime_t exp)
 {
-	exp_due[curcpu] = exp;
+	struct bintime bt;
+	struct timespec ts;
+
+	ts.tv_sec = exp / 1000000000;
+	ts.tv_nsec = exp % 1000000000;
+	timespec2bintime(&ts, &bt);
+	clocksource_cyc_set(&bt);
 }
 
-static void xcall(cyb_arg_t arg, cpu_t *c, cyc_func_t func, void *param)
+static void xcall(cyb_arg_t arg __unused, cpu_t *c, cyc_func_t func,
+    void *param)
 {
-	smp_rendezvous_cpus((cpumask_t) (1 << c->cpuid),
+	smp_rendezvous_cpus((cpumask_t)1 << c->cpuid,
 	    smp_no_rendevous_barrier, func, smp_no_rendevous_barrier, param);
 }
 
Index: sys/sys/dtrace_bsd.h
===================================================================
--- sys/sys/dtrace_bsd.h	(revision 221802)
+++ sys/sys/dtrace_bsd.h	(working copy)
@@ -44,14 +44,9 @@
  * subsystem into the appropriate timer interrupt.
  */
 typedef void (*cyclic_clock_func_t)(struct trapframe *);
+extern cyclic_clock_func_t cyclic_clock_func;
 
-/*
- * These external variables are actually machine-dependent, so
- * they might not actually exist.
- *
- * Defining them here avoids a proliferation of header files.
- */
-extern cyclic_clock_func_t cyclic_clock_func[];
+void clocksource_cyc_set(const struct bintime *t);
 
 /*
  * The dtrace module handles traps that occur during a DTrace probe.