diff --git a/sys/cddl/compat/opensolaris/sys/cyclic_impl.h b/sys/cddl/compat/opensolaris/sys/cyclic_impl.h index a195251..57bb167 100644 --- a/sys/cddl/compat/opensolaris/sys/cyclic_impl.h +++ b/sys/cddl/compat/opensolaris/sys/cyclic_impl.h @@ -288,7 +288,14 @@ typedef struct cyc_id { typedef struct cyc_xcallarg { cyc_cpu_t *cyx_cpu; - hrtime_t cyx_exp; + cyc_handler_t *cyx_hdlr; + cyc_time_t *cyx_when; + cyc_index_t cyx_ndx; + cyc_index_t *cyx_heap; + cyclic_t *cyx_cyclics; + cyc_index_t cyx_size; + uint16_t cyx_flags; + int cyx_wait; } cyc_xcallarg_t; #define CY_DEFAULT_PERCPU 1 diff --git a/sys/cddl/compat/opensolaris/sys/time.h b/sys/cddl/compat/opensolaris/sys/time.h index 4275790..b243193 100644 --- a/sys/cddl/compat/opensolaris/sys/time.h +++ b/sys/cddl/compat/opensolaris/sys/time.h @@ -57,7 +57,7 @@ gethrtime(void) { struct timespec ts; hrtime_t nsec; -#if 1 +#if 0 getnanouptime(&ts); #else nanouptime(&ts); diff --git a/sys/cddl/dev/cyclic/cyclic.c b/sys/cddl/dev/cyclic/cyclic.c index df0de6b..8c87b13 100644 --- a/sys/cddl/dev/cyclic/cyclic.c +++ b/sys/cddl/dev/cyclic/cyclic.c @@ -473,73 +473,6 @@ cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic) (*handler)(arg); } -static void -cyclic_enable_xcall(void *v) -{ - cyc_xcallarg_t *argp = v; - cyc_cpu_t *cpu = argp->cyx_cpu; - cyc_backend_t *be = cpu->cyp_backend; - - be->cyb_enable(be->cyb_arg); -} - -static void -cyclic_enable(cyc_cpu_t *cpu) -{ - cyc_backend_t *be = cpu->cyp_backend; - cyc_xcallarg_t arg; - - arg.cyx_cpu = cpu; - - /* Cross call to the target CPU */ - be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_enable_xcall, &arg); -} - -static void -cyclic_disable_xcall(void *v) -{ - cyc_xcallarg_t *argp = v; - cyc_cpu_t *cpu = argp->cyx_cpu; - cyc_backend_t *be = cpu->cyp_backend; - - be->cyb_disable(be->cyb_arg); -} - -static void -cyclic_disable(cyc_cpu_t *cpu) -{ - cyc_backend_t *be = cpu->cyp_backend; - cyc_xcallarg_t arg; - - arg.cyx_cpu = cpu; - - /* Cross call to the target CPU */ - be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_disable_xcall, &arg); -} - -static void -cyclic_reprogram_xcall(void *v) -{ - cyc_xcallarg_t *argp = v; - cyc_cpu_t *cpu = argp->cyx_cpu; - cyc_backend_t *be = cpu->cyp_backend; - - be->cyb_reprogram(be->cyb_arg, argp->cyx_exp); -} - -static void -cyclic_reprogram(cyc_cpu_t *cpu, hrtime_t exp) -{ - cyc_backend_t *be = cpu->cyp_backend; - cyc_xcallarg_t arg; - - arg.cyx_cpu = cpu; - arg.cyx_exp = exp; - - /* Cross call to the target CPU */ - be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_reprogram_xcall, &arg); -} - /* * cyclic_fire(cpu_t *) * @@ -570,17 +503,15 @@ static void cyclic_fire(cpu_t *c) { cyc_cpu_t *cpu = c->cpu_cyclic; - - mtx_lock_spin(&cpu->cyp_mtx); - + cyc_backend_t *be = cpu->cyp_backend; cyc_index_t *heap = cpu->cyp_heap; cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics; + void *arg = be->cyb_arg; hrtime_t now = gethrtime(); hrtime_t exp; if (cpu->cyp_nelems == 0) { /* This is a spurious fire. */ - mtx_unlock_spin(&cpu->cyp_mtx); return; } @@ -631,9 +562,42 @@ cyclic_fire(cpu_t *c) * Now we have a cyclic in the root slot which isn't in the past; * reprogram the interrupt source. */ - cyclic_reprogram(cpu, exp); + be->cyb_reprogram(arg, exp); +} + +static void +cyclic_expand_xcall(cyc_xcallarg_t *arg) +{ + cyc_cpu_t *cpu = arg->cyx_cpu; + cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i; + cyc_index_t *new_heap = arg->cyx_heap; + cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics; + + /* + * Assert that the new size is a power of 2. 
+ */ + ASSERT((new_size & (new_size - 1)) == 0); + ASSERT(new_size == (size << 1)); + ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL); + + bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size); + bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size); - mtx_unlock_spin(&cpu->cyp_mtx); + /* + * Set up the free list, and set all of the new cyclics to be CYF_FREE. + */ + for (i = size; i < new_size; i++) { + new_heap[i] = i; + new_cyclics[i].cy_flags = CYF_FREE; + } + + /* + * We can go ahead and plow the value of cyp_heap and cyp_cyclics; + * cyclic_expand() has kept a copy. + */ + cpu->cyp_heap = new_heap; + cpu->cyp_cyclics = new_cyclics; + cpu->cyp_size = new_size; } /* @@ -643,102 +607,68 @@ cyclic_fire(cpu_t *c) static void cyclic_expand(cyc_cpu_t *cpu) { - cyc_index_t new_size, old_size, i; + cyc_index_t new_size, old_size; cyc_index_t *new_heap, *old_heap; cyclic_t *new_cyclics, *old_cyclics; + cyc_xcallarg_t arg; + cyc_backend_t *be = cpu->cyp_backend; ASSERT(MUTEX_HELD(&cpu_lock)); - if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) + old_heap = cpu->cyp_heap; + old_cyclics = cpu->cyp_cyclics; + + if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) { new_size = CY_DEFAULT_PERCPU; + ASSERT(old_heap == NULL && old_cyclics == NULL); + } /* * Check that the new_size is a power of 2. */ ASSERT(((new_size - 1) & new_size) == 0); - /* Unlock the mutex while allocating memory so we can wait... */ - mtx_unlock_spin(&cpu->cyp_mtx); - new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK); new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK); - /* Grab the lock again now we've got the memory... */ - mtx_lock_spin(&cpu->cyp_mtx); - - /* Check if another thread beat us while the mutex was unlocked. */ - if (old_size != cpu->cyp_size) { - /* Oh well, he won. */ - mtx_unlock_spin(&cpu->cyp_mtx); - - free(new_heap, M_CYCLIC); - free(new_cyclics, M_CYCLIC); - - mtx_lock_spin(&cpu->cyp_mtx); - return; - } - - old_heap = cpu->cyp_heap; - old_cyclics = cpu->cyp_cyclics; - - bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * old_size); - bcopy(old_cyclics, new_cyclics, sizeof (cyclic_t) * old_size); - - /* - * Set up the free list, and set all of the new cyclics to be CYF_FREE. - */ - for (i = old_size; i < new_size; i++) { - new_heap[i] = i; - new_cyclics[i].cy_flags = CYF_FREE; - } + arg.cyx_cpu = cpu; + arg.cyx_heap = new_heap; + arg.cyx_cyclics = new_cyclics; + arg.cyx_size = new_size; - /* - * We can go ahead and plow the value of cyp_heap and cyp_cyclics; - * cyclic_expand() has kept a copy. 
- */ - cpu->cyp_heap = new_heap; - cpu->cyp_cyclics = new_cyclics; - cpu->cyp_size = new_size; + be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, + (cyc_func_t)cyclic_expand_xcall, &arg); if (old_cyclics != NULL) { ASSERT(old_heap != NULL); ASSERT(old_size != 0); - mtx_unlock_spin(&cpu->cyp_mtx); - free(old_cyclics, M_CYCLIC); free(old_heap, M_CYCLIC); - - mtx_lock_spin(&cpu->cyp_mtx); } } -static cyc_index_t -cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr, - cyc_time_t *when, uint16_t flags) +static void +cyclic_add_xcall(cyc_xcallarg_t *arg) { + cyc_cpu_t *cpu = arg->cyx_cpu; + cyc_handler_t *hdlr = arg->cyx_hdlr; + cyc_time_t *when = arg->cyx_when; + cyc_backend_t *be = cpu->cyp_backend; cyc_index_t ndx, nelems; + cyb_arg_t bar = be->cyb_arg; cyclic_t *cyclic; - ASSERT(MUTEX_HELD(&cpu_lock)); - - mtx_lock_spin(&cpu->cyp_mtx); - - ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE)); - ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0); - - while (cpu->cyp_nelems == cpu->cyp_size) - cyclic_expand(cpu); - ASSERT(cpu->cyp_nelems < cpu->cyp_size); nelems = cpu->cyp_nelems++; - if (nelems == 0) + if (nelems == 0) { /* * If this is the first element, we need to enable the * backend on this CPU. */ - cyclic_enable(cpu); + be->cyb_enable(bar); + } ndx = cpu->cyp_heap[nelems]; cyclic = &cpu->cyp_cyclics[ndx]; @@ -746,14 +676,20 @@ cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr, ASSERT(cyclic->cy_flags == CYF_FREE); cyclic->cy_interval = when->cyt_interval; - if (when->cyt_when == 0) - cyclic->cy_expire = gethrtime() + cyclic->cy_interval; - else + if (when->cyt_when == 0) { + /* + * If a start time hasn't been explicitly specified, we'll + * start on the next interval boundary. + */ + cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) * + cyclic->cy_interval; + } else { cyclic->cy_expire = when->cyt_when; + } cyclic->cy_handler = hdlr->cyh_func; cyclic->cy_arg = hdlr->cyh_arg; - cyclic->cy_flags = flags; + cyclic->cy_flags = arg->cyx_flags; if (cyclic_upheap(cpu, nelems)) { hrtime_t exp = cyclic->cy_expire; @@ -762,30 +698,59 @@ cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr, * If our upheap propagated to the root, we need to * reprogram the interrupt source. */ - cyclic_reprogram(cpu, exp); + be->cyb_reprogram(bar, exp); } - mtx_unlock_spin(&cpu->cyp_mtx); - - return (ndx); + arg->cyx_ndx = ndx; } - -static int -cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) +static cyc_index_t +cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr, + cyc_time_t *when, uint16_t flags) { - cyc_index_t nelems, i; - cyclic_t *cyclic; - cyc_index_t *heap, last; + cyc_backend_t *be = cpu->cyp_backend; + cyb_arg_t bar = be->cyb_arg; + cyc_xcallarg_t arg; ASSERT(MUTEX_HELD(&cpu_lock)); - ASSERT(wait == CY_WAIT || wait == CY_NOWAIT); + ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE)); + ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0); - mtx_lock_spin(&cpu->cyp_mtx); + if (cpu->cyp_nelems == cpu->cyp_size) { + /* + * This is expensive; it will cross call onto the other + * CPU to perform the expansion. + */ + cyclic_expand(cpu); + ASSERT(cpu->cyp_nelems < cpu->cyp_size); + } - heap = cpu->cyp_heap; + /* + * By now, we know that we're going to be able to successfully + * perform the add. Now cross call over to the CPU of interest to + * actually add our cyclic. 
+ */ + arg.cyx_cpu = cpu; + arg.cyx_hdlr = hdlr; + arg.cyx_when = when; + arg.cyx_flags = flags; - nelems = cpu->cyp_nelems; + be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg); + + return (arg.cyx_ndx); +} + +static void +cyclic_remove_xcall(cyc_xcallarg_t *arg) +{ + cyc_cpu_t *cpu = arg->cyx_cpu; + cyc_backend_t *be = cpu->cyp_backend; + cyb_arg_t bar = be->cyb_arg; + cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i; + cyc_index_t *heap = cpu->cyp_heap, last; + cyclic_t *cyclic; + + ASSERT(nelems > 0); cyclic = &cpu->cyp_cyclics[ndx]; @@ -794,11 +759,17 @@ cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) * removed as part of a juggling operation, the expiration time * will be used when the cyclic is added to the new CPU. */ - if (when != NULL) { - when->cyt_when = cyclic->cy_expire; - when->cyt_interval = cyclic->cy_interval; + if (arg->cyx_when != NULL) { + arg->cyx_when->cyt_when = cyclic->cy_expire; + arg->cyx_when->cyt_interval = cyclic->cy_interval; } + /* + * Now set the flags to CYF_FREE. We don't need a membar_enter() + * between zeroing pend and setting the flags because we're at + * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting + * of cy_flags appear atomic to softints). + */ cyclic->cy_flags = CYF_FREE; for (i = 0; i < nelems; i++) { @@ -811,19 +782,21 @@ cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) cpu->cyp_nelems = --nelems; - if (nelems == 0) + if (nelems == 0) { /* * If we just removed the last element, then we need to * disable the backend on this CPU. */ - cyclic_disable(cpu); + be->cyb_disable(bar); + } - if (i == nelems) + if (i == nelems) { /* * If we just removed the last element of the heap, then * we don't have to downheap. */ - goto done; + goto out; + } /* * Swap the last element of the heap with the one we want to @@ -833,17 +806,18 @@ cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) heap[i] = (last = heap[nelems]); heap[nelems] = ndx; - if (i == 0) + if (i == 0) { cyclic_downheap(cpu, 0); - else { + } else { if (cyclic_upheap(cpu, i) == 0) { /* * The upheap didn't propagate to the root; if it * didn't propagate at all, we need to downheap. 
*/ - if (heap[i] == last) + if (heap[i] == last) { cyclic_downheap(cpu, i); - goto done; + } + goto out; } } @@ -854,10 +828,27 @@ cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) cyclic = &cpu->cyp_cyclics[heap[0]]; ASSERT(nelems != 0); - cyclic_reprogram(cpu, cyclic->cy_expire); + be->cyb_reprogram(bar, cyclic->cy_expire); +out: + return; +} + +static int +cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) +{ + cyc_backend_t *be = cpu->cyp_backend; + cyc_xcallarg_t arg; + + ASSERT(MUTEX_HELD(&cpu_lock)); + ASSERT(wait == CY_WAIT || wait == CY_NOWAIT); -done: - mtx_unlock_spin(&cpu->cyp_mtx); + arg.cyx_ndx = ndx; + arg.cyx_cpu = cpu; + arg.cyx_when = when; + arg.cyx_wait = wait; + + be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, + (cyc_func_t)cyclic_remove_xcall, &arg); return (1); } @@ -1214,15 +1205,10 @@ cyclic_add_omni(cyc_omni_handler_t *omni) idp->cyi_omni_hdlr = *omni; - for (i = 0; i < MAXCPU; i++) { - if (pcpu_find(i) == NULL) - continue; - + CPU_FOREACH(i) { c = &solaris_cpu[i]; - if ((cpu = c->cpu_cyclic) == NULL) continue; - cyclic_omni_start(idp, cpu); } @@ -1325,12 +1311,8 @@ cyclic_mp_init(void) mutex_enter(&cpu_lock); - for (i = 0; i <= mp_maxid; i++) { - if (pcpu_find(i) == NULL) - continue; - + CPU_FOREACH(i) { c = &solaris_cpu[i]; - if (c->cpu_cyclic == NULL) cyclic_configure(c); } @@ -1346,10 +1328,8 @@ cyclic_uninit(void) CPU_FOREACH(id) { c = &solaris_cpu[id]; - if (c->cpu_cyclic == NULL) continue; - cyclic_unconfigure(c); } diff --git a/sys/cddl/dev/cyclic/i386/cyclic_machdep.c b/sys/cddl/dev/cyclic/i386/cyclic_machdep.c index 0b6ab59..6a1970f 100644 --- a/sys/cddl/dev/cyclic/i386/cyclic_machdep.c +++ b/sys/cddl/dev/cyclic/i386/cyclic_machdep.c @@ -26,14 +26,16 @@ * */ +static cyb_arg_t configure(cpu_t *cpu); +static void unconfigure(cyb_arg_t); static void enable(cyb_arg_t); static void disable(cyb_arg_t); static void reprogram(cyb_arg_t, hrtime_t); static void xcall(cyb_arg_t, cpu_t *, cyc_func_t, void *); static cyc_backend_t be = { - NULL, /* cyb_configure */ - NULL, /* cyb_unconfigure */ + configure, /* cyb_configure */ + unconfigure, /* cyb_unconfigure */ enable, disable, reprogram, @@ -63,18 +65,10 @@ cyclic_machdep_init(void) static void cyclic_machdep_uninit(void) { - int i; - - for (i = 0; i <= mp_maxid; i++) - /* Reset the cyclic clock callback hook. */ - cyclic_clock_func[i] = NULL; - /* De-register the cyclic backend. */ cyclic_uninit(); } -static hrtime_t exp_due[MAXCPU]; - /* * This function is the one registered by the machine dependent * initialiser as the callback for high speed timer events. @@ -84,7 +78,7 @@ cyclic_clock(struct trapframe *frame) { cpu_t *c = &solaris_cpu[curcpu]; - if (c->cpu_cyclic != NULL && gethrtime() >= exp_due[curcpu]) { + if (c->cpu_cyclic != NULL) { if (TRAPF_USERMODE(frame)) { c->cpu_profile_pc = 0; c->cpu_profile_upc = TRAPF_PC(frame); @@ -102,32 +96,48 @@ cyclic_clock(struct trapframe *frame) } } -static void enable(cyb_arg_t arg) +static cyb_arg_t +configure(cpu_t *cpu __unused) { - /* Register the cyclic clock callback function. */ - cyclic_clock_func[curcpu] = cyclic_clock; + + cyclic_clock_func = cyclic_clock; + return (NULL); } -static void disable(cyb_arg_t arg) +static void +unconfigure(cyb_arg_t arg __unused) { - /* Reset the cyclic clock callback function. 
*/ - cyclic_clock_func[curcpu] = NULL; + + cyclic_clock_func = NULL; } -static void reprogram(cyb_arg_t arg, hrtime_t exp) +static void +enable(cyb_arg_t arg __unused) { - exp_due[curcpu] = exp; + } -static void xcall(cyb_arg_t arg, cpu_t *c, cyc_func_t func, void *param) +static void +disable(cyb_arg_t arg __unused) +{ + +} + +static void +reprogram(cyb_arg_t arg __unused, hrtime_t exp) +{ + struct bintime bt; + struct timespec ts; + + ts.tv_sec = exp / 1000000000; + ts.tv_nsec = exp % 1000000000; + timespec2bintime(&ts, &bt); + clocksource_cyc_set(&bt); +} + +static void +xcall(cyb_arg_t arg, cpu_t *c, cyc_func_t func, void *param) { - /* - * If the target CPU is the current one, just call the - * function. This covers the non-SMP case. - */ - if (c == &solaris_cpu[curcpu]) - (*func)(param); - else - smp_rendezvous_cpus((cpumask_t) (1 << c->cpuid), NULL, - func, smp_no_rendevous_barrier, param); + smp_rendezvous_cpus((cpumask_t) (1 << c->cpuid), + smp_no_rendevous_barrier, func, smp_no_rendevous_barrier, param); } diff --git a/sys/kern/kern_clocksource.c b/sys/kern/kern_clocksource.c index dad07d2..e3c3918 100644 --- a/sys/kern/kern_clocksource.c +++ b/sys/kern/kern_clocksource.c @@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -59,7 +60,7 @@ __FBSDID("$FreeBSD$"); #ifdef KDTRACE_HOOKS #include -cyclic_clock_func_t cyclic_clock_func[MAXCPU]; +cyclic_clock_func_t cyclic_clock_func = NULL; #endif int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */ @@ -131,6 +132,9 @@ struct pcpu_state { struct bintime nexthard; /* Next hardlock() event. */ struct bintime nextstat; /* Next statclock() event. */ struct bintime nextprof; /* Next profclock() event. */ +#ifdef KDTRACE_HOOKS + struct bintime nextcyc; /* Next OpenSolaris cyclics event. */ +#endif int ipi; /* This CPU needs IPI. */ int idle; /* This CPU is in idle mode. */ }; @@ -193,17 +197,10 @@ handleevents(struct bintime *now, int fake) usermode = TRAPF_USERMODE(frame); pc = TRAPF_PC(frame); } -#ifdef KDTRACE_HOOKS - /* - * If the DTrace hooks are configured and a callback function - * has been registered, then call it to process the high speed - * timers. - */ - if (!fake && cyclic_clock_func[curcpu] != NULL) - (*cyclic_clock_func[curcpu])(frame); -#endif + runs = 0; state = DPCPU_PTR(timerstate); + while (bintime_cmp(now, &state->nexthard, >=)) { bintime_add(&state->nexthard, &hardperiod); runs++; @@ -227,6 +224,16 @@ handleevents(struct bintime *now, int fake) } } else state->nextprof = state->nextstat; + +#ifdef KDTRACE_HOOKS + if (fake == 0 && cyclic_clock_func != NULL && + state->nextcyc.sec != -1 && + bintime_cmp(now, &state->nextcyc, >=)) { + state->nextcyc.sec = -1; + (*cyclic_clock_func)(frame); + } +#endif + getnextcpuevent(&t, 0); if (fake == 2) { state->nextevent = t; @@ -266,10 +273,11 @@ getnextcpuevent(struct bintime *event, int idle) } else { /* If CPU is active - handle all types of events. 
*/ if (bintime_cmp(event, &state->nextstat, >)) *event = state->nextstat; - if (profiling && - bintime_cmp(event, &state->nextprof, >)) + if (profiling && bintime_cmp(event, &state->nextprof, >)) *event = state->nextprof; } + if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >)) + *event = state->nextcyc; } /* @@ -593,6 +601,7 @@ cpu_initclocks_bsp(void) CPU_FOREACH(cpu) { state = DPCPU_ID_PTR(cpu, timerstate); mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN); + state->nextcyc.sec = -1; } #ifdef SMP callout_new_inserted = cpu_new_callout; @@ -787,6 +796,43 @@ cpu_activeclock(void) spinlock_exit(); } +#ifdef KDTRACE_HOOKS +void +clocksource_cyc_set(const struct bintime *t) +{ + struct bintime now; + struct pcpu_state *state; + + state = DPCPU_PTR(timerstate); + if (periodic) + now = state->now; + else + binuptime(&now); + + CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x", + curcpu, now.sec, (unsigned int)(now.frac >> 32), + (unsigned int)(now.frac & 0xffffffff)); + CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x", + curcpu, t->sec, (unsigned int)(t->frac >> 32), + (unsigned int)(t->frac & 0xffffffff)); + + ET_HW_LOCK(state); + if (bintime_cmp(t, &state->nextcyc, ==)) { + ET_HW_UNLOCK(state); + return; + } + state->nextcyc = *t; + if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) { + ET_HW_UNLOCK(state); + return; + } + state->nextevent = state->nextcyc; + if (!periodic) + loadtimer(&now, 0); + ET_HW_UNLOCK(state); +} +#endif + #ifdef SMP static void cpu_new_callout(int cpu, int ticks) diff --git a/sys/sys/dtrace_bsd.h b/sys/sys/dtrace_bsd.h index 2eded7b..15e1be9 100644 --- a/sys/sys/dtrace_bsd.h +++ b/sys/sys/dtrace_bsd.h @@ -44,14 +44,9 @@ struct reg; * subsystem into the appropriate timer interrupt. */ typedef void (*cyclic_clock_func_t)(struct trapframe *); +extern cyclic_clock_func_t cyclic_clock_func; -/* - * These external variables are actually machine-dependent, so - * they might not actually exist. - * - * Defining them here avoids a proliferation of header files. - */ -extern cyclic_clock_func_t cyclic_clock_func[]; +void clocksource_cyc_set(const struct bintime *t); /* * The dtrace module handles traps that occur during a DTrace probe.
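
The patch itself carries no commentary, so the arithmetic it leans on is easy to miss. The short standalone sketch below (userspace C, not part of the change) models two of those calculations: the "start on the next interval boundary" expiration that cyclic_add_xcall() picks when cyt_when == 0, and the hrtime_t to struct bintime conversion that the new reprogram() backend performs before handing the deadline to clocksource_cyc_set(). The helper names and the example values are invented for illustration; the scaling constant matches what FreeBSD's timespec2bintime() uses, and the bintime layout is assumed to be the usual seconds plus 2^-64 fraction pair.

/*
 * Standalone userspace model (not part of the patch) of two calculations
 * used by the new code: the "next interval boundary" rule in
 * cyclic_add_xcall() when cyt_when == 0, and the hrtime_t -> bintime
 * conversion done by reprogram() before clocksource_cyc_set().
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t hrtime_t;		/* nanoseconds, as in the cyclic code */

struct bintime {			/* assumed standard FreeBSD layout */
	int64_t		sec;
	uint64_t	frac;		/* 2^-64 fractions of a second */
};

static hrtime_t
next_interval_boundary(hrtime_t now, hrtime_t interval)
{
	/* Same rounding as cyclic_add_xcall() when no start time is given. */
	return ((now / interval + 1) * interval);
}

static void
hrtime_to_bintime(hrtime_t exp, struct bintime *bt)
{
	/* reprogram(): split the nanosecond deadline into a timespec... */
	int64_t sec = exp / 1000000000;
	int64_t nsec = exp % 1000000000;

	/* ...then scale as timespec2bintime() does: 2^64 / 10^9 ~= 18446744073. */
	bt->sec = sec;
	bt->frac = (uint64_t)nsec * (uint64_t)18446744073ULL;
}

int
main(void)
{
	struct bintime bt;
	hrtime_t exp;

	/* now = 2.345678901 s, interval = 0.7 s -> expire at 2.8 s. */
	exp = next_interval_boundary(2345678901LL, 700000000LL);
	hrtime_to_bintime(exp, &bt);
	printf("expire at %jd ns = %jd sec + %#018jx frac\n",
	    (intmax_t)exp, (intmax_t)bt.sec, (uintmax_t)bt.frac);
	return (0);
}

Once the deadline is expressed as a bintime, the backend no longer needs the old per-CPU exp_due[] array polled from cyclic_clock(): the value lives in the per-CPU timerstate (nextcyc), and handleevents() invokes cyclic_clock_func only when that deadline has actually been reached.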