Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 237922)
+++ sys/sys/callout.h	(working copy)
@@ -113,7 +113,7 @@ int	callout_schedule_on(struct callout *, int,
 	callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
 int	_callout_stop_safe(struct callout *, int);
-void	callout_process(void);
+void	callout_process(struct bintime *);
 extern void (*callout_new_inserted)(int cpu, struct bintime bt);
 
 #endif
Index: sys/kern/kern_clock.c
===================================================================
--- sys/kern/kern_clock.c	(revision 237922)
+++ sys/kern/kern_clock.c	(working copy)
@@ -425,6 +425,7 @@ initclocks(dummy)
 void
 hardclock_cpu(int usermode)
 {
+	struct bintime now;
 	struct pstats *pstats;
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
@@ -459,7 +460,8 @@ hardclock_cpu(int usermode)
 	if (td->td_intr_frame != NULL)
 		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
 #endif
-	callout_process();
+	binuptime(&now);
+	callout_process(&now);
 }
 
 /*
Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 237922)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -349,9 +349,9 @@ get_bucket(struct bintime *bt)
 }
 
 void
-callout_process(void)
+callout_process(struct bintime *now)
 {
-	struct bintime max, min, next, now, tmp_max, tmp_min;
+	struct bintime max, min, next, tmp_max, tmp_min;
 	struct callout *tmp;
 	struct callout_cpu *cc;
 	struct callout_tailq *sc;
@@ -364,10 +364,9 @@ void
 	need_softclock = 0;
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	binuptime(&now);
 	cpu = curcpu;
 	first = callout_hash(&cc->cc_lastscan);
-	last = callout_hash(&now);
+	last = callout_hash(now);
 	/*
 	 * Check if we wrapped around the entire wheel from the last scan.
 	 * In case, we need to scan entirely the wheel for pending callouts.
@@ -380,7 +379,7 @@ void
 		TAILQ_FOREACH(tmp, sc, c_links.tqe) {
 			next = tmp->c_time;
 			bintime_sub(&next, &tmp->c_precision);
-			if (bintime_cmp(&next, &now, <=)) {
+			if (bintime_cmp(&next, now, <=)) {
 				/*
 				 * Consumer told us the callout may be run
 				 * directly from hardware interrupt context.
@@ -445,7 +444,7 @@ void
 	if (max.sec == TIME_T_MAX) {
 		next.sec = 0;
 		next.frac = (uint64_t)1 << (64 - 2);
-		bintime_add(&next, &now);
+		bintime_add(&next, now);
 	} else {
 		/*
 		 * Now that we found something to aggregate, schedule an
@@ -461,7 +460,7 @@ void
 	cc->cc_firstevent = next;
 	if (callout_new_inserted != NULL)
 		(*callout_new_inserted)(cpu, next);
-	cc->cc_lastscan = now;
+	cc->cc_lastscan = *now;
 	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
 	/*
 	 * swi_sched acquires the thread lock, so we don't want to call it
Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c	(revision 237922)
+++ sys/kern/kern_clocksource.c	(working copy)
@@ -239,7 +239,7 @@ handleevents(struct bintime *now, int fake)
 	if (bintime_cmp(now, &state->nextcall, >=) &&
 	    (state->nextcall.sec != -1)) {
 		state->nextcall.sec = -1;
-		callout_process();
+		callout_process(now);
 	}
 
 #ifdef KDTRACE_HOOKS
@@ -363,13 +363,12 @@ timercb(struct eventtimer *et, void *arg)
 		next = &state->nexttick;
 	} else
 		next = &nexttick;
-	if (periodic) {
-		now = *next;	/* Ex-next tick time becomes present time. */
+	binuptime(&now);
+	if (periodic) {
+		*next = now;
 		bintime_add(next, &timerperiod); /* Next tick in 1 period. */
-	} else {
-		binuptime(&now);	/* Get present time from hardware. */
-		next->sec = -1;	/* Next tick is not scheduled yet. */
-	}
+	} else
+		next->sec = -1;	/* Next tick is not scheduled yet. */
 	state->now = now;
 	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
 	    curcpu, (int)(now.sec), (u_int)(now.frac >> 32),
@@ -541,18 +540,13 @@ configtimer(int start)
 		/* Initialize time machine parameters. */
 		next = now;
 		bintime_add(&next, &timerperiod);
-		if (periodic)
-			nexttick = next;
-		else
+		if (!periodic)
 			nexttick.sec = -1;
 		CPU_FOREACH(cpu) {
 			state = DPCPU_ID_PTR(cpu, timerstate);
 			state->now = now;
 			state->nextevent = next;
-			if (periodic)
-				state->nexttick = next;
-			else
-				state->nexttick.sec = -1;
+			state->nexttick.sec = -1;
 			state->nexthard = next;
 			state->nextstat = next;
 			state->nextprof = next;
@@ -720,11 +714,7 @@ cpu_initclocks_ap(void)
 	state = DPCPU_PTR(timerstate);
 	binuptime(&now);
 	ET_HW_LOCK(state);
-	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 && periodic) {
-		state->now = nexttick;
-		bintime_sub(&state->now, &timerperiod);
-	} else
-		state->now = now;
+	state->now = now;
 	hardclock_sync(curcpu);
 	handleevents(&state->now, 2);
 	if (timer->et_flags & ET_FLAGS_PERCPU)