Calloutng precision support for the per-CPU callout wheel:
callout_cc_add() now remembers the earliest allowed run time in a local
`bt' and stores the *latest* allowed time (requested time plus
precision) in c->c_time, so callout_process() can recover the start of
the precision window by subtracting the precision again.  The
eventtimers(4) subsystem is only reprogrammed when the new callout's
earliest time actually precedes the currently scheduled first event
(or when no event is scheduled at all, i.e. cc_firstevent is zero).

Review fix folded in: the callout_process() hunk originally read
`bintime_sub(&next, &c->c_precision)', but no `c' is in scope there --
the TAILQ_FOREACH() iteration variable is `tmp', so the precision of
the callout being examined is tmp->c_precision.

Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 237915)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -378,7 +378,9 @@ callout_process(void)
 	for (;;) {
 		sc = &cc->cc_callwheel[first];
 		TAILQ_FOREACH(tmp, sc, c_links.tqe) {
-			if (bintime_cmp(&tmp->c_time, &now, <=)) {
+			next = tmp->c_time;
+			bintime_sub(&next, &tmp->c_precision);
+			if (bintime_cmp(&next, &now, <=)) {
 				/*
 				 * Consumer told us the callout may be run
 				 * directly from hardware interrupt context.
@@ -499,6 +501,7 @@ callout_cc_add(struct callout *c, struct callout_c
     struct bintime to_bintime, void (*func)(void *), void *arg, int cpu,
     int flags)
 {
+	struct bintime bt;
 	int bucket, r_shift, r_val;
 
 	CC_LOCK_ASSERT(cc);
@@ -546,13 +549,15 @@ callout_cc_add(struct callout *c, struct callout_c
 	    c, c_links.tqe);
 	/*
 	 * Inform the eventtimers(4) subsystem there's a new callout
-	 * that has been inserted.
+	 * that has been inserted, but only if really required.
 	 */
+	bt = c->c_time;
+	bintime_add(&c->c_time, &c->c_precision);
 	if (callout_new_inserted != NULL &&
-	    (bintime_cmp(&c->c_time, &cc->cc_firstevent, <) ||
+	    (bintime_cmp(&bt, &cc->cc_firstevent, <) ||
 	    (cc->cc_firstevent.sec == 0 && cc->cc_firstevent.frac == 0))) {
-		cc->cc_firstevent = c->c_time;
-		(*callout_new_inserted)(cpu, c->c_time);
+		cc->cc_firstevent = bt;
+		(*callout_new_inserted)(cpu, bt);
 	}
 }
 