Index: sys/callout.h
===================================================================
--- sys/callout.h	(revision 242516)
+++ sys/callout.h	(working copy)
@@ -114,7 +114,8 @@ int callout_schedule_on(struct callout *, int, int
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
 int	_callout_stop_safe(struct callout *, int);
 void	callout_process(struct bintime *);
-extern void (*callout_new_inserted)(int cpu, struct bintime bt);
+extern void (*callout_new_inserted)(int cpu, struct bintime bt,
+    struct bintime);
 
 #endif
Index: kern/kern_timeout.c
===================================================================
--- kern/kern_timeout.c	(revision 242516)
+++ kern/kern_timeout.c	(working copy)
@@ -182,7 +182,8 @@ struct callout_cpu cc_cpu
 	(sizeof(time_t) == (sizeof(int64_t)) ? INT64_MAX : INT32_MAX)
 
 static int timeout_cpu;
-void (*callout_new_inserted)(int cpu, struct bintime bt) = NULL;
+void (*callout_new_inserted)(int cpu, struct bintime bt,
+    struct bintime bt_opt) = NULL;
 static struct callout *
 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
     int *lockcalls, int *gcalls, int direct);
@@ -369,11 +370,14 @@ start_softclock(void *dummy)
 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock,
     NULL);
 
+#define	CC_HASH_SHIFT	10
+
 static inline int
 callout_hash(struct bintime *bt)
 {
 
-	return (int) ((bt->sec<<10)+(bt->frac>>54));
+	return (int) ((bt->sec << CC_HASH_SHIFT) +
+	    (bt->frac >> (64 - CC_HASH_SHIFT)));
 }
 
 static inline int
@@ -386,7 +390,7 @@ get_bucket(struct bintime *bt)
 void
 callout_process(struct bintime *now)
 {
-	struct bintime max, min, next, tmp_max, tmp_min;
+	struct bintime max, min, next, next_opt, tmp_max, tmp_min;
 	struct callout *tmp;
 	struct callout_cpu *cc;
 	struct callout_tailq *sc;
@@ -443,7 +447,7 @@ callout_process(struct bintime *now)
 		first = (first + 1) & callwheelmask;
 	}
 	cc->cc_exec_next_dir = NULL;
-	future = (last + hz / 4) & callwheelmask;
+	future = (last + (1 << CC_HASH_SHIFT) / 2) & callwheelmask;
 	max.sec = min.sec = TIME_T_MAX;
 	max.frac = min.frac = UINT64_MAX;
 	/*
@@ -486,8 +490,9 @@ callout_process(struct bintime *now)
 		last = (last + 1) & callwheelmask;
 	}
 	if (max.sec == TIME_T_MAX) {
-		next = *now;
-		bintime_addx(&next, (uint64_t)1 << (64 - 2));
+		next = next_opt = *now;
+		bintime_addx(&next, (uint64_t)1 << (64 - 1));
+		bintime_addx(&next_opt, (uint64_t)3 << (64 - 3));
 	} else {
 		/*
 		 * Now that we found something to aggregate, schedule an
@@ -502,9 +507,10 @@ callout_process(struct bintime *now)
 			next.sec >>= 1;
 		} else
 			next = max;
+		next_opt = min;
 	}
 	if (callout_new_inserted != NULL)
-		(*callout_new_inserted)(cpu, next);
+		(*callout_new_inserted)(cpu, next, next_opt);
 	cc->cc_firstevent = next;
 	cc->cc_lastscan = *now;
 #ifdef CALLOUT_PROFILING
@@ -607,7 +613,9 @@ callout_cc_add(struct callout *c, struct callout_c
 	    (bintime_cmp(&bt, &cc->cc_firstevent, <) ||
 	    !bintime_isset(&cc->cc_firstevent))) {
 		cc->cc_firstevent = c->c_time;
-		(*callout_new_inserted)(cpu, c->c_time);
+		bt = c->c_time;
+		bintime_sub(&bt, &c->c_precision);
+		(*callout_new_inserted)(cpu, c->c_time, bt);
 	}
 }
Index: kern/kern_clocksource.c
===================================================================
--- kern/kern_clocksource.c	(revision 242516)
+++ kern/kern_clocksource.c	(working copy)
@@ -72,7 +72,8 @@ static int round_freq(struct eventtimer *et, int
 static void	getnextcpuevent(struct bintime *event, int idle);
 static void	getnextevent(struct bintime *event);
 static int	handleevents(struct bintime *now, int fake);
-static void	cpu_new_callout(int cpu, struct bintime bt);
+static void	cpu_new_callout(int cpu, struct bintime bt,
+		    struct bintime bt_opt);
 
 static struct mtx	et_hw_mtx;
@@ -135,6 +136,7 @@ struct pcpu_state {
 	struct bintime	nextstat;	/* Next statclock() event. */
 	struct bintime	nextprof;	/* Next profclock() event. */
 	struct bintime	nextcall;	/* Next callout event. */
+	struct bintime	nextcallopt;
 #ifdef KDTRACE_HOOKS
 	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
 #endif
@@ -238,9 +240,10 @@ handleevents(struct bintime *now, int fake)
 		}
 	} else
 		state->nextprof = state->nextstat;
-	if (bintime_cmp(now, &state->nextcall, >=) &&
-	    (state->nextcall.sec != -1)) {
+	if (bintime_cmp(now, &state->nextcallopt, >=) &&
+	    (state->nextcallopt.sec != -1)) {
 		state->nextcall.sec = -1;
+		state->nextcallopt.sec = -1;
 		callout_process(now);
 	}
 
@@ -637,6 +640,7 @@ cpu_initclocks_bsp(void)
 		state->nextcyc.sec = -1;
 #endif
 		state->nextcall.sec = -1;
+		state->nextcallopt.sec = -1;
 	}
 	callout_new_inserted = cpu_new_callout;
 	periodic = want_periodic;
@@ -863,7 +867,7 @@ clocksource_cyc_set(const struct bintime *t)
 #endif
 
 static void
-cpu_new_callout(int cpu, struct bintime bt)
+cpu_new_callout(int cpu, struct bintime bt, struct bintime bt_opt)
 {
 	struct bintime now;
 	struct pcpu_state *state;
@@ -881,6 +885,7 @@ static void
 	 * with respect to race conditions between interrupts execution
 	 * and scheduling.
 	 */
+	state->nextcallopt = bt_opt;
 	if (state->nextcall.sec != -1 &&
 	    bintime_cmp(&bt, &state->nextcall, >=)) {
 		ET_HW_UNLOCK(state);
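
Note on the interface change (not part of the patch): callout_new_inserted() now hands the event-timer layer two times instead of one. The existing argument "bt" is the latest time by which the callouts must be processed, and the new "bt_opt" is the earliest time at which processing is already useful, so kern_clocksource.c may program the hardware anywhere inside the [bt_opt, bt] window and coalesce wakeups. In the patch, callout_cc_add() derives the earliest time by subtracting c->c_precision from c->c_time, and callout_process() passes the aggregated minimum as next_opt. The stand-alone user-space sketch below only illustrates that contract; the demo_* names, the simplified comparison helper and the sample values are invented for the example and are not kernel code.

/*
 * Stand-alone sketch of the two-argument callout_new_inserted() contract.
 * Hypothetical demo_* names; only the idea of passing a [bt_opt, bt]
 * window to the event-timer layer is taken from the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct bintime {
	time_t   sec;
	uint64_t frac;
};

/* Simplified stand-in for the kernel's bintime_cmp(a, b, <). */
static int
demo_bintime_lt(const struct bintime *a, const struct bintime *b)
{
	if (a->sec != b->sec)
		return (a->sec < b->sec);
	return (a->frac < b->frac);
}

/* Per-CPU state, loosely mirroring pcpu_state in kern_clocksource.c. */
struct demo_pcpu_state {
	struct bintime nextcall;	/* Latest time we must fire by. */
	struct bintime nextcallopt;	/* Earliest time we may fire at. */
};

static struct demo_pcpu_state demo_state[1];

/*
 * New-style hook: the callout code reports both the hard deadline (bt)
 * and the earliest useful time (bt_opt); the timer layer is then free to
 * program the hardware anywhere in [bt_opt, bt] to aggregate wakeups.
 */
static void
demo_cpu_new_callout(int cpu, struct bintime bt, struct bintime bt_opt)
{
	struct demo_pcpu_state *s = &demo_state[cpu];

	s->nextcall = bt;
	s->nextcallopt = bt_opt;
	printf("cpu%d: may fire from %jd.%016jx, must fire by %jd.%016jx\n",
	    cpu, (intmax_t)bt_opt.sec, (uintmax_t)bt_opt.frac,
	    (intmax_t)bt.sec, (uintmax_t)bt.frac);
}

/*
 * Mirror of the handleevents() change: run the callouts once 'now' passes
 * the *optional* time, not the hard deadline.
 */
static void
demo_handleevents(int cpu, struct bintime now)
{
	struct demo_pcpu_state *s = &demo_state[cpu];

	if (!demo_bintime_lt(&now, &s->nextcallopt))
		printf("cpu%d: inside the window, processing callouts\n", cpu);
	else
		printf("cpu%d: too early, keep sleeping\n", cpu);
}

int
main(void)
{
	/* Made-up times: window is roughly [100.25 s, 100.5 s]. */
	struct bintime bt     = { 100, (uint64_t)1 << 63 };
	struct bintime bt_opt = { 100, (uint64_t)1 << 62 };
	struct bintime now    = { 100, ((uint64_t)1 << 62) + 1 };

	demo_cpu_new_callout(0, bt, bt_opt);
	demo_handleevents(0, now);
	return (0);
}

Built with "cc -o demo demo.c", the sketch prints the window reported through the hook and then shows handleevents()-style processing triggering as soon as "now" passes the optional time rather than the hard deadline.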