Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 239132)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -113,7 +113,6 @@ int callwheelsize, callwheelmask;
  * the migrating callout is already running.
  */
 struct cc_exec {
-	struct callout		*cc_next;
 	struct callout		*cc_curr;
 #ifdef SMP
 	void			(*ce_migration_func)(void *);
@@ -142,11 +141,9 @@ struct callout_cpu {
 };
 
 #define	cc_exec_curr		cc_exec_entity[0].cc_curr
-#define	cc_exec_next		cc_exec_entity[0].cc_next
 #define	cc_exec_cancel		cc_exec_entity[0].cc_cancel
 #define	cc_exec_waiting		cc_exec_entity[0].cc_waiting
 #define	cc_exec_curr_dir	cc_exec_entity[1].cc_curr
-#define	cc_exec_next_dir	cc_exec_entity[1].cc_next
 #define	cc_exec_cancel_dir	cc_exec_entity[1].cc_cancel
 #define	cc_exec_waiting_dir	cc_exec_entity[1].cc_waiting
 
@@ -185,7 +182,7 @@ struct callout_cpu cc_cpu;
 static int timeout_cpu;
 
 void	(*callout_new_inserted)(int cpu, struct bintime bt) = NULL;
 
-static struct callout *
+static void
 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
     int *lockcalls, int *gcalls, int direct);
@@ -216,7 +213,6 @@ cc_cme_cleanup(struct callout_cpu *cc, int direct)
 {
 
 	cc->cc_exec_entity[direct].cc_curr = NULL;
-	cc->cc_exec_entity[direct].cc_next = NULL;
 	cc->cc_exec_entity[direct].cc_cancel = 0;
 	cc->cc_exec_entity[direct].cc_waiting = 0;
 #ifdef SMP
@@ -429,7 +425,7 @@ callout_process(struct bintime *now)
 				if (tmp->c_flags & CALLOUT_DIRECT) {
 					++depth_dir;
 					TAILQ_REMOVE(sc, tmp, c_links.tqe);
-					tmp = softclock_call_cc(tmp, cc,
+					softclock_call_cc(tmp, cc,
 					    &mpcalls_dir, &lockcalls_dir,
 					    NULL, 1);
 				} else {
@@ -438,17 +434,14 @@ callout_process(struct bintime *now)
 					TAILQ_REMOVE(sc, tmp, c_links.tqe);
 					tmp->c_flags |= CALLOUT_PROCESSED;
 					need_softclock = 1;
-					tmp = TAILQ_NEXT(tmp, c_links.tqe);
 				}
 			}
-			else
-				tmp = TAILQ_NEXT(tmp, c_links.tqe);
+			tmp = TAILQ_NEXT(tmp, c_links.tqe);
 		}
 		if (first == last)
 			break;
 		first = (first + 1) & callwheelmask;
 	}
-	cc->cc_exec_next_dir = NULL;
 	future = (last + hz / 4) & callwheelmask;
 	max.sec = min.sec = TIME_T_MAX;
 	max.frac = min.frac = UINT64_MAX;
@@ -558,7 +551,8 @@ callout_cc_add(struct callout *c, struct callout_c
     int flags)
 {
 	struct bintime bt;
-	int bucket, r_shift, r_val;
+	uint64_t r_val;
+	int bucket, r_shift;
 
 	CC_LOCK_ASSERT(cc);
 	if (bintime_cmp(&to_bintime, &cc->cc_lastscan, <))
@@ -620,17 +614,13 @@ static void
 callout_cc_del(struct callout *c, struct callout_cpu *cc, int direct)
 {
 
-	if (cc->cc_exec_next_dir == c)
-		cc->cc_exec_next_dir = TAILQ_NEXT(c, c_links.tqe);
-	else if (cc->cc_exec_next == c)
-		cc->cc_exec_next = TAILQ_NEXT(c, c_staiter);
 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 		c->c_func = NULL;
 		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
 	}
 }
 
-static struct callout *
+static void
 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
     int *lockcalls, int *gcalls, int direct)
 {
@@ -653,10 +643,6 @@ softclock_call_cc(struct callout *c, struct callou
 	static timeout_t *lastfunc;
 #endif
 
-	if (direct)
-		cc->cc_exec_next_dir = TAILQ_NEXT(c, c_links.tqe);
-	else
-		cc->cc_exec_next = TAILQ_NEXT(c, c_staiter);
 	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
 	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
 	c_lock = c->c_lock;
@@ -778,7 +764,7 @@ skip:
 			    "deferred cancelled %p func %p arg %p",
 			    c, new_func, new_arg);
 			callout_cc_del(c, cc, direct);
-			goto nextc;
+			return;
 		}
 		c->c_flags &= ~CALLOUT_DFRMIGRATION;
 
@@ -798,10 +784,6 @@ skip:
 		panic("migration should not happen");
 #endif
 	}
-#ifdef SMP
-nextc:
-#endif
-	return cc->cc_exec_entity[direct].cc_next;
 }
 
 /*
@@ -837,8 +819,8 @@ softclock(void *arg)
 	while (c != NULL) {
 		++depth;
 		TAILQ_REMOVE(&cc->cc_expireq, c, c_staiter);
-		c = softclock_call_cc(c, cc, &mpcalls,
-		    &lockcalls, &gcalls, 0);
+		softclock_call_cc(c, cc, &mpcalls, &lockcalls, &gcalls, 0);
+		c = TAILQ_FIRST(&cc->cc_expireq);
 	}
 #ifdef CALLOUT_PROFILING
 	avg_depth += (depth * 1000 - avg_depth) >> 8;
@@ -846,7 +828,6 @@ softclock(void *arg)
 	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
 	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
 #endif
-	cc->cc_exec_next = NULL;
 	CC_UNLOCK(cc);
 }
@@ -983,17 +964,11 @@ _callout_reset_on(struct callout *c, struct bintim
 	}
 	if (c->c_flags & CALLOUT_PENDING) {
 		if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
-			if (cc->cc_exec_next_dir == c)
-				cc->cc_exec_next_dir = TAILQ_NEXT(c,
-				    c_links.tqe);
 			bucket = get_bucket(&c->c_time);
 			TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 			    c_links.tqe);
-		} else {
-			if (cc->cc_exec_next == c)
-				cc->cc_exec_next = TAILQ_NEXT(c, c_staiter);
+		} else
 			TAILQ_REMOVE(&cc->cc_expireq, c, c_staiter);
-		}
 		cancelled = 1;
 		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
 	}
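
For review, a minimal userspace sketch of the drain-head-first idiom that the
softclock() side of this change leans on once softclock_call_cc() no longer
returns a next pointer: the drain loop must not follow queue links out of a
callout it has already handed to the handler, because the CC lock is dropped
around the call and a CALLOUT_LOCAL_ALLOC callout may be freed (or a neighbour
unlinked by callout_stop()) behind it; re-fetching the queue head after every
removal keeps the walk safe without any saved cc_exec_next state. Everything
below (struct item, run_one(), expireq) is hypothetical demo code, not kernel
code; only the sys/queue.h macros are the same ones kern_timeout.c uses.

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		TAILQ_ENTRY(item)	link;
		int			id;
	};

	static TAILQ_HEAD(, item) expireq = TAILQ_HEAD_INITIALIZER(expireq);

	/*
	 * Stand-in for softclock_call_cc(): by the time this returns, "it"
	 * may have been freed and any queue neighbour may have been removed,
	 * since the real function drops the CC lock around the handler.
	 */
	static void
	run_one(struct item *it)
	{

		printf("running callout %d\n", it->id);
		free(it);	/* CALLOUT_LOCAL_ALLOC-style reclamation */
	}

	int
	main(void)
	{
		struct item *it;
		int i;

		for (i = 0; i < 4; i++) {
			it = malloc(sizeof(*it));
			if (it == NULL)
				abort();
			it->id = i;
			TAILQ_INSERT_TAIL(&expireq, it, link);
		}

		/*
		 * Drain head-first: unlink the head, run it, then re-fetch
		 * the head.  No pointer into the queue survives across
		 * run_one(), so nothing needs fixing up when an entry is
		 * freed or unlinked concurrently.  TAILQ_NEXT(it, link)
		 * here would read freed memory.
		 */
		while ((it = TAILQ_FIRST(&expireq)) != NULL) {
			TAILQ_REMOVE(&expireq, it, link);
			run_one(it);
		}
		return (0);
	}

This pattern works for cc_expireq because that queue is consumed strictly from
the head; the direct-execution walk in callout_process() leaves not-yet-expired
entries in the wheel bucket, so it cannot simply restart from the bucket head
in the same way.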