Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 215415)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -581,8 +581,8 @@
  * callout_deactivate() - marks the callout as having been serviced
  */
 int
-callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
-    void *arg, int cpu)
+_callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
+    void *arg, int cpu, int tracing)
 {
 	struct callout_cpu *cc;
 	int cancelled = 0;
@@ -608,6 +608,7 @@
 		 * Someone has called callout_drain to kill this
 		 * callout.  Don't reschedule.
 		 */
+		if (tracing)
 		CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
 		    cancelled ? "cancelled" : "failed to cancel",
 		    c, c->c_func, c->c_arg);
@@ -644,6 +645,7 @@
 	c->c_time = cc->cc_ticks + to_ticks;
 	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
 			  c, c_links.tqe);
+	if (tracing)
 	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
 	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
 	CC_UNLOCK(cc);
@@ -667,9 +669,10 @@
 }
 
 int
-_callout_stop_safe(c, safe)
+_callout_stop_safe(c, safe, tracing)
 	struct callout *c;
 	int safe;
+	int tracing;
 {
 	struct callout_cpu *cc;
 	struct lock_class *class;
@@ -706,6 +709,7 @@
 		 * callout, then we can't stop it, so just bail.
 		 */
 		if (cc->cc_curr != c) {
+			if (tracing)
 			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 			    c, c->c_func, c->c_arg);
 			CC_UNLOCK(cc);
@@ -768,13 +772,15 @@
 			 * softclock().
 			 */
 			cc->cc_cancel = 1;
+			if (tracing)
 			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 			    c, c->c_func, c->c_arg);
 			CC_UNLOCK(cc);
 			KASSERT(!sq_locked, ("sleepqueue chain locked"));
 			return (1);
 		}
-		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
+		if (tracing)
+		CTR3(KTR_CALLOUT, "2failed to stop %p func %p arg %p",
 		    c, c->c_func, c->c_arg);
 		CC_UNLOCK(cc);
 		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
@@ -791,6 +797,7 @@
 
 	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
 	    c_links.tqe);
+	if (tracing)
 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 	    c, c->c_func, c->c_arg);
 
Index: sys/kern/kern_event.c
===================================================================
--- sys/kern/kern_event.c	(revision 215415)
+++ sys/kern/kern_event.c	(working copy)
@@ -519,8 +519,8 @@
 
 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
 		calloutp = (struct callout *)kn->kn_hook;
-		callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
-		    filt_timerexpire, kn);
+		_callout_reset_on(calloutp, timertoticks(kn->kn_sdata),
+		    filt_timerexpire, kn, calloutp->c_cpu, 1);
 	}
 }
 
@@ -545,8 +545,8 @@
 	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
 	callout_init(calloutp, CALLOUT_MPSAFE);
 	kn->kn_hook = calloutp;
-	callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
-	    filt_timerexpire, kn);
+	_callout_reset_on(calloutp, timertoticks(kn->kn_sdata),
+	    filt_timerexpire, kn, calloutp->c_cpu, 1);
 
 	return (0);
 }
@@ -558,7 +558,7 @@
 	struct callout *calloutp;
 
 	calloutp = (struct callout *)kn->kn_hook;
-	callout_drain(calloutp);
+	_callout_stop_safe(calloutp, 1, 1);
 	free(calloutp, M_KQUEUE);
 	atomic_add_int(&kq_ncallouts, -1);
 	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 215415)
+++ sys/sys/callout.h	(working copy)
@@ -74,7 +74,7 @@
 
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
-#define	callout_drain(c)	_callout_stop_safe(c, 1)
+#define	callout_drain(c)	_callout_stop_safe(c, 1, 0)
 void	callout_init(struct callout *, int);
 void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags)					\
@@ -84,7 +84,10 @@
 	_callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object :	\
 	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
-int	callout_reset_on(struct callout *, int, void (*)(void *), void *, int);
+int	_callout_reset_on(struct callout *, int, void (*)(void *), void *, int,
+	    int);
+#define	callout_reset_on(c, on_tick, fn, arg, cpu)			\
+	_callout_reset_on((c), (on_tick), (fn), (arg), (cpu), 0)
 #define	callout_reset(c, on_tick, fn, arg)				\
 	callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
 #define	callout_reset_curcpu(c, on_tick, fn, arg)			\
@@ -93,8 +96,8 @@
 
 int	callout_schedule_on(struct callout *, int, int);
 #define	callout_schedule_curcpu(c, on_tick)				\
	callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
-#define	callout_stop(c)		_callout_stop_safe(c, 0)
-int	_callout_stop_safe(struct callout *, int);
+#define	callout_stop(c)		_callout_stop_safe(c, 0, 0)
+int	_callout_stop_safe(struct callout *, int, int);
 void	callout_tick(void);
 