Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 216074)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -280,7 +280,7 @@
 }
 
 static struct callout_cpu *
-callout_lock(struct callout *c)
+_callout_lock(struct callout *c, int trace)
 {
 	struct callout_cpu *cc;
 	int cpu;
@@ -289,6 +289,9 @@
 		cpu = c->c_cpu;
 		cc = CC_CPU(cpu);
 		CC_LOCK(cc);
+		if (trace)
+			CTR4(KTR_SPARE2, "%p with cpu %u (%u) cc %p", c, cpu,
+			    c->c_cpu, cc);
 		if (cpu == c->c_cpu)
 			break;
 		CC_UNLOCK(cc);
@@ -296,6 +299,8 @@
 	return (cc);
 }
 
+#define	callout_lock(c)	_callout_lock(c, 0)
+
 /*
  * The callout mechanism is based on the work of Adam M. Costello and
  * George Varghese, published in a technical report entitled "Redesigning
@@ -581,8 +586,8 @@
  * callout_deactivate() - marks the callout as having been serviced
  */
 int
-callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
-    void *arg, int cpu)
+_callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
+    void *arg, int cpu, int trace)
 {
 	struct callout_cpu *cc;
 	int cancelled = 0;
@@ -608,9 +613,11 @@
 			 * Someone has called callout_drain to kill this
 			 * callout.  Don't reschedule.
 			 */
-			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
-			    cancelled ? "cancelled" : "failed to cancel",
-			    c, c->c_func, c->c_arg);
+			if (trace)
+				CTR5(KTR_SPARE2, "%s %p func %p arg %p cc %p",
+				    cancelled ? "cancelled" :
+				    "failed to cancel", c, c->c_func,
+				    c->c_arg, cc);
 			CC_UNLOCK(cc);
 			return (cancelled);
 		}
@@ -630,6 +637,9 @@
 	 * we can't hold both the new and old locks simultaneously.
 	 */
 	if (c->c_cpu != cpu) {
+		if (trace)
+			CTR4(KTR_SPARE2, "%p with cpu %u (%u) cc %p", c, cpu,
+			    c->c_cpu, cc);
 		c->c_cpu = cpu;
 		CC_UNLOCK(cc);
 		goto retry;
@@ -644,8 +654,10 @@
 	c->c_time = cc->cc_ticks + to_ticks;
 	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
 	    c, c_links.tqe);
-	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
-	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
+	if (trace)
+		CTR6(KTR_SPARE2, "%sscheduled %p func %p arg %p in %d cc %p",
+		    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks,
+		    cc);
 	CC_UNLOCK(cc);
 
 	return (cancelled);
@@ -667,9 +679,10 @@
 }
 
 int
-_callout_stop_safe(c, safe)
+_callout_stop_safe(c, safe, trace)
 	struct callout *c;
 	int safe;
+	int trace;
 {
 	struct callout_cpu *cc;
 	struct lock_class *class;
@@ -692,7 +705,7 @@
 
 	sq_locked = 0;
 again:
-	cc = callout_lock(c);
+	cc = _callout_lock(c, trace);
 	/*
 	 * If the callout isn't pending, it's not on the queue, so
 	 * don't attempt to remove it from the queue.  We can try to
@@ -706,8 +719,10 @@
 		 * callout, then we can't stop it, so just bail.
 		 */
 		if (cc->cc_curr != c) {
-			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
-			    c, c->c_func, c->c_arg);
+			if (trace)
+				CTR4(KTR_SPARE2,
+				    "failed to stop %p func %p arg %p cc %p", c,
+				    c->c_func, c->c_arg, cc);
 			CC_UNLOCK(cc);
 			if (sq_locked)
 				sleepq_release(&cc->cc_waiting);
@@ -741,6 +756,10 @@
 			 * set.
 			 */
 			if (!sq_locked) {
+				if (trace)
+					CTR3(KTR_SPARE2,
+					    "%p with cpu %u cc %p", c,
+					    c->c_cpu, cc);
 				CC_UNLOCK(cc);
 				sleepq_lock(&cc->cc_waiting);
 				sq_locked = 1;
@@ -748,6 +767,10 @@
 			}
 			cc->cc_waiting = 1;
 			DROP_GIANT();
+			if (trace)
+				CTR3(KTR_SPARE2,
+				    "%p with cpu %u cc %p", c,
+				    c->c_cpu, cc);
 			CC_UNLOCK(cc);
 			sleepq_add(&cc->cc_waiting,
 			    &cc->cc_lock.lock_object, "codrain",
@@ -758,6 +781,10 @@
 			/* Reacquire locks previously released. */
 			PICKUP_GIANT();
 			CC_LOCK(cc);
+			if (trace)
+				CTR3(KTR_SPARE2,
+				    "%p with cpu %u cc %p", c,
+				    c->c_cpu, cc);
 		}
 	} else if (use_lock && !cc->cc_cancel) {
 		/*
@@ -768,14 +795,18 @@
 		 * softclock().
 		 */
 		cc->cc_cancel = 1;
-		CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
-		    c, c->c_func, c->c_arg);
+		if (trace)
+			CTR4(KTR_SPARE2,
+			    "cancelled %p func %p arg %p cc %p",
+			    c, c->c_func, c->c_arg, cc);
 		CC_UNLOCK(cc);
 		KASSERT(!sq_locked, ("sleepqueue chain locked"));
 		return (1);
 	}
-	CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
-	    c, c->c_func, c->c_arg);
+	if (trace)
+		CTR4(KTR_SPARE2,
+		    "failed to stop %p func %p arg %p cc %p",
+		    c, c->c_func, c->c_arg, cc);
 	CC_UNLOCK(cc);
 	KASSERT(!sq_locked, ("sleepqueue chain still locked"));
 	return (0);
@@ -791,8 +822,9 @@
 
 	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask],
 	    c, c_links.tqe);
-	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
-	    c, c->c_func, c->c_arg);
+	if (trace)
+		CTR4(KTR_SPARE2, "cancelled %p func %p arg %p cc %p",
+		    c, c->c_func, c->c_arg, cc);
 
 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 		c->c_func = NULL;
Index: sys/kern/kern_event.c
===================================================================
--- sys/kern/kern_event.c	(revision 216074)
+++ sys/kern/kern_event.c	(working copy)
@@ -44,6 +44,7 @@
 #include <sys/filio.h>
 #include <sys/fcntl.h>
 #include <sys/kthread.h>
+#include <sys/ktr.h>
 #include <sys/selinfo.h>
 #include <sys/queue.h>
 #include <sys/event.h>
@@ -519,8 +520,8 @@
 
 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
 		calloutp = (struct callout *)kn->kn_hook;
-		callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
-		    filt_timerexpire, kn);
+		_callout_reset_on(calloutp, timertoticks(kn->kn_sdata),
+		    filt_timerexpire, kn, PCPU_GET(cpuid), 1);
 	}
 }
 
@@ -545,8 +546,8 @@
 	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
 	callout_init(calloutp, CALLOUT_MPSAFE);
 	kn->kn_hook = calloutp;
-	callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
-	    filt_timerexpire, kn);
+	_callout_reset_on(calloutp, timertoticks(kn->kn_sdata),
+	    filt_timerexpire, kn, PCPU_GET(cpuid), 1);
 
 	return (0);
 }
@@ -558,7 +559,7 @@
 	struct callout *calloutp;
 
 	calloutp = (struct callout *)kn->kn_hook;
-	callout_drain(calloutp);
+	_callout_stop_safe(calloutp, 1, 1);
 	free(calloutp, M_KQUEUE);
 	atomic_add_int(&kq_ncallouts, -1);
 	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
@@ -1061,6 +1062,10 @@
 				goto done;
 			}
 
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote attach kn %p status 0x%x", kn,
+				    kn->kn_status);
 			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
 				knote_drop(kn, td);
 				goto done;
@@ -1078,8 +1083,13 @@
 		if (kev->flags & EV_DELETE) {
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x", kn,
+					    kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			goto done;
 		}
@@ -1407,8 +1417,13 @@
 			 * it _INFLUX.
 			 */
 			*kevp = kn->kn_kevent;
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x",
+					    kn, kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			KQ_LOCK(kq);
 			kn = NULL;
@@ -1644,8 +1659,13 @@
 			}
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x",
+					    kn, kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			KQ_LOCK(kq);
 		}
@@ -1661,8 +1681,13 @@
 			}
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x",
+					    kn, kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			KQ_LOCK(kq);
 		}
@@ -2035,8 +2060,13 @@
 		}
 		kn->kn_status |= KN_INFLUX;
 		KQ_UNLOCK(kq);
-		if (!(kn->kn_status & KN_DETACHED))
+		if (!(kn->kn_status & KN_DETACHED)) {
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote detach kn %p status 0x%x",
+				    kn, kn->kn_status);
 			kn->kn_fop->f_detach(kn);
+		}
 		knote_drop(kn, td);
 		influx = 1;
 		KQ_LOCK(kq);
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 216074)
+++ sys/sys/callout.h	(working copy)
@@ -74,7 +74,7 @@
 
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
-#define	callout_drain(c)	_callout_stop_safe(c, 1)
+#define	callout_drain(c)	_callout_stop_safe(c, 1, 0)
 void	callout_init(struct callout *, int);
 void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags)					\
@@ -84,7 +84,10 @@
 	_callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object :	\
 	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
-int	callout_reset_on(struct callout *, int, void (*)(void *), void *, int);
+int	_callout_reset_on(struct callout *, int, void (*)(void *), void *,
+	    int, int);
+#define	callout_reset_on(c, on_tick, fn, arg, cpu)			\
+	_callout_reset_on((c), (on_tick), (fn), (arg), (cpu), 0)
 #define	callout_reset(c, on_tick, fn, arg)				\
 	callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
 #define	callout_reset_curcpu(c, on_tick, fn, arg)			\
@@ -93,8 +96,8 @@
 int	callout_schedule_on(struct callout *, int, int);
 #define	callout_schedule_curcpu(c, on_tick)				\
	callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
-#define	callout_stop(c)		_callout_stop_safe(c, 0)
-int	_callout_stop_safe(struct callout *, int);
+#define	callout_stop(c)		_callout_stop_safe(c, 0, 0)
+int	_callout_stop_safe(struct callout *, int, int);
 void	callout_tick(void);
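
The gist of the patch: _callout_lock(), _callout_reset_on() and
_callout_stop_safe() grow an extra "trace" argument, the old public names
become macros that pass trace = 0, and only the kqueue timer filter call
sites pass trace = 1, logging to the otherwise idle KTR_SPARE2 class
instead of the noisy shared KTR_CALLOUT one.  Below is a minimal userland
sketch of that wrapper pattern; the demo_* names are made up for
illustration and are not part of the kernel API.

#include <stdio.h>

/* Stand-in for the kernel's CTRn()/KTR_SPARE2 tracing, purely illustrative. */
#define	DEMO_CTR(fmt, ...)	fprintf(stderr, "ktr: " fmt "\n", __VA_ARGS__)

/* The worker takes the extra flag and reports only when asked to... */
static int
_demo_stop(void *c, int safe, int trace)
{

	if (trace)
		DEMO_CTR("stopping %p safe %d", c, safe);
	/* The real cancellation logic would live here. */
	return (1);
}

/* ...while the public names stay trace-free wrappers, as in callout.h. */
#define	demo_stop(c)	_demo_stop((c), 0, 0)
#define	demo_drain(c)	_demo_stop((c), 1, 0)

int
main(void)
{
	int c;

	demo_stop(&c);		/* Existing callers see no trace output. */
	_demo_stop(&c, 1, 1);	/* Instrumented caller, cf. filt_timerdetach(). */
	return (0);
}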
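To actually capture the events, the kernel has to be built with KTR
support, i.e. "options KTR" with KTR_COMPILE and KTR_MASK covering
KTR_SPARE2 (the runtime mask can also be adjusted via the debug.ktr.mask
sysctl); the trace buffer can then be read with ktrdump(8) or DDB's
"show ktr".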