Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 215807)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -417,7 +417,7 @@
 				}
 			} else {
 				mpcalls++;
-				CTR3(KTR_CALLOUT,
+				CTR3(KTR_SPARE2,
 				    "callout mpsafe %p func %p arg %p",
 				    c, c_func, c_arg);
 			}
@@ -581,8 +581,8 @@
  *	callout_deactivate() - marks the callout as having been serviced
  */
 int
-callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
-    void *arg, int cpu)
+_callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
+    void *arg, int cpu, int trace)
 {
 	struct callout_cpu *cc;
 	int cancelled = 0;
@@ -608,9 +608,11 @@
 			 * Someone has called callout_drain to kill this
 			 * callout.  Don't reschedule.
 			 */
-			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
-			    cancelled ? "cancelled" : "failed to cancel",
-			    c, c->c_func, c->c_arg);
+			if (trace)
+				CTR5(KTR_SPARE2, "%s %p func %p arg %p cc %p",
+				    cancelled ? "cancelled" :
+				    "failed to cancel", c, c->c_func,
+				    c->c_arg, cc);
 			CC_UNLOCK(cc);
 			return (cancelled);
 		}
@@ -644,8 +646,10 @@
 	c->c_time = cc->cc_ticks + to_ticks;
 	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
 	    c, c_links.tqe);
-	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
-	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
+	if (trace)
+		CTR6(KTR_SPARE2, "%sscheduled %p func %p arg %p in %d cc %p",
+		    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks,
+		    cc);
 	CC_UNLOCK(cc);
 
 	return (cancelled);
@@ -667,9 +671,10 @@
 }
 
 int
-_callout_stop_safe(c, safe)
+_callout_stop_safe(c, safe, trace)
 	struct callout *c;
 	int safe;
+	int trace;
 {
 	struct callout_cpu *cc;
 	struct lock_class *class;
@@ -706,8 +711,10 @@
 		 * callout, then we can't stop it, so just bail.
 		 */
 		if (cc->cc_curr != c) {
-			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
-			    c, c->c_func, c->c_arg);
+			if (trace)
+				CTR4(KTR_SPARE2,
+				    "failed to stop %p func %p arg %p cc %p", c,
+				    c->c_func, c->c_arg, cc);
 			CC_UNLOCK(cc);
 			if (sq_locked)
 				sleepq_release(&cc->cc_waiting);
@@ -768,14 +775,18 @@
 			 * softclock().
 			 */
			cc->cc_cancel = 1;
-			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
-			    c, c->c_func, c->c_arg);
+			if (trace)
+				CTR4(KTR_SPARE2,
+				    "cancelled %p func %p arg %p cc %p",
+				    c, c->c_func, c->c_arg, cc);
 			CC_UNLOCK(cc);
 			KASSERT(!sq_locked, ("sleepqueue chain locked"));
 			return (1);
 		}
-		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
-		    c, c->c_func, c->c_arg);
+		if (trace)
+			CTR4(KTR_SPARE2,
+			    "failed to stop %p func %p arg %p cc %p",
+			    c, c->c_func, c->c_arg, cc);
 		CC_UNLOCK(cc);
 		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
 		return (0);
@@ -791,8 +802,9 @@
 
 	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
 	    c_links.tqe);
-	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
-	    c, c->c_func, c->c_arg);
+	if (trace)
+		CTR4(KTR_SPARE2, "cancelled %p func %p arg %p cc %p",
+		    c, c->c_func, c->c_arg, cc);
 
 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 		c->c_func = NULL;
Index: sys/kern/kern_event.c
===================================================================
--- sys/kern/kern_event.c	(revision 215807)
+++ sys/kern/kern_event.c	(working copy)
@@ -454,6 +454,10 @@
 		 */
 		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
 			kn->kn_status |= KN_HASKQLOCK;
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote event kn %p status 0x%x", kn,
+				    kn->kn_status);
 			if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
 				KNOTE_ACTIVATE(kn, 1);
 			kn->kn_status &= ~KN_HASKQLOCK;
@@ -482,6 +486,9 @@
 		kev.data = kn->kn_id;		/* parent */
 		kev.udata = kn->kn_kevent.udata;/* preserve udata */
 		error = kqueue_register(kq, &kev, NULL, 0);
+		if (kn->kn_fop == &timer_filtops)
+			CTR2(KTR_SPARE2, "knote event kn %p status 0x%x", kn,
+			    kn->kn_status);
 		if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
 			KNOTE_ACTIVATE(kn, 0);
 		if (error)
@@ -519,8 +526,8 @@
 
 	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
 		calloutp = (struct callout *)kn->kn_hook;
-		callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
-		    filt_timerexpire, kn);
+		_callout_reset_on(calloutp, timertoticks(kn->kn_sdata),
+		    filt_timerexpire, kn, PCPU_GET(cpuid), 1);
 	}
 }
 
@@ -545,8 +552,8 @@
 	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
 	callout_init(calloutp, CALLOUT_MPSAFE);
 	kn->kn_hook = calloutp;
-	callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
-	    filt_timerexpire, kn);
+	_callout_reset_on(calloutp, timertoticks(kn->kn_sdata),
+	    filt_timerexpire, kn, PCPU_GET(cpuid), 1);
 
 	return (0);
 }
@@ -558,7 +565,7 @@
 	struct callout *calloutp;
 
 	calloutp = (struct callout *)kn->kn_hook;
-	callout_drain(calloutp);
+	_callout_stop_safe(calloutp, 1, 1);
 	free(calloutp, M_KQUEUE);
 	atomic_add_int(&kq_ncallouts, -1);
 	kn->kn_status |= KN_DETACHED;	/* knlist_remove usually clears it */
@@ -1078,8 +1085,13 @@
 		if (kev->flags & EV_DELETE) {
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x", kn,
+					    kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			goto done;
 		}
@@ -1108,6 +1120,9 @@
 	 * kn_knlist.
 	 */
done_ev_add:
+	if (kn->kn_fop == &timer_filtops)
+		CTR2(KTR_SPARE2, "knote event kn %p status 0x%x", kn,
+		    kn->kn_status);
 	event = kn->kn_fop->f_event(kn, 0);
 	KQ_LOCK(kq);
 	if (event)
@@ -1407,8 +1422,13 @@
 			 * it _INFLUX.
 			 */
			*kevp = kn->kn_kevent;
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x",
+					    kn, kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			KQ_LOCK(kq);
 			kn = NULL;
@@ -1418,6 +1438,10 @@
 			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 			KN_LIST_LOCK(kn);
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote event kn %p status 0x%x", kn,
+				    kn->kn_status);
 			if (kn->kn_fop->f_event(kn, 0) == 0) {
 				KQ_LOCK(kq);
 				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
@@ -1644,8 +1668,13 @@
 			}
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x",
+					    kn, kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			KQ_LOCK(kq);
 		}
@@ -1661,8 +1690,13 @@
 			}
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if (!(kn->kn_status & KN_DETACHED))
+			if (!(kn->kn_status & KN_DETACHED)) {
+				if (kn->kn_fop == &timer_filtops)
+					CTR2(KTR_SPARE2,
+					    "knote detach kn %p status 0x%x",
+					    kn, kn->kn_status);
 				kn->kn_fop->f_detach(kn);
+			}
 			knote_drop(kn, td);
 			KQ_LOCK(kq);
 		}
@@ -1763,6 +1797,10 @@
 		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote event kn %p status 0x%x", kn,
+				    kn->kn_status);
 			error = kn->kn_fop->f_event(kn, hint);
 			KQ_LOCK(kq);
 			kn->kn_status &= ~KN_INFLUX;
@@ -1771,6 +1809,10 @@
 			KQ_UNLOCK_FLUX(kq);
 		} else {
 			kn->kn_status |= KN_HASKQLOCK;
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote event kn %p status 0x%x", kn,
+				    kn->kn_status);
 			if (kn->kn_fop->f_event(kn, hint))
 				KNOTE_ACTIVATE(kn, 1);
 			kn->kn_status &= ~KN_HASKQLOCK;
@@ -2035,8 +2077,13 @@
 		}
 		kn->kn_status |= KN_INFLUX;
 		KQ_UNLOCK(kq);
-		if (!(kn->kn_status & KN_DETACHED))
+		if (!(kn->kn_status & KN_DETACHED)) {
+			if (kn->kn_fop == &timer_filtops)
+				CTR2(KTR_SPARE2,
+				    "knote detach kn %p status 0x%x",
+				    kn, kn->kn_status);
 			kn->kn_fop->f_detach(kn);
+		}
 		knote_drop(kn, td);
 		influx = 1;
 		KQ_LOCK(kq);
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 215807)
+++ sys/sys/callout.h	(working copy)
@@ -74,7 +74,7 @@
 
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
-#define	callout_drain(c)	_callout_stop_safe(c, 1)
+#define	callout_drain(c)	_callout_stop_safe(c, 1, 0)
 void	callout_init(struct callout *, int);
 void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags)					\
@@ -84,7 +84,10 @@
 	_callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object :	\
 	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
-int	callout_reset_on(struct callout *, int, void (*)(void *), void *, int);
+int	_callout_reset_on(struct callout *, int, void (*)(void *), void *,
+	    int, int);
+#define	callout_reset_on(c, on_tick, fn, arg, cpu)			\
+	_callout_reset_on((c), (on_tick), (fn), (arg), (cpu), 0)
 #define	callout_reset(c, on_tick, fn, arg)				\
     callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
 #define	callout_reset_curcpu(c, on_tick, fn, arg)			\
@@ -93,8 +96,8 @@
 int	callout_schedule_on(struct callout *, int, int);
 #define	callout_schedule_curcpu(c, on_tick)				\
     callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
-#define	callout_stop(c)		_callout_stop_safe(c, 0)
-int	_callout_stop_safe(struct callout *, int);
+#define	callout_stop(c)		_callout_stop_safe(c, 0, 0)
+int	_callout_stop_safe(struct callout *, int, int);
 void	callout_tick(void);
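
For reference, a minimal sketch of how a subsystem would opt into the tracing
path this patch adds. The my_timeout/my_expire/my_start/my_stop names are
hypothetical, invented purely for illustration; only the _callout_reset_on()
and _callout_stop_safe() signatures are the ones introduced above. Existing
callers are unaffected, since callout_reset_on(), callout_stop() and
callout_drain() now expand to the underscore variants with trace=0.

/*
 * Hypothetical consumer of the traced callout path.  Passing trace=1
 * emits the CTR records under the KTR_SPARE2 event class instead of
 * KTR_CALLOUT, so the traced callout's activity can be isolated from
 * the rest of the system's callout traffic.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/pcpu.h>

static struct callout my_timeout;	/* hypothetical */

static void
my_expire(void *arg)
{
	/* ... handle expiry, then rearm with tracing enabled ... */
	_callout_reset_on(&my_timeout, hz, my_expire, arg,
	    PCPU_GET(cpuid), 1);
}

static void
my_start(void)
{
	callout_init(&my_timeout, CALLOUT_MPSAFE);
	/* trace=1: log the (re)schedule, including the callout_cpu. */
	_callout_reset_on(&my_timeout, hz, my_expire, NULL,
	    PCPU_GET(cpuid), 1);
}

static void
my_stop(void)
{
	/* safe=1 keeps callout_drain() semantics; trace=1 logs the stop. */
	_callout_stop_safe(&my_timeout, 1, 1);
}

The records only show up on a kernel built with KTR, with the KTR_SPARE2
class bit included in KTR_COMPILE and set in the runtime mask (the
debug.ktr.mask sysctl). PCPU_GET(cpuid) above mirrors what the patch itself
does in kern_event.c when it replaces callout_reset_curcpu().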