Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 215865)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -105,12 +105,10 @@
 	struct callout_tailq	*cc_callwheel;
 	struct callout_list	cc_callfree;
 	struct callout		*cc_next;
-	struct callout		*cc_curr;
 	void			*cc_cookie;
 	int			cc_ticks;
 	int			cc_softticks;
 	int			cc_cancel;
-	int			cc_waiting;
 	int			cc_firsttick;
 };
 
@@ -133,19 +131,12 @@
 
 /**
  * Locked by cc_lock:
- *   cc_curr    - If a callout is in progress, it is curr_callout.
- *                If curr_callout is non-NULL, threads waiting in
- *                callout_drain() will be woken up as soon as the
- *                relevant callout completes.
  *   cc_cancel  - Changing to 1 with both callout_lock and c_lock held
  *                guarantees that the current callout will not run.
  *                The softclock() function sets this to 0 before it
  *                drops callout_lock to acquire c_lock, and it calls
  *                the handler only if curr_cancelled is still 0 after
  *                c_lock is successfully acquired.
- *   cc_waiting - If a thread is waiting in callout_drain(), then
- *                callout_wait is nonzero.  Set only when
- *                curr_callout is non-NULL.
  */
 
 /*
@@ -340,8 +331,10 @@
 softclock(void *arg)
 {
 	struct callout_cpu *cc;
+	struct callout_cpu *cc_handover;
 	struct callout *c;
 	struct callout_tailq *bucket;
+	int cancelled;
 	int curticks;
 	int steps;	/* #steps since we last allowed interrupts */
 	int depth;
@@ -412,9 +405,10 @@
 					c->c_flags =
 					    (c->c_flags & ~CALLOUT_PENDING);
 				}
-				cc->cc_curr = c;
+				c->c_flags |= CALLOUT_EXECUTING;
 				cc->cc_cancel = 0;
 				CC_UNLOCK(cc);
+				cancelled = 0;
 				if (c_lock != NULL) {
 					class->lc_lock(c_lock, sharedlock);
 					/*
@@ -423,6 +417,7 @@
 					 */
 					if (cc->cc_cancel) {
 						class->lc_unlock(c_lock);
+						cancelled = 1;
 						goto skip;
 					}
 					/* The callout cannot be stopped now. */
@@ -476,8 +471,29 @@
 				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
 					class->lc_unlock(c_lock);
 			skip:
-				CC_LOCK(cc);
+				/*
+				 * If the callout has been cancelled, it is
+				 * no longer safe to access the callout.
+				 */
+				if (cancelled != 0) {
+					CC_LOCK(cc);
+					if (c_flags & CALLOUT_LOCAL_ALLOC) {
+						c->c_func = NULL;
+						SLIST_INSERT_HEAD(&cc->cc_callfree, c,
+						    c_links.sle);
+					}
+					steps = 0;
+					c = cc->cc_next;
+					continue;
+				}
+
+				/*
+				 * The callout may have migrated, so perform the
+				 * handover operations with the correct lock.
+				 */
+				cc_handover = callout_lock(c);
+
 				/*
 				 * If the current callout is locally
 				 * allocated (from timeout(9))
 				 * then put it on the freelist.
@@ -488,24 +504,23 @@
 				 * callout pointer.
 				 */
 				if (c_flags & CALLOUT_LOCAL_ALLOC) {
-					KASSERT(c->c_flags ==
-					    CALLOUT_LOCAL_ALLOC,
-					    ("corrupted callout"));
+					MPASS(cc == cc_handover);
 					c->c_func = NULL;
 					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
 					    c_links.sle);
 				}
-				cc->cc_curr = NULL;
-				if (cc->cc_waiting) {
+				c->c_flags &= ~CALLOUT_EXECUTING;
+				if (c->c_flags & CALLOUT_DRAINING) {
 					/*
 					 * There is someone waiting
 					 * for the callout to complete.
 					 */
-					cc->cc_waiting = 0;
-					CC_UNLOCK(cc);
-					wakeup(&cc->cc_waiting);
-					CC_LOCK(cc);
-				}
+					c->c_flags &= ~CALLOUT_DRAINING;
+					CC_UNLOCK(cc_handover);
+					wakeup(&c->c_flags);
+				} else
+					CC_UNLOCK(cc_handover);
+				CC_LOCK(cc);
 				steps = 0;
 				c = cc->cc_next;
 			}
@@ -619,7 +634,7 @@
 	cpu = c->c_cpu;
retry:
 	cc = callout_lock(c);
-	if (cc->cc_curr == c) {
+	if (c->c_flags & CALLOUT_EXECUTING) {
 		/*
 		 * We're being asked to reschedule a callout which is
 		 * currently in progress.  If there is a lock then we
@@ -627,7 +642,7 @@
 		 */
 		if (c->c_lock != NULL && !cc->cc_cancel)
 			cancelled = cc->cc_cancel = 1;
-		if (cc->cc_waiting) {
+		if (c->c_flags & CALLOUT_DRAINING) {
 			/*
 			 * Someone has called callout_drain to kill this
 			 * callout.  Don't reschedule.
@@ -735,12 +750,12 @@
 		 * If it wasn't on the queue and it isn't the current
 		 * callout, then we can't stop it, so just bail.
 		 */
-		if (cc->cc_curr != c) {
+		if (!(c->c_flags & CALLOUT_EXECUTING)) {
 			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 			    c, c->c_func, c->c_arg);
 			CC_UNLOCK(cc);
 			if (sq_locked)
-				sleepq_release(&cc->cc_waiting);
+				sleepq_release(&c->c_flags);
 			return (0);
 		}
 
@@ -751,7 +766,7 @@
 			 * just wait for the current invocation to
 			 * finish.
 			 */
-			while (cc->cc_curr == c) {
+			while (c->c_flags & CALLOUT_EXECUTING) {
 
 				/*
 				 * Use direct calls to sleepqueue interface
@@ -772,17 +787,16 @@
 				 */
 				if (!sq_locked) {
 					CC_UNLOCK(cc);
-					sleepq_lock(&cc->cc_waiting);
+					sleepq_lock(&c->c_flags);
 					sq_locked = 1;
 					goto again;
 				}
-				cc->cc_waiting = 1;
+				c->c_flags |= CALLOUT_DRAINING;
 				DROP_GIANT();
 				CC_UNLOCK(cc);
-				sleepq_add(&cc->cc_waiting,
-				    &cc->cc_lock.lock_object, "codrain",
+				sleepq_add(&c->c_flags, NULL, "codrain",
 				    SLEEPQ_SLEEP, 0);
-				sleepq_wait(&cc->cc_waiting, 0);
+				sleepq_wait(&c->c_flags, 0);
 				sq_locked = 0;
 
 				/* Reacquire locks previously released. */
@@ -811,7 +825,7 @@
 		return (0);
 	}
 	if (sq_locked)
-		sleepq_release(&cc->cc_waiting);
+		sleepq_release(&c->c_flags);
 
 	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
 
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 215865)
+++ sys/sys/callout.h	(working copy)
@@ -64,6 +64,8 @@
 #define	CALLOUT_MPSAFE		0x0008 /* callout handler is mp safe */
 #define	CALLOUT_RETURNUNLOCKED	0x0010 /* handler returns with mtx unlocked */
 #define	CALLOUT_SHAREDLOCK	0x0020 /* callout lock held in shared mode */
+#define	CALLOUT_EXECUTING	0x0040 /* callout is currently executing */
+#define	CALLOUT_DRAINING	0x0080 /* callout is being drained */
 
 struct callout_handle {
 	struct callout	*callout;
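
Not part of the diff, added for review context: a minimal sketch of the kind of callout_drain(9) consumer this change affects. With the patch, a running handler is marked CALLOUT_EXECUTING in c_flags, and a drainer sets CALLOUT_DRAINING and sleeps on &c->c_flags instead of the per-CPU cc_waiting field (the hunks at @@ -751 and @@ -772 above). The foo_* driver names, softc layout, and one-second period below are invented for illustration only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct foo_softc {
	struct mtx	foo_mtx;	/* protects foo_ticks and the callout */
	struct callout	foo_callout;	/* hypothetical periodic timer */
	int		foo_ticks;
};

static void
foo_tick(void *arg)
{
	struct foo_softc *sc = arg;

	/* Runs with foo_mtx held because of callout_init_mtx(). */
	mtx_assert(&sc->foo_mtx, MA_OWNED);
	sc->foo_ticks++;
	/* Reschedule ourselves one second from now. */
	callout_reset(&sc->foo_callout, hz, foo_tick, sc);
}

static void
foo_attach(struct foo_softc *sc)
{

	mtx_init(&sc->foo_mtx, "foo timer", NULL, MTX_DEF);
	callout_init_mtx(&sc->foo_callout, &sc->foo_mtx, 0);
	mtx_lock(&sc->foo_mtx);
	callout_reset(&sc->foo_callout, hz, foo_tick, sc);
	mtx_unlock(&sc->foo_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{

	/*
	 * callout_drain() may sleep until an already-running handler
	 * (CALLOUT_EXECUTING with this patch) has returned, so it is
	 * called without foo_mtx held.
	 */
	callout_drain(&sc->foo_callout);
	mtx_destroy(&sc->foo_mtx);
}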