Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 216952)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -550,7 +550,7 @@ softclock(void *arg)
 					/*
 					 * There must not be any waiting
 					 * thread now because the callout
-					 * has a blocked CPU.
+					 * is migrating.
 					 * Also, the callout must not be
 					 * freed, but that is not easy to
 					 * assert.
@@ -564,6 +564,7 @@ softclock(void *arg)
 					cc->cc_migration_ticks = 0;
 					cc->cc_migration_func = NULL;
 					cc->cc_migration_arg = NULL;
+					c->c_cpu = CPUBLOCK;
 					CC_UNLOCK(cc);
 					new_cc = CC_CPU(new_cpu);
 					CC_LOCK(new_cc);
@@ -733,7 +734,6 @@ callout_reset_on(struct callout *c, int to_ticks,
 	 * to a more appropriate moment.
 	 */
 	if (c->c_cpu != cpu) {
-		c->c_cpu = CPUBLOCK;
 		if (cc->cc_curr == c) {
 			cc->cc_migration_cpu = cpu;
 			cc->cc_migration_ticks = to_ticks;
@@ -745,9 +745,11 @@ callout_reset_on(struct callout *c, int to_ticks,
 			CC_UNLOCK(cc);
 			return (cancelled);
 		}
+		c->c_cpu = CPUBLOCK;
 		CC_UNLOCK(cc);
 		cc = CC_CPU(cpu);
 		CC_LOCK(cc);
+		MPASS(c->c_cpu == CPUBLOCK);
 		c->c_cpu = cpu;
 	}
 #endif
@@ -817,6 +819,14 @@ again:
 		goto again;
 	}
 
+	/* If the callout is scheduled for migration, cancel that. */
+	if (cc->cc_migration_cpu != CPUBLOCK) {
+		cc->cc_migration_cpu = CPUBLOCK;
+		cc->cc_migration_ticks = 0;
+		cc->cc_migration_func = NULL;
+		cc->cc_migration_arg = NULL;
+	}
+
 	/*
 	 * If the callout isn't pending, it's not on the queue, so
 	 * don't attempt to remove it from the queue.  We can try to