diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 0fa1af6ef5a7..daa038a02040 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -160,6 +160,7 @@ struct cc_exec {
  * state for the callout processing thread on the individual CPU.
  */
 struct callout_cpu {
+	int			cc_in_callout_process;
 	struct mtx_padalign	cc_lock;
 	struct cc_exec 		cc_exec_entity[2];
 	struct callout		*cc_next;
@@ -315,6 +316,7 @@ callout_cpu_init(struct callout_cpu *cc, int cpu)
 {
 	int i;
 
+	cc->cc_in_callout_process = 0;
 	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN);
 	cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
 	    callwheelsize, M_CALLOUT,
@@ -440,6 +442,10 @@ callout_process(sbintime_t now)
 
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
+	if (cc->cc_in_callout_process) {
+		printf("%s: cpu %d already processing callouts! %d\n", __func__, curcpu, cc->cc_in_callout_process);
+	}
+	cc->cc_in_callout_process++;
 
 	/* Compute the buckets of the last scan and present times. */
 	firstb = callout_hash(cc->cc_lastscan);
@@ -495,6 +501,8 @@ callout_process(sbintime_t now)
 				next = cc_exec_next(cc);
 				cc_exec_next(cc) = NULL;
 			} else {
+				if (cc_exec_next(cc) == c)
+					printf("%s: removing cc next!\n", __func__);
 				LIST_REMOVE(c, c_links.le);
 				TAILQ_INSERT_TAIL(&cc->cc_expireq,
 				    c, c_links.tqe);
@@ -534,6 +542,8 @@ callout_process(sbintime_t now)
 	cc->cc_firstevent = last;
 	cpu_new_callout(curcpu, last, first);
 
+	cc->cc_in_callout_process--;
+
 #ifdef CALLOUT_PROFILING
 	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
 	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;