Index: sys/kern/kern_timeout.c
===================================================================
RCS file: /dump/FreeBSD-CVS/src/sys/kern/kern_timeout.c,v
retrieving revision 1.91.2.1
diff -u -r1.91.2.1 kern_timeout.c
--- sys/kern/kern_timeout.c	31 Jan 2005 23:26:16 -0000	1.91.2.1
+++ sys/kern/kern_timeout.c	13 Mar 2005 20:14:42 -0000
@@ -80,6 +80,12 @@
  *                  If curr_callout is non-NULL, threads waiting on
  *                  callout_wait will be woken up as soon as the
  *                  relevant callout completes.
+ * curr_cancelled - Setting this to 1 with both callout_lock and Giant
+ *                  held guarantees that the current callout will not
+ *                  run.  The softclock() function sets this to 0 before
+ *                  it drops callout_lock to acquire Giant, and it calls
+ *                  the handler only if curr_cancelled is still 0 when
+ *                  Giant is successfully acquired.
  * wakeup_ctr     - Incremented every time a thread wants to wait
  *                  for a callout to complete.  Modified only when
  *                  curr_callout is non-NULL.
@@ -88,6 +94,7 @@
  *                  cutt_callout is non-NULL.
  */
 static struct callout *curr_callout;
+static int curr_cancelled;
 static int wakeup_ctr;
 static int wakeup_needed;
 
@@ -237,14 +244,27 @@
 					c->c_flags = CALLOUT_LOCAL_ALLOC;
 					SLIST_INSERT_HEAD(&callfree, c,
 					    c_links.sle);
+					curr_callout = NULL;
 				} else {
 					c->c_flags =
 					    (c->c_flags & ~CALLOUT_PENDING);
+					curr_callout = c;
 				}
-				curr_callout = c;
+				curr_cancelled = 0;
 				mtx_unlock_spin(&callout_lock);
 				if (!(c_flags & CALLOUT_MPSAFE)) {
 					mtx_lock(&Giant);
+					/*
+					 * The callout may have been cancelled
+					 * while we switched locks.
+					 */
+					if (curr_cancelled) {
+						mtx_unlock(&Giant);
+						mtx_lock_spin(&callout_lock);
+						goto done_locked;
+					}
+					/* The callout cannot be stopped now. */
+					curr_cancelled = 1;
 					gcalls++;
 					CTR1(KTR_CALLOUT, "callout %p", c_func);
 				} else {
@@ -278,6 +298,7 @@
 				if (!(c_flags & CALLOUT_MPSAFE))
 					mtx_unlock(&Giant);
 				mtx_lock_spin(&callout_lock);
+done_locked:
 				curr_callout = NULL;
 				if (wakeup_needed) {
 					/*
@@ -396,14 +417,22 @@
 {
 
 	mtx_lock_spin(&callout_lock);
-	if (c == curr_callout && wakeup_needed) {
+	if (c == curr_callout) {
 		/*
 		 * We're being asked to reschedule a callout which is
-		 * currently in progress, and someone has called
-		 * callout_drain to kill that callout.  Don't reschedule.
+		 * currently in progress.  If it runs under Giant then
+		 * we can cancel the callout if it has not really started.
 		 */
-		mtx_unlock_spin(&callout_lock);
-		return;
+		if ((c->c_flags & CALLOUT_MPSAFE) == 0 && !curr_cancelled)
+			curr_cancelled = 1;
+		if (wakeup_needed) {
+			/*
+			 * Someone has called callout_drain to kill this
+			 * callout.  Don't reschedule.
+			 */
+			mtx_unlock_spin(&callout_lock);
+			return;
+		}
 	}
 	if (c->c_flags & CALLOUT_PENDING) {
 		if (nextsoftcheck == c) {
@@ -444,7 +473,13 @@
 	struct	callout *c;
 	int	safe;
 {
-	int wakeup_cookie;
+	int use_mtx, wakeup_cookie;
+
+	if (!safe && (c->c_flags & CALLOUT_MPSAFE) == 0) {
+		use_mtx = mtx_owned(&Giant);
+	} else {
+		use_mtx = 0;
+	}
 
 	mtx_lock_spin(&callout_lock);
 	/*
@@ -452,7 +487,11 @@
 	 */
 	if (!(c->c_flags & CALLOUT_PENDING)) {
 		c->c_flags &= ~CALLOUT_ACTIVE;
-		if (c == curr_callout && safe) {
+		if (c != curr_callout) {
+			mtx_unlock_spin(&callout_lock);
+			return (0);
+		}
+		if (safe) {
 			/* We need to wait until the callout is finished. */
 			wakeup_needed = 1;
 			wakeup_cookie = wakeup_ctr++;
@@ -468,6 +507,11 @@
 				cv_wait(&callout_wait, &callout_wait_lock);
 
 			mtx_unlock(&callout_wait_lock);
+		} else if (use_mtx && !curr_cancelled) {
+			/* We can stop the callout before it runs. */
+			curr_cancelled = 1;
+			mtx_unlock_spin(&callout_lock);
+			return (1);
 		} else
 			mtx_unlock_spin(&callout_lock);
 		return (0);
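
Below is a hypothetical consumer fragment, not part of the patch, sketching the guarantee the change is intended to provide.  The mydev_* names are invented for illustration; the callout interfaces used (callout_init, callout_reset, callout_stop) are the standard ones.  The callout is initialized without CALLOUT_MPSAFE, so softclock() runs the handler with Giant held, and a callout_stop() issued while the caller holds Giant either cancels the handler before it runs (return 1) or reports that it could not (return 0).

/*
 * Hypothetical driver fragment (not part of the patch; mydev_* names
 * are invented for illustration).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct mydev_softc {
	struct callout	sc_timer;
};

static void
mydev_tick(void *arg)
{
	struct mydev_softc *sc = arg;

	/* Giant is held here because the callout is not MPSAFE. */
	callout_reset(&sc->sc_timer, hz, mydev_tick, sc);
}

static void
mydev_attach(struct mydev_softc *sc)
{
	callout_init(&sc->sc_timer, 0);		/* 0 => not MPSAFE */
	callout_reset(&sc->sc_timer, hz, mydev_tick, sc);
}

static void
mydev_detach(struct mydev_softc *sc)
{
	GIANT_REQUIRED;

	/*
	 * With curr_cancelled in place, callout_stop() can return 1 even
	 * when softclock() has already dequeued the callout but has not
	 * yet acquired Giant; a return of 1 means mydev_tick() will not
	 * run, so no further waiting is needed here.
	 */
	if (callout_stop(&sc->sc_timer) == 0) {
		/* Could not cancel: the handler ran or was not scheduled. */
	}
}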