Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 236814)
+++ sys/sys/callout.h	(working copy)
@@ -48,6 +48,7 @@
 #define	CALLOUT_SHAREDLOCK	0x0020 /* callout lock held in shared mode */
 #define	CALLOUT_DFRMIGRATION	0x0040 /* callout in deferred migration mode */
 #define	CALLOUT_PROCESSED	0x0080 /* callout in wheel or processing list? */
+#define	CALLOUT_DIRECT		0x1000 /* allow exec from hw int context */
 
 struct callout_handle {
 	struct callout *callout;
@@ -69,7 +70,7 @@
 	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
 int	callout_reset_bt_on(struct callout *, struct bintime, void(*)(void *),
-	    void *, int);
+	    void *, int, int);
 int	callout_reset_on(struct callout *, int, void (*)(void *), void *, int);
 #define	callout_reset(c, on_tick, fn, arg)				\
     callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
Index: sys/kern/subr_sleepqueue.c
===================================================================
--- sys/kern/subr_sleepqueue.c	(revision 236814)
+++ sys/kern/subr_sleepqueue.c	(working copy)
@@ -374,7 +374,7 @@
 	MPASS(TD_ON_SLEEPQ(td));
 	MPASS(td->td_sleepqueue == NULL);
 	MPASS(wchan != NULL);
-	callout_reset_bt_on(&td->td_slpcallout, bt, sleepq_timeout, td, PCPU_GET(cpuid));
+	callout_reset_bt_on(&td->td_slpcallout, bt, sleepq_timeout, td, PCPU_GET(cpuid), 0);
 }
 
 void
Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 236814)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -402,10 +402,18 @@
 	TAILQ_FOREACH(tmp, sc, c_links.tqe) {
 		if ((!flag || flag == 1) &&
 		    bintime_cmp(&tmp->c_time, &now, <=)) {
-			TAILQ_INSERT_TAIL(cc->cc_localexp,tmp,c_staiter);
-			TAILQ_REMOVE(sc, tmp, c_links.tqe);
-			tmp->c_flags |= CALLOUT_PROCESSED;
-			need_softclock = 1;
+			if (tmp->c_flags & CALLOUT_DIRECT) {
+				tmp->c_func(tmp->c_arg);
+				TAILQ_REMOVE(sc, tmp, c_links.tqe);
+				tmp->c_flags &= ~CALLOUT_PENDING;
+			}
+			else {
+				TAILQ_INSERT_TAIL(cc->cc_localexp,
+				    tmp,c_staiter);
+				TAILQ_REMOVE(sc, tmp, c_links.tqe);
+				tmp->c_flags |= CALLOUT_PROCESSED;
+				need_softclock = 1;
+			}
 		}
 		if ((flag == 1 || flag == 2) &&
 		    bintime_cmp(&tmp->c_time, &now, >)) {
@@ -466,7 +474,7 @@
 
 static void
 callout_cc_add(struct callout *c, struct callout_cpu *cc,
-    struct bintime to_bintime, void (*func)(void *), void *arg, int cpu)
+    struct bintime to_bintime, void (*func)(void *), void *arg, int cpu, int direct)
 {
 	int bucket;
 
@@ -476,6 +484,8 @@
 	}
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
+	if (direct)
+		c->c_flags |= CALLOUT_DIRECT;
 	c->c_flags &= ~CALLOUT_PROCESSED;
 	c->c_func = func;
 	c->c_time = to_bintime;
@@ -654,7 +664,7 @@
 		 */
 		new_cc = callout_cpu_switch(c, cc, new_cpu);
 		callout_cc_add(c, new_cc, new_time, new_func, new_arg,
-		    new_cpu);
+		    new_cpu, 0);
 		CC_UNLOCK(new_cc);
 		CC_LOCK(cc);
 #else
@@ -818,7 +828,7 @@
  */
 int
 callout_reset_bt_on(struct callout *c, struct bintime bt, void (*ftn)(void *),
-    void *arg, int cpu)
+    void *arg, int cpu, int direct)
 {
 	struct callout_cpu *cc;
 	int cancelled = 0;
@@ -892,7 +902,7 @@
 	}
 #endif
 
-	callout_cc_add(c, cc, bt, ftn, arg, cpu);
+	callout_cc_add(c, cc, bt, ftn, arg, cpu, direct);
 	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %ld %ld",
 	    cancelled ? "re" : "", c, c->c_func, c->c_arg, bt.sec, bt.frac);
 	CC_UNLOCK(cc);
@@ -910,7 +920,7 @@
 	getbinuptime(&now);
 	bintime_mul(&bt,to_ticks);
 	bintime_add(&bt,&now);
-	return (callout_reset_bt_on(c, bt, ftn, arg, cpu));
+	return (callout_reset_bt_on(c, bt, ftn, arg, cpu, 0));
 }
 
 /*