Index: sys/sys/sleepqueue.h
===================================================================
--- sys/sys/sleepqueue.h	(revision 237202)
+++ sys/sys/sleepqueue.h	(working copy)
@@ -107,12 +107,12 @@
 struct sleepqueue *sleepq_lookup(void *wchan);
 void	sleepq_release(void *wchan);
 void	sleepq_remove(struct thread *td, void *wchan);
-int	sleepq_signal(void *wchan, int flags, int pri, int queue);
 void	_sleepq_set_timeout(void *wchan, struct bintime *bt, int timo);
 #define	sleepq_set_timeout(wchan, timo)					\
 	_sleepq_set_timeout((wchan), (NULL), (timo))
 #define	sleepq_set_timeout_bt(wchan, bt)				\
 	_sleepq_set_timeout((wchan), (&bt), (0))
+int	sleepq_signal(void *wchan, int flags, int pri, int queue);
 u_int	sleepq_sleepcnt(void *wchan, int queue);
 int	sleepq_timedwait(void *wchan, int pri);
 int	sleepq_timedwait_sig(void *wchan, int pri);
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 237502)
+++ sys/sys/callout.h	(working copy)
@@ -62,11 +62,19 @@
 #ifdef _KERNEL
 extern int ncallout;
 
+void	callout_init(struct callout *, int);
+void	_callout_init_lock(struct callout *, struct lock_object *, int);
+extern void (*callout_new_inserted)(int cpu, struct bintime bt);
+int	_callout_reset_on(struct callout *, struct bintime *, int,
+	    void (*)(void *), void *, int, int);
+int	callout_schedule(struct callout *, int);
+int	callout_schedule_on(struct callout *, int, int);
+int	_callout_stop_safe(struct callout *, int);
+void	callout_tick(void);
+
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
 #define	callout_drain(c)	_callout_stop_safe(c, 1)
-void	callout_init(struct callout *, int);
-void	_callout_init_lock(struct callout *, struct lock_object *, int);
 #define	callout_init_mtx(c, mtx, flags)					\
 	_callout_init_lock((c), ((mtx) != NULL) ? &(mtx)->lock_object :	\
 	    NULL, (flags))
@@ -74,29 +82,20 @@
 	_callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object :	\
 	    NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
-int	_callout_reset_on(struct callout *, struct bintime *, int,
-	    void (*)(void *), void *, int, int);
 #define	callout_reset_on(c, to_ticks, fn, arg, cpu)			\
-	_callout_reset_on((c), (NULL), (to_ticks), (fn), (arg), (cpu),	\
-	    (0))
-#define	callout_reset_flags_on(c, to_ticks, fn, arg, cpu, flags)	\
-	_callout_reset_on((c), (NULL), (to_ticks), (fn), (arg), (cpu),	\
+	_callout_reset_on((c), NULL, (to_ticks), (fn), (arg), (cpu), 0)
+#define	callout_reset_flags_on(c, to_ticks, fn, arg, cpu, flags)	\
+	_callout_reset_on((c), NULL, (to_ticks), (fn), (arg), (cpu),	\
 	    (flags))
 #define	callout_reset_bt_on(c, bt, fn, arg, cpu, flags)			\
-	_callout_reset_on((c), (bt), (0), (fn), (arg), (cpu), (flags))
+	_callout_reset_on((c), (bt), 0, (fn), (arg), (cpu), (flags))
 #define	callout_reset(c, on_tick, fn, arg)				\
 	callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
 #define	callout_reset_curcpu(c, on_tick, fn, arg)			\
 	callout_reset_on((c), (on_tick), (fn), (arg), PCPU_GET(cpuid))
-int	callout_schedule(struct callout *, int);
-int	callout_schedule_on(struct callout *, int, int);
 #define	callout_schedule_curcpu(c, on_tick)				\
 	callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
-int	_callout_stop_safe(struct callout *, int);
-void	callout_tick(void);
-extern void (*callout_new_inserted)(int cpu, struct bintime bt);
-
 #endif
 
 #endif /* _SYS_CALLOUT_H_ */
Index: sys/kern/kern_time.c
===================================================================
--- sys/kern/kern_time.c	(revision 237202)
+++ sys/kern/kern_time.c	(working copy)
@@ -353,8 +353,8 @@
 int
 kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
 {
+	struct bintime bt, bt2, bt3;
 	struct timespec ts;
-	struct bintime bt, bt2, tmp;
 	int error;
 
 	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
@@ -362,8 +362,8 @@
 	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
 		return (0);
 	binuptime(&bt);
-	timespec2bintime(rqt, &tmp);
-	bintime_add(&bt,&tmp);
+	timespec2bintime(rqt, &bt3);
+	bintime_add(&bt, &bt3);
 	for (;;) {
 		sleepq_lock(&nanowait);
 		sleepq_add(&nanowait, NULL, "nanslp", PWAIT | PCATCH, 0);
@@ -374,9 +374,9 @@
 		if (error == ERESTART)
 			error = EINTR;
 		if (rmt != NULL) {
-			tmp = bt;
-			bintime_sub(&tmp, &bt2);
-			bintime2timespec(&tmp, &ts);
+			bt3 = bt;
+			bintime_sub(&bt3, &bt2);
+			bintime2timespec(&bt3, &ts);
 			if (ts.tv_sec < 0)
 				timespecclear(&ts);
 			*rmt = ts;
Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 237502)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -395,8 +395,7 @@
 				tmp->c_func(tmp->c_arg);
 				TAILQ_REMOVE(sc, tmp, c_links.tqe);
 				tmp->c_flags &= ~CALLOUT_PENDING;
-			}
-			else {
+			} else {
 				TAILQ_INSERT_TAIL(cc->cc_localexp,
 				    tmp,c_staiter);
 				TAILQ_REMOVE(sc, tmp, c_links.tqe);
@@ -409,9 +408,9 @@
 			break;
 		first = (first + 1) & callwheelmask;
 	}
-	future = ((last + hz/4) & callwheelmask);
-	max.sec = max.frac = INT_MAX;
-	min.sec = min.frac = INT_MAX;
+	future = (last + hz / 4) & callwheelmask;
+	max.sec = min.sec = ULONG_MAX;
+	max.frac = min.frac = ULONG_MAX;
 	limit.sec = 0;
 	limit.frac = (uint64_t)1 << (64 - 2);
 	bintime_add(&limit, &now);
@@ -426,7 +425,7 @@
 			bintime_add(&tmp_max, &tmp->c_precision);
 			bintime_sub(&tmp_min, &tmp->c_precision);
 			/*
-			 * This is the fist event we're going to process or
+			 * This is the first event we're processing, or the
 			 * event maximal time is less than present minimal.
 			 * In both cases, take it.
 			 */
@@ -449,20 +448,19 @@
 			max = (bintime_cmp(&tmp_max, &max, >)) ? tmp_max : max;
 		}
 		if (last == future ||
-		    (max.sec != INT_MAX && min.sec != INT_MAX))
+		    (max.sec != ULONG_MAX && min.sec != ULONG_MAX))
 			break;
 		last = (last + 1) & callwheelmask;
 	}
-	if (max.sec == INT_MAX && min.sec == INT_MAX) {
+	if (max.sec == ULONG_MAX && min.sec == ULONG_MAX) {
 		next.sec = 0;
 		next.frac = (uint64_t)1 << (64 - 2);
 		bintime_add(&next, &now);
-	}
-	/*
-	 * Now that we found something to aggregate, schedule an interrupt in
-	 * the middle of the previously calculated range.
-	 */
-	else {
+	} else {
+		/*
+		 * Now that we found something to aggregate, schedule an
+		 * interrupt in the middle of the previously calculated range.
+		 */
 		bintime_add(&max, &min);
 		next = max;
 		next.frac >>= 1;
@@ -521,7 +519,7 @@
 		to_bintime = cc->cc_softticks;
 	}
 	c->c_arg = arg;
-	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
+	c->c_flags |= CALLOUT_ACTIVE | CALLOUT_PENDING;
 	if (flags & C_DIRECT_EXEC)
 		c->c_flags |= CALLOUT_DIRECT;
 	c->c_flags &= ~CALLOUT_PROCESSED;
@@ -531,16 +529,13 @@
 	if (flags & C_10US) {
 		tv.tv_usec = 10;
 		timeval2bintime(&tv, &c->c_precision);
-	}
-	else if (flags & C_100US) {
+	} else if (flags & C_100US) {
 		tv.tv_usec = 100;
 		timeval2bintime(&tv, &c->c_precision);
-	}
-	else if (flags & C_1MS) {
+	} else if (flags & C_1MS) {
 		tv.tv_usec = 1000;
 		timeval2bintime(&tv, &c->c_precision);
-	}
-	else {
+	} else {
 		c->c_precision.sec = 0;
 		c->c_precision.frac = 0;
 	}
@@ -783,8 +778,7 @@
 			CC_LOCK(cc);
 			c = cc->cc_next;
 			steps = 0;
-		}
-		else {
+		} else {
 			TAILQ_REMOVE(cc->cc_localexp, c, c_staiter);
 			c = softclock_call_cc(c, cc, &mpcalls,
 			    &lockcalls, &gcalls);
@@ -898,8 +892,7 @@
 		getbinuptime(&now);
 		bintime_mul(&to_bt,to_ticks);
 		bintime_add(&to_bt,&now);
-	}
-	else
+	} else
 		to_bt = *bt;
 	/*
 	 * Don't allow migration of pre-allocated callouts lest they
@@ -935,8 +928,7 @@
 		bucket = get_bucket(&c->c_time);
 		TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 		    c_links.tqe);
-	}
-	else {
+	} else {
 		if (cc->cc_next == c)
 			cc->cc_next = TAILQ_NEXT(c, c_staiter);
 		TAILQ_REMOVE(cc->cc_localexp, c,
@@ -1155,8 +1147,7 @@
 		bucket = get_bucket(&c->c_time);
 		TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 		    c_links.tqe);
-	}
-	else
+	} else
 		TAILQ_REMOVE(cc->cc_localexp, c,
 		    c_staiter);
 	callout_cc_del(c, cc);
Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c	(revision 237202)
+++ sys/kern/kern_clocksource.c	(working copy)
@@ -169,7 +169,7 @@
 	now = state->now;
 	CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
 	    curcpu, now.sec, (u_int)(now.frac >> 32),
-	     (u_int)(now.frac & 0xffffffff));
+	    (u_int)(now.frac & 0xffffffff));
 	done = handleevents(&now, 0);
 	return (done ? FILTER_HANDLED : FILTER_STRAY);
 }
@@ -189,7 +189,7 @@
 
 	CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
 	    curcpu, now->sec, (u_int)(now->frac >> 32),
-	     (u_int)(now->frac & 0xffffffff));
+	    (u_int)(now->frac & 0xffffffff));
 	done = 0;
 	if (fake) {
 		frame = NULL;
@@ -272,8 +272,8 @@
 static void
 getnextcpuevent(struct bintime *event, int idle)
 {
+	struct bintime bt;
 	struct pcpu_state *state;
-	struct bintime tmp;
 	int hardfreq;
 
 	state = DPCPU_PTR(timerstate);
@@ -285,9 +285,9 @@
 		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > hardfreq)
 			hardfreq = tc_min_ticktock_freq;
 		if (hz > hardfreq) {
-			tmp = hardperiod;
-			bintime_mul(&tmp, hz / hardfreq - 1);
-			bintime_add(event, &tmp);
+			bt = hardperiod;
+			bintime_mul(&bt, hz / hardfreq - 1);
+			bintime_add(event, &bt);
 		}
 	}
 	/* Handle callout events. */
@@ -340,7 +340,7 @@
 	}
 	CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
 	    curcpu, event->sec, (u_int)(event->frac >> 32),
-	     (u_int)(event->frac & 0xffffffff), c);
+	    (u_int)(event->frac & 0xffffffff), c);
 }
 
 /* Hardware timer callback function. */
@@ -373,7 +373,7 @@
 	state->now = now;
 	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x", curcpu,
 	    (int)(now.sec), (u_int)(now.frac >> 32),
-	     (u_int)(now.frac & 0xffffffff));
+	    (u_int)(now.frac & 0xffffffff));
 
 #ifdef SMP
 	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
@@ -455,8 +455,7 @@
 	eq = bintime_cmp(&new, next, ==);
 	CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
 	    curcpu, new.sec, (u_int)(new.frac >> 32),
-	     (u_int)(new.frac & 0xffffffff),
-	    eq);
+	    (u_int)(new.frac & 0xffffffff), eq);
 	if (!eq) {
 		*next = new;
 		bintime_sub(&new, now);
@@ -789,7 +788,7 @@
 	binuptime(&now);
 	CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
 	    curcpu, now.sec, (u_int)(now.frac >> 32),
-	     (u_int)(now.frac & 0xffffffff));
+	    (u_int)(now.frac & 0xffffffff));
 	getnextcpuevent(&t, 1);
 	ET_HW_LOCK(state);
 	state->idle = 1;
@@ -818,7 +817,7 @@
 	binuptime(&now);
 	CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
 	    curcpu, now.sec, (u_int)(now.frac >> 32),
-	     (u_int)(now.frac & 0xffffffff));
+	    (u_int)(now.frac & 0xffffffff));
 	spinlock_enter();
 	td = curthread;
 	td->td_intr_nesting_level++;
@@ -842,10 +841,10 @@
 
 	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
 	    curcpu, now.sec, (u_int)(now.frac >> 32),
-	     (u_int)(now.frac & 0xffffffff));
+	    (u_int)(now.frac & 0xffffffff));
 	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
 	    curcpu, t->sec, (u_int)(t->frac >> 32),
-	     (u_int)(t->frac & 0xffffffff));
+	    (u_int)(t->frac & 0xffffffff));
 
 	ET_HW_LOCK(state);
 	if (bintime_cmp(t, &state->nextcyc, ==)) {
@@ -872,36 +871,38 @@
 
 	CTR5(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
 	    curcpu, cpu, (int)(bt.sec), (u_int)(bt.frac >> 32),
-	     (u_int)(bt.frac & 0xffffffff));
+	    (u_int)(bt.frac & 0xffffffff));
 	state = DPCPU_ID_PTR(cpu, timerstate);
 	ET_HW_LOCK(state);
-	/* If there is callout time already set earlier -- do nothing. */
+	/* If there is a callout time already set earlier, do nothing. */
 	if (state->nextcall.sec != -1 &&
 	    bintime_cmp(&bt, &state->nextcall, >=)) {
 		ET_HW_UNLOCK(state);
 		return;
 	}
 	state->nextcall = bt;
-	/* If there is some some other event set earlier -- do nothing. */
+	/* If there is some other event set earlier, do nothing. */
 	if (bintime_cmp(&state->nextcall, &state->nextevent, >=)) {
 		ET_HW_UNLOCK(state);
 		return;
 	}
 	state->nextevent = state->nextcall;
-	/* If timer is periodic -- there is nothing to reprogram. */
+	/* If timer is periodic, there is nothing to reprogram. */
 	if (periodic) {
 		ET_HW_UNLOCK(state);
 		return;
 	}
-	/* If timer is global or of the current CPU -- reprogram it. */
+	/*
+	 * If the timer is global, or per-CPU and this is the current CPU,
+	 * reprogram it.
+	 */
 	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
 		binuptime(&now);
 		loadtimer(&now, 0);
 		ET_HW_UNLOCK(state);
 		return;
 	}
-	/* Otherwise make other CPU to reprogram it. */
+	/* Otherwise, ask the other CPU to reprogram it. */
 	state->handle = 1;
 	ET_HW_UNLOCK(state);
 	ipi_cpu(cpu, IPI_HARDCLOCK);
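
For illustration only, not part of the patch: a minimal sketch of how a kernel
consumer could arm a one-shot callout at an absolute bintime through the
callout_reset_bt_on() macro added to sys/callout.h above, passing a precision
flag so the event can be aggregated with neighbouring ones.  The example_softc
layout and the function names are hypothetical; the C_1MS flag is assumed to be
defined in the patched sys/callout.h, since the kern_timeout.c hunks above
reference it.

/*
 * Illustrative sketch only: hypothetical consumer of the bintime-based
 * callout interface added by this patch.  sc_mtx is assumed to be
 * initialized elsewhere with mtx_init().
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/time.h>

struct example_softc {
	struct mtx	sc_mtx;
	struct callout	sc_callout;
};

static void
example_timeout(void *arg)
{

	/* Runs with sc_mtx held because of callout_init_mtx(). */
}

static void
example_arm(struct example_softc *sc)
{
	struct bintime bt, delta;
	struct timespec ts;

	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);

	/* Absolute expiration time: "now" plus 500us. */
	ts.tv_sec = 0;
	ts.tv_nsec = 500000;
	timespec2bintime(&ts, &delta);
	binuptime(&bt);
	bintime_add(&bt, &delta);

	/* Allow about 1ms of slop (C_1MS) so the event can be aggregated. */
	callout_reset_bt_on(&sc->sc_callout, &bt, example_timeout, sc,
	    PCPU_GET(cpuid), C_1MS);
}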