Index: sys/dev/random/randomdev_soft.c
===================================================================
--- sys/dev/random/randomdev_soft.c	(revision 246035)
+++ sys/dev/random/randomdev_soft.c	(working copy)
@@ -282,9 +282,8 @@ random_kthread(void *arg __unused)
 			random_kthread_control = 0;
 
 		/* Work done, so don't belabour the issue */
-		msleep_spin_bt(&random_kthread_control, &harvest_mtx,
-		    "-", ticks2bintime(hz / 10), zero_bt,
-		    C_PREL(1) | C_HARDCLOCK);
+		msleep_spin_sbt(&random_kthread_control, &harvest_mtx,
+		    "-", (SBT_1S / 10), 0, C_PREL(1));
 	}
 	mtx_unlock_spin(&harvest_mtx);
Index: sys/dev/syscons/syscons.c
===================================================================
--- sys/dev/syscons/syscons.c	(revision 246035)
+++ sys/dev/syscons/syscons.c	(working copy)
@@ -1899,8 +1899,8 @@ done:
 			rate = 2;
 		else
 			rate = 30;
-		callout_reset_bt(&sc->ctimeout, ticks2bintime(hz / rate), zero_bt,
-		    scrn_timer, sc, C_PREL(1) | C_HARDCLOCK);
+		callout_reset_sbt(&sc->ctimeout, (SBT_1S / rate), 0,
+		    scrn_timer, sc, C_PREL(1));
 	}
 }
 
@@ -3845,8 +3845,8 @@ blink_screen(void *arg)
 		(*scp->rndr->draw)(scp, 0, scp->xsize*scp->ysize,
 		    scp->sc->blink_in_progress & 1);
 		scp->sc->blink_in_progress--;
-		callout_reset_bt(&scp->sc->cblink, ticks2bintime(hz / 15), zero_bt,
-		    blink_screen, scp, C_PREL(0) | C_HARDCLOCK);
+		callout_reset_sbt(&scp->sc->cblink, (SBT_1S / 15), 0,
+		    blink_screen, scp, C_PREL(0));
 	}
 }
Index: sys/kern/kern_condvar.c
===================================================================
--- sys/kern/kern_condvar.c	(revision 246035)
+++ sys/kern/kern_condvar.c	(working copy)
@@ -274,8 +274,8 @@ _cv_wait_sig(struct cv *cvp, struct lock_object *l
 * cv_signal or cv_broadcast, EWOULDBLOCK if the timeout expires.
 */
int
-_cv_timedwait_bt(struct cv *cvp, struct lock_object *lock, struct bintime bt,
-    struct bintime pr, int flags)
+_cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt,
+    sbintime_t pr, int flags)
{
	WITNESS_SAVE_DECL(lock_witness);
	struct lock_class *class;
@@ -311,7 +311,7 @@ int
 
	DROP_GIANT();
	sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
-	sleepq_set_timeout_bt(cvp, bt, pr, flags);
+	sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
	if (lock != &Giant.lock_object) {
		if (class->lc_flags & LC_SLEEPABLE)
			sleepq_release(cvp);
@@ -342,8 +342,8 @@ int
 * or ERESTART if a signal was caught.
 */
int
-_cv_timedwait_sig_bt(struct cv *cvp, struct lock_object *lock,
-    struct bintime bt, struct bintime pr, int flags)
+_cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,
+    sbintime_t sbt, sbintime_t pr, int flags)
{
	WITNESS_SAVE_DECL(lock_witness);
	struct lock_class *class;
@@ -380,7 +380,7 @@ int
 
	sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR |
	    SLEEPQ_INTERRUPTIBLE, 0);
-	sleepq_set_timeout_bt(cvp, bt, pr, flags);
+	sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
	if (lock != &Giant.lock_object) {
		if (class->lc_flags & LC_SLEEPABLE)
			sleepq_release(cvp);
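
Note (illustration, not part of the patch): everything below converts timeout
arguments from struct bintime (96-bit seconds.fraction) to sbintime_t, a signed
64-bit value with whole seconds in the high 32 bits and a binary fraction in
the low 32. A minimal userspace sketch of that representation; the typedef and
SBT_* constants mirror the sys/sys/time.h hunk at the end of this patch:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1MS	(SBT_1S / 1000)

int
main(void)
{
	/* The 100 ms period random_kthread now passes to msleep_spin_sbt(). */
	sbintime_t sbt = SBT_1S / 10;

	printf("whole seconds: %d\n", (int)(sbt >> 32));		/* 0 */
	printf("milliseconds:  %jd\n", (intmax_t)(sbt / SBT_1MS));	/* 100 */
	return (0);
}

Because the type is a plain integer, ordinary comparisons, additions and
shifts replace the bintime_cmp()/bintime_add()/bintime_shift() calls removed
throughout the diff.
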
Index: sys/kern/kern_event.c
===================================================================
--- sys/kern/kern_event.c	(revision 246035)
+++ sys/kern/kern_event.c	(working copy)
@@ -517,14 +517,11 @@ knote_fork(struct knlist *list, int pid)
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */
-static struct bintime
-timer2bintime(intptr_t data)
+static __inline sbintime_t
+timer2sbintime(intptr_t data)
{
-	struct bintime bt;
-
-	bt.sec = data / 1000;
-	bt.frac = (data % 1000) * (((uint64_t)1 << 63) / 500);
-	return bt;
+	return (SBT_1MS * data);
}
 
static void
@@ -546,8 +543,8 @@ filt_timerexpire(void *knx)
	 */
	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
		calloutp = (struct callout *)kn->kn_hook;
-		callout_reset_bt_on(calloutp,
-		    timer2bintime(kn->kn_sdata), zero_bt /* 1ms? */,
+		callout_reset_sbt_on(calloutp,
+		    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
		    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
	}
}
@@ -572,8 +569,8 @@ filt_timerattach(struct knote *kn)
	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init(calloutp, CALLOUT_MPSAFE);
	kn->kn_hook = calloutp;
-	callout_reset_bt_on(calloutp,
-	    timer2bintime(kn->kn_sdata), zero_bt /* 1ms? */,
+	callout_reset_sbt_on(calloutp,
+	    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
	    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
 
	return (0);
@@ -1319,7 +1316,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, stru
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
-	struct bintime abt, rbt;
+	sbintime_t asbt, rsbt;
	struct knote *kn, *marker;
	int count, nkev, error, influx;
	int haskqglobal, touch;
@@ -1339,19 +1336,15 @@ kqueue_scan(struct kqueue *kq, int maxevents, stru
			goto done_nl;
		}
		if (timespecisset(tsp)) {
-			timespec2bintime(tsp, &rbt);
-			if (TIMESEL(&abt, &rbt))
-				bintime_add(&abt, &tc_tick_bt);
-			bintime_add(&abt, &rbt);
-			bintime_shift(&rbt, -tc_timeexp);
-		} else {
-			abt.sec = -1;
-			abt.frac = 0;
-		}
-	} else {
-		abt.sec = 0;
-		abt.frac = 0;
-	}
+			rsbt = timespec2sbintime(*tsp);
+			if (TIMESEL(&asbt, rsbt))
+				asbt += tc_tick_sbt;
+			asbt += rsbt;
+			rsbt >>= tc_precexp;
+		} else
+			asbt = -1;
+	} else
+		asbt = 0;
	marker = knote_alloc(1);
	if (marker == NULL) {
		error = ENOMEM;
@@ -1363,12 +1356,12 @@ kqueue_scan(struct kqueue *kq, int maxevents, stru
retry:
	kevp = keva;
	if (kq->kq_count == 0) {
-		if (abt.sec < 0) {
+		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
-			error = msleep_bt(kq, &kq->kq_lock, PSOCK | PCATCH,
-			    "kqread", abt, rbt, C_ABSOLUTE);
+			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
+			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
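
Note (illustration, not part of the patch): the kqueue_scan() hunk above shows
the pattern repeated in nanosleep, select and poll below: convert the relative
timeout, derive the allowed precision by shifting right by tc_precexp, and
turn the deadline into an absolute uptime for C_ABSOLUTE. A userspace sketch
of just that arithmetic; tc_precexp's value and `now` are made up:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1US	(SBT_1S / 1000000)

static int tc_precexp = 5;	/* example; set by tc_adjprecision() */

int
main(void)
{
	sbintime_t now, rsbt, asbt, prec;

	now = 1000 * SBT_1S;		/* pretend current uptime */
	rsbt = SBT_1S / 100;		/* 10 ms relative timeout */
	prec = rsbt >> tc_precexp;	/* tolerated lateness: timeout / 32 */
	asbt = now + rsbt;		/* absolute deadline for C_ABSOLUTE */

	printf("precision: %jd us\n", (intmax_t)(prec / SBT_1US)); /* ~312 */
	printf("deadline:  %d s into uptime\n", (int)(asbt >> 32));
	return (0);
}
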
Index: sys/kern/kern_resource.c
===================================================================
--- sys/kern/kern_resource.c	(revision 246035)
+++ sys/kern/kern_resource.c	(working copy)
@@ -645,7 +645,7 @@ lim_cb(void *arg)
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
-		callout_reset_bt(&p->p_limco, ticks2bintime(hz), zero_bt,
+		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
		    lim_cb, p, C_PREL(1) | C_HARDCLOCK);
}
@@ -698,7 +698,7 @@ kern_proc_setrlimit(struct thread *td, struct proc
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
-			callout_reset_bt(&p->p_limco, ticks2bintime(hz), zero_bt,
+			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
			    lim_cb, p, C_PREL(1) | C_HARDCLOCK);
		p->p_cpulimit = limp->rlim_cur;
		break;
@@ -1139,7 +1139,7 @@ lim_fork(struct proc *p1, struct proc *p2)
	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
-		callout_reset_bt(&p2->p_limco, ticks2bintime(hz), zero_bt,
+		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
		    lim_cb, p2, C_PREL(1) | C_HARDCLOCK);
}
Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c	(revision 246071)
+++ sys/kern/kern_synch.c	(working copy)
@@ -146,7 +146,7 @@ sleepinit(void)
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
-    const char *wmesg, struct bintime bt, struct bintime pr, int flags)
+    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
@@ -162,7 +162,7 @@ _sleep(void *ident, struct lock_object *lock, int
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock, "Sleeping on \"%s\"",
	    wmesg);
-	KASSERT(bintime_isset(&bt) || mtx_owned(&Giant) || lock != NULL,
+	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
@@ -232,17 +232,17 @@ _sleep(void *ident, struct lock_object *lock, int
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
-	if (bintime_isset(&bt))
-		sleepq_set_timeout_bt(ident, bt, pr, flags);
+	if (sbt != 0)
+		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
-	if (bintime_isset(&bt) && catch)
+	if (sbt != 0 && catch)
		rval = sleepq_timedwait_sig(ident, pri);
-	else if (bintime_isset(&bt))
+	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
@@ -263,8 +263,8 @@ _sleep(void *ident, struct lock_object *lock, int
}
 
int
-msleep_spin_bt(void *ident, struct mtx *mtx, const char *wmesg,
-    struct bintime bt, struct bintime pr, int flags)
+msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
+    sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
@@ -302,8 +302,8 @@ int
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
-	if (bintime_isset(&bt))
-		sleepq_set_timeout_bt(ident, bt, pr, flags);
+	if (sbt != 0)
+		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 
	/*
	 * Can't call ktrace with any spin locks held so it can lock the
@@ -325,7 +325,7 @@ int
		    wmesg);
	sleepq_lock(ident);
#endif
-	if (bintime_isset(&bt))
+	if (sbt != 0)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
@@ -349,28 +349,30 @@ int
 * to a "timo" value of one.
 */
int
-pause_bt(const char *wmesg, struct bintime bt, struct bintime pr, int flags)
+pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
+	int sbt_sec;
 
-	KASSERT(bt.sec >= 0, ("pause: timo must be >= 0"));
+	sbt_sec = sbintime_getsec(sbt);
+	KASSERT(sbt_sec >= 0, ("pause: timo must be >= 0"));
 
	/* silently convert invalid timeouts */
-	if (!bintime_isset(&bt))
-		bt = tick_bt;
+	if (sbt == 0)
+		sbt = tick_sbt;
 
	if (cold) {
		/*
		 * We delay one second at a time to avoid overflowing the
		 * system specific DELAY() function(s):
		 */
-		while (bt.sec > 0) {
+		while (sbt_sec > 0) {
			DELAY(1000000);
-			bt.sec--;
+			sbt_sec--;
		}
-		DELAY(bt.frac >> 44);
+		DELAY((sbt & 0xffffffff) / SBT_1US);
		return (0);
	}
-	return (_sleep(&pause_wchan, NULL, 0, wmesg, bt, pr, flags));
+	return (_sleep(&pause_wchan, NULL, 0, wmesg, sbt, pr, flags));
}
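
Note (illustration, not part of the patch): the cold-boot path of pause_sbt()
above burns whole seconds one DELAY(1000000) at a time so the argument never
overflows, and then converts only the low 32 fraction bits to microseconds —
delaying the full sbt again after the loop would double-count the seconds,
which is why the remainder is masked. Userspace mock-up with DELAY() replaced
by a counter:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1US	(SBT_1S / 1000000)

static int64_t delayed_us;

static void
DELAY(int64_t us)		/* stand-in for the kernel's busy-wait */
{
	delayed_us += us;
}

int
main(void)
{
	sbintime_t sbt = 2 * SBT_1S + SBT_1S / 2;	/* 2.5 seconds */
	int sbt_sec = (int)(sbt >> 32);

	while (sbt_sec > 0) {
		DELAY(1000000);
		sbt_sec--;
	}
	DELAY((sbt & 0xffffffff) / SBT_1US);	/* fraction only */

	printf("total: %jd us\n", (intmax_t)delayed_us);	/* ~2500000 */
	return (0);
}
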
 
@@ -561,8 +563,8 @@ loadav(void *arg)
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
-	callout_reset_bt(&loadav_callout,
-	    ticks2bintime(hz * 4 + (int)(random() % (hz * 2 + 1))), zero_bt,
+	callout_reset_sbt(&loadav_callout,
+	    tick_sbt * (hz * 4 + (int)(random() % (hz * 2 + 1))), 0,
	    loadav, NULL, C_DIRECT_EXEC | C_HARDCLOCK);
}
Index: sys/kern/kern_tc.c
===================================================================
--- sys/kern/kern_tc.c	(revision 246071)
+++ sys/kern/kern_tc.c	(working copy)
@@ -122,8 +122,11 @@ SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnin
 
struct bintime bt_timethreshold;
struct bintime bt_tickthreshold;
+sbintime_t sbt_timethreshold;
+sbintime_t sbt_tickthreshold;
struct bintime tc_tick_bt;
-int tc_timeexp;
+sbintime_t tc_tick_sbt;
+int tc_precexp;
int tc_timepercentage = TC_DEFAULTPERC;
TUNABLE_INT("kern.timecounter.alloweddeviation", &tc_timepercentage);
static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
@@ -347,6 +350,16 @@ binuptime(struct bintime *bt)
}
 
void
+sbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */
+	struct bintime bt;
+
+	binuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;
@@ -404,6 +417,16 @@ getbinuptime(struct bintime *bt)
}
 
void
+getsbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */
+	struct bintime bt;
+
+	getbinuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
@@ -896,6 +919,16 @@ binuptime(struct bintime *bt)
}
 
void
+sbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */
+	struct bintime bt;
+
+	binuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
nanouptime(struct timespec *tsp)
{
 
@@ -938,6 +971,16 @@ getbinuptime(struct bintime *bt)
}
 
void
+getsbinuptime(sbintime_t *sbt)
+{
+	/* XXX: We need a real implementation, but tomorrow */
+	struct bintime bt;
+
+	getbinuptime(&bt);
+	*sbt = bintime2sbintime(bt);
+}
+
+void
getnanouptime(struct timespec *tsp)
{
 
@@ -1725,17 +1768,19 @@ tc_adjprecision(void)
 
	if (tc_timepercentage > 0) {
		t = (99 + tc_timepercentage) / tc_timepercentage;
-		tc_timeexp = fls(t + (t >> 1)) - 1;
+		tc_precexp = fls(t + (t >> 1)) - 1;
		FREQ2BT(hz / tc_tick, &bt_timethreshold);
		FREQ2BT(hz, &bt_tickthreshold);
-		bintime_shift(&bt_timethreshold, tc_timeexp);
-		bintime_shift(&bt_tickthreshold, tc_timeexp);
+		bintime_shift(&bt_timethreshold, tc_precexp);
+		bintime_shift(&bt_tickthreshold, tc_precexp);
	} else {
-		tc_timeexp = 31;
+		tc_precexp = 31;
		bt_timethreshold.sec = INT_MAX;
		bt_timethreshold.frac = ~(uint64_t)0;
		bt_tickthreshold = bt_timethreshold;
	}
+	sbt_timethreshold = bintime2sbintime(bt_timethreshold);
+	sbt_tickthreshold = bintime2sbintime(bt_tickthreshold);
}
 
static int
@@ -1772,8 +1817,10 @@ inittimecounter(void *dummy)
		tc_tick = 1;
	tc_adjprecision();
	FREQ2BT(hz, &tick_bt);
+	tick_sbt = bintime2sbintime(tick_bt);
	tick_rate = hz / tc_tick;
	FREQ2BT(tick_rate, &tc_tick_bt);
+	tc_tick_sbt = bintime2sbintime(tc_tick_bt);
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
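
Note (illustration, not part of the patch): tc_adjprecision() above derives
tc_precexp from the kern.timecounter.alloweddeviation percentage: t = (99 +
pct) / pct is the required divisor, and fls(t + t/2) - 1 rounds it to the
nearest power-of-two exponent. A worked userspace example; fls() is
reimplemented here since it is a kernel/libc routine:

#include <stdio.h>

static int
fls_local(int x)		/* index of highest set bit, 1-based */
{
	int r = 0;

	while (x != 0) {
		r++;
		x >>= 1;
	}
	return (r);
}

int
main(void)
{
	int pct, t;

	for (pct = 1; pct <= 5; pct += 4) {	/* 1% and 5% */
		t = (99 + pct) / pct;
		printf("%d%% -> tc_precexp = %d (grouping within 1/%d)\n",
		    pct, fls_local(t + (t >> 1)) - 1,
		    1 << (fls_local(t + (t >> 1)) - 1));
	}
	return (0);
}

For example, 5% yields tc_precexp = 4, i.e. timeouts may be grouped within
1/16 of their length; sbt_timethreshold/sbt_tickthreshold are the same
thresholds pre-scaled into sbintime_t form for the comparisons added elsewhere
in this patch.
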
Index: sys/kern/kern_time.c
===================================================================
--- sys/kern/kern_time.c	(revision 246035)
+++ sys/kern/kern_time.c	(working copy)
@@ -483,34 +483,32 @@ int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts;
-	struct bintime bt, btt, bt_prec, tmp;
+	sbintime_t sbt, sbtt, prec, tmp;
	int error;
 
	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
-	timespec2bintime(rqt, &tmp);
-	bt_prec = tmp;
-	bintime_shift(&bt_prec, -tc_timeexp);
-	if (TIMESEL(&bt, &tmp))
-		bintime_add(&bt, &tc_tick_bt);
-	bintime_add(&bt, &tmp);
-	error = tsleep_bt(&nanowait, PWAIT | PCATCH, "nanslp", bt, bt_prec,
+	tmp = timespec2sbintime(*rqt);
+	prec = tmp;
+	prec >>= tc_precexp;
+	if (TIMESEL(&sbt, tmp))
+		sbt += tc_tick_sbt;
+	sbt += tmp;
+	error = tsleep_sbt(&nanowait, PWAIT | PCATCH, "nanslp", sbt, prec,
	    C_ABSOLUTE);
	if (error != EWOULDBLOCK) {
		if (error == ERESTART)
			error = EINTR;
-		TIMESEL(&btt, &tmp);
+		TIMESEL(&sbtt, tmp);
		if (rmt != NULL) {
-			tmp = bt;
-			bintime_sub(&tmp, &btt);
-			bintime2timespec(&tmp, &ts);
+			ts = sbintime2timespec(sbt - sbtt);
			if (ts.tv_sec < 0)
				timespecclear(&ts);
			*rmt = ts;
		}
-		if (bintime_cmp(&btt, &bt, >=))
+		if (sbtt >= sbt)
			return (0);
		return (error);
	}
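
Note (illustration, not part of the patch): kern_nanosleep() above now keeps a
single absolute deadline and, on early wakeup, recovers the time left for *rmt
by plain subtraction. Sketch of that remainder computation; sbintime2timespec
mirrors the sys/sys/time.h hunk at the end of the patch:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)

static struct timespec
sbintime2timespec(sbintime_t sbt)
{
	struct timespec ts;

	ts.tv_sec = sbt >> 32;
	ts.tv_nsec = ((uint64_t)1000000000 * (uint32_t)sbt) >> 32;
	return (ts);
}

int
main(void)
{
	sbintime_t deadline, now;
	struct timespec rmt;

	deadline = 10 * SBT_1S;			/* sbt: wakeup target */
	now = 9 * SBT_1S + SBT_1S / 4;		/* sbtt: interrupted here */
	rmt = sbintime2timespec(deadline - now);
	printf("remaining: %jd.%09ld s\n", (intmax_t)rmt.tv_sec, rmt.tv_nsec);
	return (0);
}

The kernel's sign check (ts.tv_sec < 0, then timespecclear()) covers the race
where the wakeup lands after the deadline has already passed.
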
Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 246035)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -121,7 +121,7 @@ struct cc_exec {
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	int			ce_migration_cpu;
-	struct bintime		ce_migration_time;
+	sbintime_t		ce_migration_time;
#endif
	int			cc_cancel;
	int			cc_waiting;
@@ -138,8 +138,8 @@ struct callout_cpu {
	struct callout_tailq	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_list	cc_callfree;
-	struct bintime		cc_firstevent;
-	struct bintime		cc_lastscan;
+	sbintime_t		cc_firstevent;
+	sbintime_t		cc_lastscan;
	void			*cc_cookie;
};
 
@@ -217,7 +217,7 @@ cc_cme_cleanup(struct callout_cpu *cc, int direct)
	cc->cc_exec_entity[direct].cc_waiting = 0;
#ifdef SMP
	cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
-	bintime_clear(&cc->cc_exec_entity[direct].ce_migration_time);
+	cc->cc_exec_entity[direct].ce_migration_time = 0;
	cc->cc_exec_entity[direct].ce_migration_func = NULL;
	cc->cc_exec_entity[direct].ce_migration_arg = NULL;
#endif
@@ -368,30 +368,29 @@ SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER
#define	CC_HASH_SHIFT	10
 
static inline int
-callout_hash(struct bintime *bt)
+callout_hash(sbintime_t sbt)
{
-
-	return (int) ((bt->sec << CC_HASH_SHIFT) +
-	    (bt->frac >> (64 - CC_HASH_SHIFT)));
+
+	return (int)(sbt >> (32 - CC_HASH_SHIFT));
}
 
static inline int
-get_bucket(struct bintime *bt)
+callout_get_bucket(sbintime_t sbt)
{
 
-	return callout_hash(bt) & callwheelmask;
+	return callout_hash(sbt) & callwheelmask;
}
 
void
callout_process(struct bintime *now)
{
-	struct bintime first, last, max, tmp_max;
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_tailq *sc;
	uint64_t lookahead;
-	int depth_dir, firstb, mpcalls_dir, lastb, nowb, lockcalls_dir,
-	    need_softclock, exit_allowed, exit_wanted;
+	sbintime_t first, last, max, now_sbt, tmp_max;
+	int depth_dir, firstb, lastb, mpcalls_dir, nowb,
+	    lockcalls_dir, need_softclock, exit_allowed, exit_wanted;
 
	need_softclock = 0;
	depth_dir = 0;
@@ -401,22 +400,23 @@ callout_process(struct bintime *now)
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
 
	/* Compute the buckets of the last scan and present times. */
-	firstb = callout_hash(&cc->cc_lastscan);
-	cc->cc_lastscan = *now;
-	nowb = callout_hash(now);
+	firstb = callout_hash(cc->cc_lastscan);
+	now_sbt = bintime2sbintime(*now);
+	cc->cc_lastscan = now_sbt;
+	nowb = callout_hash(now_sbt);
 
	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
-		lookahead = 1LLU << 60;		/* 1/16s */
+		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
-		lookahead = 1LLU << 61;		/* 1/8s */
+		lookahead = (SBT_1S / 8);
	else
-		lookahead = 1LLU << 63;		/* 1/2s */
-	first = last = *now;
-	bintime_addx(&first, lookahead / 2);
-	bintime_addx(&last, lookahead);
-	last.frac &= (0xffffffffffffffffLLU << (64 - CC_HASH_SHIFT));
-	lastb = callout_hash(&last) - 1;
+		lookahead = (SBT_1S / 2);
+	first = last = now_sbt;
+	first += (lookahead / 2);
+	last += lookahead;
+	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
+	lastb = callout_hash(last) - 1;
	max = last;
 
	/*
@@ -438,7 +438,7 @@ callout_process(struct bintime *now)
		tmp = TAILQ_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if present time within allowed. */
-			if (bintime_cmp(&tmp->c_time, now, <=)) {
+			if (tmp->c_time <= now_sbt) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
@@ -464,22 +464,22 @@ callout_process(struct bintime *now)
				continue;
			}
			/* Skip events from distant future. */
-			if (bintime_cmp(&tmp->c_time, &max, >=))
+			if (tmp->c_time >= max)
				goto next;
			/*
			 * Event minimal time is bigger than present maximal
			 * time, so it cannot be aggregated.
			 */
-			if (bintime_cmp(&tmp->c_time, &last, >)) {
+			if (tmp->c_time > last) {
				exit_wanted = 1;
				goto next;
			}
			/* Update first and last time, respecting this event. */
-			if (bintime_cmp(&tmp->c_time, &first, <))
+			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time;
-			bintime_add(&tmp_max, &tmp->c_precision);
-			if (bintime_cmp(&tmp_max, &last, <))
+			tmp_max += tmp->c_precision;
+			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = TAILQ_NEXT(tmp, c_links.tqe);
@@ -500,7 +500,8 @@ next:
	}
	cc->cc_exec_next_dir = NULL;
	if (callout_new_inserted != NULL)
-		(*callout_new_inserted)(curcpu, last, first);
+		(*callout_new_inserted)(curcpu, sbintime2bintime(last),
+		    sbintime2bintime(first));
	cc->cc_firstevent = last;
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
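
Note (illustration, not part of the patch): with 32.32 fixed point, the new
callout_hash() above reduces bucket selection to one shift — dropping (32 -
CC_HASH_SHIFT) low bits leaves a counter that advances 2^CC_HASH_SHIFT (1024)
times per second. Userspace sketch; the wheel size is an arbitrary power of
two here, while the kernel sizes it from the callout count:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S		((sbintime_t)1 << 32)
#define	CC_HASH_SHIFT	10

static const int callwheelmask = 512 - 1;	/* example: 512 buckets */

static int
callout_hash(sbintime_t sbt)
{

	return ((int)(sbt >> (32 - CC_HASH_SHIFT)));
}

int
main(void)
{
	sbintime_t t = 5 * SBT_1S;

	/* Events 1/1024 s apart land in consecutive buckets... */
	printf("%d %d\n", callout_hash(t) & callwheelmask,
	    callout_hash(t + (SBT_1S >> CC_HASH_SHIFT)) & callwheelmask);
	/* ...and the masked index wraps around this wheel every 0.5 s. */
	printf("%d %d\n", callout_hash(t) & callwheelmask,
	    callout_hash(t + SBT_1S / 2) & callwheelmask);
	return (0);
}
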
 
@@ -542,39 +543,38 @@ callout_lock(struct callout *c)
 
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
-    struct bintime to_bintime, struct bintime precision, void (*func)(void *),
+    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
-	struct bintime last;
+	sbintime_t last;
	int bucket;
 
	CC_LOCK_ASSERT(cc);
-	if (bintime_cmp(&to_bintime, &cc->cc_lastscan, <))
-		to_bintime = cc->cc_lastscan;
+	if (sbt < cc->cc_lastscan)
+		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	if (flags & C_DIRECT_EXEC)
		c->c_flags |= CALLOUT_DIRECT;
	c->c_flags &= ~CALLOUT_PROCESSED;
	c->c_func = func;
-	c->c_time = to_bintime;
+	c->c_time = sbt;
	c->c_precision = precision;
-	CTR4(KTR_CALLOUT, "precision set for %p: %d.%08x%08x",
-	    c, c->c_precision.sec, (u_int) (c->c_precision.frac >> 32),
-	    (u_int) (c->c_precision.frac & 0xffffffff));
-	bucket = get_bucket(&c->c_time);
+	bucket = callout_get_bucket(c->c_time);
+	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
+	    c, (int)(c->c_precision >> 32),
+	    (u_int)(c->c_precision & 0xffffffff));
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[bucket], c, c_links.tqe);
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
-	last = c->c_time;
-	bintime_add(&last, &c->c_precision);
-	if (callout_new_inserted != NULL &&
-	    (bintime_cmp(&last, &cc->cc_firstevent, <) ||
-	    !bintime_isset(&cc->cc_firstevent))) {
+	last = c->c_time + c->c_precision;
+	if (callout_new_inserted != NULL && ((last < cc->cc_firstevent) ||
+	    (cc->cc_firstevent == 0))) {
		cc->cc_firstevent = last;
-		(*callout_new_inserted)(cpu, last, c->c_time);
+		(*callout_new_inserted)(cpu, sbintime2bintime(last),
+		    sbintime2bintime(c->c_time));
	}
}
 
@@ -602,7 +602,7 @@ softclock_call_cc(struct callout *c, struct callou
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
-	struct bintime new_time;
+	sbintime_t new_time;
#endif
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
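
Note (illustration, not part of the patch): the aggregation logic in
callout_process() above treats each pending callout as an interval [c_time,
c_time + c_precision] and narrows one shared wakeup window during the bucket
scan. A standalone sketch of that narrowing rule; the window bounds and event
values are invented:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1MS	(SBT_1S / 1000)

int
main(void)
{
	sbintime_t first = 250 * SBT_1MS;	/* now + lookahead / 2 */
	sbintime_t last = 500 * SBT_1MS;	/* now + lookahead */
	/* Three pending events: { c_time, c_precision }. */
	sbintime_t ev[3][2] = {
		{ 300 * SBT_1MS, 100 * SBT_1MS },
		{ 200 * SBT_1MS,  50 * SBT_1MS },
		{ 350 * SBT_1MS, 200 * SBT_1MS },
	};
	int i;

	for (i = 0; i < 3; i++) {
		if (ev[i][0] > last)		/* cannot be aggregated */
			continue;
		if (ev[i][0] < first)		/* earliest requested time */
			first = ev[i][0];
		if (ev[i][0] + ev[i][1] < last)	/* earliest hard deadline */
			last = ev[i][0] + ev[i][1];
	}
	/* One hardware interrupt anywhere in [200, 250] ms serves the batch. */
	printf("fire once in [%jd, %jd] ms\n",
	    (intmax_t)(first / SBT_1MS), (intmax_t)(last / SBT_1MS));
	return (0);
}
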
 
@@ -896,27 +896,28 @@ DPCPU_DECLARE(struct bintime, hardclocktime);
 *   callout_deactivate() - marks the callout as having been serviced
 */
int
-callout_reset_bt_on(struct callout *c, struct bintime bt, struct bintime pr,
+callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
    void (*ftn)(void *), void *arg, int cpu, int flags)
{
-	struct bintime to_bt, pr1;
+	sbintime_t to_sbt, pr;
+	struct bintime to_bt;
	struct callout_cpu *cc;
	int bucket, cancelled, direct;
 
	cancelled = 0;
	if (flags & C_ABSOLUTE) {
-		to_bt = bt;
+		to_sbt = sbt;
	} else {
-		if ((flags & C_HARDCLOCK) && bintime_cmp(&bt, &tick_bt, <))
-			bt = tick_bt;
+		if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
+			sbt = tick_sbt;
		if ((flags & C_HARDCLOCK) ||
#ifdef NO_EVENTTIMERS
-		    bintime_cmp(&bt, &bt_timethreshold, >=)) {
-			getbinuptime(&to_bt);
+		    sbt >= sbt_timethreshold) {
+			getsbinuptime(&to_sbt);
			/* Add safety belt for the case of hz > 1000. */
-			bintime_addx(&to_bt, tc_tick_bt.frac - tick_bt.frac);
+			to_sbt += (tc_tick_sbt - tick_sbt);
#else
-		    bintime_cmp(&bt, &bt_tickthreshold, >=)) {
+		    sbt >= sbt_tickthreshold) {
			/*
			 * Obtain the time of the last hardclock() call on
			 * this CPU directly from the kern_clocksource.c.
@@ -925,20 +926,18 @@ int
			 */
			spinlock_enter();
			to_bt = DPCPU_GET(hardclocktime);
+			to_sbt = bintime2sbintime(to_bt);
			spinlock_exit();
#endif
			if ((flags & C_HARDCLOCK) == 0)
-				bintime_addx(&to_bt, tick_bt.frac);
+				to_sbt += tick_sbt;
		} else
-			binuptime(&to_bt);
-		bintime_add(&to_bt, &bt);
-		pr1 = bt;
-		if (C_PRELGET(flags) < 0)
-			bintime_shift(&pr1, -tc_timeexp);
-		else
-			bintime_shift(&pr1, -C_PRELGET(flags));
-		if (bintime_cmp(&pr1, &pr, >))
-			pr = pr1;
+			sbinuptime(&to_sbt);
+		to_sbt += sbt;
+		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
+		    sbt >> C_PRELGET(flags));
+		if (pr > precision)
+			precision = pr;
	}
	/*
	 * Don't allow migration of pre-allocated callouts lest they
@@ -975,7 +974,7 @@ int
		if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
			if (cc->cc_exec_next_dir == c)
				cc->cc_exec_next_dir = TAILQ_NEXT(c,
				    c_links.tqe);
-			bucket = get_bucket(&c->c_time);
+			bucket = callout_get_bucket(c->c_time);
			TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
			    c_links.tqe);
		} else
@@ -994,14 +993,14 @@ int
		if (cc->cc_exec_entity[direct].cc_curr == c) {
			cc->cc_exec_entity[direct].ce_migration_cpu = cpu;
			cc->cc_exec_entity[direct].ce_migration_time
-			    = to_bt;
+			    = to_sbt;
			cc->cc_exec_entity[direct].ce_migration_func = ftn;
			cc->cc_exec_entity[direct].ce_migration_arg = arg;
			c->c_flags |= CALLOUT_DFRMIGRATION;
			CTR6(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d.%08x to %u deferred",
-			    c, c->c_func, c->c_arg, (int)(to_bt.sec),
-			    (u_int)(to_bt.frac >> 32), cpu);
+			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
+			    (u_int)(to_sbt & 0xffffffff), cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
#endif
 
-	callout_cc_add(c, cc, to_bt, pr, ftn, arg, cpu, flags);
+	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
-	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_bt.sec),
-	    (u_int)(to_bt.frac >> 32));
+	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
+	    (u_int)(to_sbt & 0xffffffff));
	CC_UNLOCK(cc);
 
	return (cancelled);
 
@@ -1197,7 +1196,7 @@ again:
	if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
		if (cc->cc_exec_next_dir == c)
			cc->cc_exec_next_dir = TAILQ_NEXT(c, c_links.tqe);
-		bucket = get_bucket(&c->c_time);
+		bucket = callout_get_bucket(c->c_time);
		TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
		    c_links.tqe);
	} else
Index: sys/kern/subr_log.c
===================================================================
--- sys/kern/subr_log.c	(revision 246035)
+++ sys/kern/subr_log.c	(working copy)
@@ -117,9 +117,8 @@ logopen(struct cdev *dev, int flags, int mode, str
		return (EBUSY);
	}
	log_open = 1;
-	callout_reset_bt(&logsoftc.sc_callout,
-	    ticks2bintime(hz / log_wakeups_per_second), zero_bt,
-	    logtimeout, NULL, C_PREL(1) | C_HARDCLOCK);
+	callout_reset_sbt(&logsoftc.sc_callout,
+	    (SBT_1S / log_wakeups_per_second), 0, logtimeout, NULL, C_PREL(1));
	mtx_unlock(&msgbuf_lock);
 
	fsetown(td->td_proc->p_pid, &logsoftc.sc_sigio);	/* signal process only */
@@ -247,9 +246,8 @@ done:
		printf("syslog wakeup is less than one.  Adjusting to 1.\n");
		log_wakeups_per_second = 1;
	}
-	callout_reset_bt(&logsoftc.sc_callout,
-	    ticks2bintime(hz / log_wakeups_per_second), zero_bt,
-	    logtimeout, NULL, C_PREL(1) | C_HARDCLOCK);
+	callout_reset_sbt(&logsoftc.sc_callout,
+	    (SBT_1S / log_wakeups_per_second), 0, logtimeout, NULL, C_PREL(1));
}
 
/*ARGSUSED*/
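
Note (illustration, not part of the patch): C_PREL(n) in the calls above asks
for a precision of the timeout shifted right by n; callout_reset_sbt_on() in
the previous section computes pr = sbt >> C_PRELGET(flags), falling back to
tc_precexp when no C_PREL was given. Assuming C_PRELGET(C_PREL(1)) decodes
back to 1 (the macros themselves are outside this diff), the syslog callout
above tolerates being up to half its period late:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1MS	(SBT_1S / 1000)

int
main(void)
{
	int log_wakeups_per_second = 5;		/* example tunable value */
	sbintime_t sbt = SBT_1S / log_wakeups_per_second;	/* 200 ms */
	sbintime_t pr = sbt >> 1;		/* C_PREL(1) -> sbt / 2 */

	printf("period %jd ms, precision %jd ms\n",
	    (intmax_t)(sbt / SBT_1MS), (intmax_t)(pr / SBT_1MS));
	return (0);
}

Wide precision on periodic housekeeping callouts like this is what lets the
new callout wheel batch them into a single hardware interrupt.
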
Index: sys/kern/subr_param.c
===================================================================
--- sys/kern/subr_param.c	(revision 246071)
+++ sys/kern/subr_param.c	(working copy)
@@ -84,7 +84,8 @@ static int sysctl_kern_vm_guest(SYSCTL_HANDLER_ARG
int	hz;				/* system clock's frequency */
int	tick;				/* usec per tick (1000000 / hz) */
struct bintime tick_bt;			/* bintime per tick (1s / hz) */
-struct bintime zero_bt = { 0, 0 };	/* bintime per tick (1s / hz) */
+struct bintime zero_bt = { 0, 0 };	/* zero bintime */
+sbintime_t tick_sbt;
int	maxusers;			/* base tunable */
int	maxproc;			/* maximum # of processes */
int	maxprocperuid;			/* max # of procs per user */
@@ -224,6 +225,7 @@ init_param1(void)
	hz = vm_guest > VM_GUEST_NO ? HZ_VM : HZ;
	tick = 1000000 / hz;
	FREQ2BT(hz, &tick_bt);
+	tick_sbt = bintime2sbintime(tick_bt);
 
#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
Index: sys/kern/subr_sleepqueue.c
===================================================================
--- sys/kern/subr_sleepqueue.c	(revision 246035)
+++ sys/kern/subr_sleepqueue.c	(working copy)
@@ -363,7 +363,7 @@ sleepq_add(void *wchan, struct lock_object *lock,
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
-sleepq_set_timeout_bt(void *wchan, struct bintime bt, struct bintime pr,
+sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
@@ -376,7 +376,7 @@ void
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
-	callout_reset_bt_on(&td->td_slpcallout, bt, pr,
+	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
}
Index: sys/kern/sys_generic.c
===================================================================
--- sys/kern/sys_generic.c	(revision 246035)
+++ sys/kern/sys_generic.c	(working copy)
@@ -102,7 +102,7 @@ static int	dofilewrite(struct thread *, int, struc
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
-static int	seltdwait(struct thread *, struct bintime, struct bintime);
+static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);
 
/*
@@ -903,7 +903,7 @@ kern_select(struct thread *td, int nd, fd_set *fd_
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
-	struct bintime abt, precision, rbt;
+	sbintime_t asbt, precision, rsbt;
	struct timeval rtv;
	int error, lf, ndu;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
@@ -1003,23 +1003,21 @@ kern_select(struct thread *td, int nd, fd_set *fd_
			error = EINVAL;
			goto done;
		}
-		timeval2bintime(&rtv, &rbt);
-		precision = rbt;
-		bintime_shift(&precision, -tc_timeexp);
-		if (TIMESEL(&abt, &rbt))
-			bintime_add(&abt, &tc_tick_bt);
-		bintime_add(&abt, &rbt);
-	} else {
-		abt.sec = (time_t)-1;
-		abt.frac = 0;
-	}
+		rsbt = timeval2sbintime(rtv);
+		precision = rsbt;
+		precision >>= tc_precexp;
+		if (TIMESEL(&asbt, rsbt))
+			asbt += tc_tick_sbt;
+		asbt += rsbt;
+	} else
+		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
-		error = seltdwait(td, abt, precision);
+		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
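
Note (illustration, not part of the patch): the TIMESEL() macro (redefined in
the sys/sys/time.h hunk at the end) picks a clock read based on the requested
interval: at or above sbt_timethreshold the cheap, cached getsbinuptime() is
good enough; below it the full timecounter read sbinuptime() is paid for. Its
return value tells the caller whether one tc_tick_sbt of cache staleness must
be compensated, which is the `if (TIMESEL(...)) asbt += tc_tick_sbt;` dance
above. Sketch with stub clocks; all values are invented:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1MS	(SBT_1S / 1000)

static sbintime_t sbt_timethreshold;

static void sbinuptime(sbintime_t *sbt)    { *sbt = 42 * SBT_1S; } /* precise */
static void getsbinuptime(sbintime_t *sbt) { *sbt = 42 * SBT_1S - SBT_1MS; }

#define	TIMESEL(sbt, sbt2)						\
	(((sbt2) >= sbt_timethreshold) ?				\
	    (getsbinuptime(sbt), 1) : (sbinuptime(sbt), 0))

int
main(void)
{
	sbintime_t asbt, rsbt, tc_tick_sbt;

	tc_tick_sbt = SBT_1MS;			/* pretend hz = 1000 */
	sbt_timethreshold = 32 * SBT_1MS;	/* example threshold */
	rsbt = 100 * SBT_1MS;			/* long timeout: cached path */
	if (TIMESEL(&asbt, rsbt))
		asbt += tc_tick_sbt;		/* compensate staleness */
	printf("deadline base: %jd ms\n", (intmax_t)(asbt / SBT_1MS));
	return (0);
}

Rounding the cheap path up is harmless for a timeout: it may fire a tick late,
never early.
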
 
@@ -1251,7 +1249,7 @@ sys_poll(td, uap)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
-	struct bintime abt, precision, rbt;
+	sbintime_t asbt, precision, rsbt;
	int error;
	u_int nfds;
	size_t ni;
@@ -1272,24 +1270,21 @@ sys_poll(td, uap)
			error = EINVAL;
			goto done;
		}
-		rbt.sec = uap->timeout / 1000;
-		rbt.frac = (uap->timeout % 1000) * (((uint64_t)1 << 63) / 500);
-		precision = rbt;
-		bintime_shift(&precision, -tc_timeexp);
-		if (TIMESEL(&abt, &rbt))
-			bintime_add(&abt, &tc_tick_bt);
-		bintime_add(&abt, &rbt);
-	} else {
-		abt.sec = (time_t)-1;
-		abt.frac = 0;
-	}
+		rsbt = SBT_1MS * uap->timeout;
+		precision = rsbt;
+		precision >>= tc_precexp;
+		if (TIMESEL(&asbt, rsbt))
+			asbt += tc_tick_sbt;
+		asbt += rsbt;
+	} else
+		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
-		error = seltdwait(td, abt, precision);
+		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = pollrescan(td);
@@ -1631,7 +1626,7 @@ out:
}
 
static int
-seltdwait(struct thread *td, struct bintime bt, struct bintime precision)
+seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;
@@ -1650,11 +1645,11 @@ static int
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
-	if (!bintime_isset(&bt))
+	if (sbt == 0)
		error = EWOULDBLOCK;
-	else if (bt.sec != (time_t)-1)
-		error = cv_timedwait_sig_bt(&stp->st_wait, &stp->st_mtx,
-		    bt, precision, C_ABSOLUTE);
+	else if (sbt != -1)
+		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
+		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);
Index: sys/netinet/tcp_timer.c
===================================================================
--- sys/netinet/tcp_timer.c	(revision 246071)
+++ sys/netinet/tcp_timer.c	(working copy)
@@ -718,40 +718,25 @@ tcp_timer_active(struct tcpcb *tp, int timer_type)
 
#define	ticks_to_msecs(t)	(1000*(t) / hz)
 
-static int
-delta_bintime_in_msecs(struct bintime bt, struct bintime now)
-{
-
-	bintime_sub(&bt, &now);
-	return (((uint64_t)1000 * (uint64_t)(bt.frac >> 32)) >> 32) +
-	    (bt.sec * 1000);
-}
-
void
tcp_timer_to_xtimer(struct tcpcb *tp, struct tcp_timer *timer,
    struct xtcp_timer *xtimer)
{
-	struct bintime bt, now;
+	sbintime_t now;
 
	bzero(xtimer, sizeof(*xtimer));
	if (timer == NULL)
		return;
-	bintime_clear(&bt);
-	getbinuptime(&now);
+	getsbinuptime(&now);
	if (callout_active(&timer->tt_delack))
-		xtimer->tt_delack = delta_bintime_in_msecs(
-		    timer->tt_delack.c_time, now);
+		xtimer->tt_delack = (timer->tt_delack.c_time - now) / SBT_1MS;
	if (callout_active(&timer->tt_rexmt))
-		xtimer->tt_rexmt = delta_bintime_in_msecs(
-		    timer->tt_rexmt.c_time, now);
+		xtimer->tt_rexmt = (timer->tt_rexmt.c_time - now) / SBT_1MS;
	if (callout_active(&timer->tt_persist))
-		xtimer->tt_persist = delta_bintime_in_msecs(
-		    timer->tt_persist.c_time, now);
+		xtimer->tt_persist = (timer->tt_persist.c_time - now) / SBT_1MS;
	if (callout_active(&timer->tt_keep))
-		xtimer->tt_keep = delta_bintime_in_msecs(
-		    timer->tt_keep.c_time, now);
+		xtimer->tt_keep = (timer->tt_keep.c_time - now) / SBT_1MS;
	if (callout_active(&timer->tt_2msl))
-		xtimer->tt_2msl = delta_bintime_in_msecs(
-		    timer->tt_2msl.c_time, now);
+		xtimer->tt_2msl = (timer->tt_2msl.c_time - now) / SBT_1MS;
	xtimer->t_rcvtime = ticks_to_msecs(ticks - tp->t_rcvtime);
}
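
Note (illustration, not part of the patch): with absolute sbintime_t deadlines
stored directly in each callout, the export in tcp_timer_to_xtimer() above
collapses to one subtraction and a division per timer. Minimal sketch of that
conversion; the values are invented:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1MS	(SBT_1S / 1000)

int
main(void)
{
	sbintime_t c_time = 25 * SBT_1S / 2;	/* callout fires at 12.5 s */
	sbintime_t now = 12 * SBT_1S;		/* getsbinuptime() said 12 s */

	/* Remaining time in milliseconds, as placed into struct xtcp_timer. */
	printf("tt_rexmt: %jd ms\n", (intmax_t)((c_time - now) / SBT_1MS));
	return (0);
}
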
Index: sys/sys/_callout.h
===================================================================
--- sys/sys/_callout.h	(revision 246035)
+++ sys/sys/_callout.h	(working copy)
@@ -51,8 +51,8 @@ struct callout {
		SLIST_ENTRY(callout) sle;
		TAILQ_ENTRY(callout) tqe;
	} c_links;
-	struct bintime c_time;			/* ticks to the event */
-	struct bintime c_precision;		/* delta allowed wrt opt */
+	sbintime_t c_time;			/* ticks to the event */
+	sbintime_t c_precision;			/* delta allowed wrt opt */
	void	*c_arg;				/* function argument */
	void	(*c_func)(void *);		/* function to call */
	struct lock_object *c_lock;		/* lock to handle */
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h	(revision 246035)
+++ sys/sys/callout.h	(working copy)
@@ -77,14 +77,14 @@ void	_callout_init_lock(struct callout *, struct l
	_callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object :	\
	    NULL, (flags))
#define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
-int	callout_reset_bt_on(struct callout *, struct bintime, struct bintime,
+int	callout_reset_sbt_on(struct callout *, sbintime_t, sbintime_t,
	    void (*)(void *), void *, int, int);
-#define	callout_reset_bt(c, bt, pr, fn, arg, flags)			\
-    callout_reset_bt_on((c), (bt), (pr), (fn), (arg), (c)->c_cpu, flags)
-#define	callout_reset_bt_curcpu(c, bt, pr, fn, arg, flags)		\
-    callout_reset_bt_on((c), (bt), (pr), (fn), (arg), PCPU_GET(cpuid), flags)
+#define	callout_reset_sbt(c, sbt, pr, fn, arg, flags)			\
+    callout_reset_sbt_on((c), (sbt), (pr), (fn), (arg), (c)->c_cpu, flags)
+#define	callout_reset_sbt_curcpu(c, sbt, pr, fn, arg, flags)		\
+    callout_reset_sbt_on((c), (sbt), (pr), (fn), (arg), PCPU_GET(cpuid), flags)
#define	callout_reset_on(c, to_ticks, fn, arg, cpu)			\
-    callout_reset_bt_on((c), ticks2bintime(to_ticks), zero_bt, (fn), (arg), \
+    callout_reset_sbt_on((c), (tick_sbt * (to_ticks)), 0, (fn), (arg),	\
	(cpu), C_HARDCLOCK)
#define	callout_reset(c, on_tick, fn, arg)				\
    callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
Index: sys/sys/condvar.h
===================================================================
--- sys/sys/condvar.h	(revision 246035)
+++ sys/sys/condvar.h	(working copy)
@@ -55,10 +55,10 @@ void	cv_destroy(struct cv *cvp);
void	_cv_wait(struct cv *cvp, struct lock_object *lock);
void	_cv_wait_unlock(struct cv *cvp, struct lock_object *lock);
int	_cv_wait_sig(struct cv *cvp, struct lock_object *lock);
-int	_cv_timedwait_bt(struct cv *cvp, struct lock_object *lock,
-	    struct bintime bt, struct bintime pr, int flags);
-int	_cv_timedwait_sig_bt(struct cv *cvp, struct lock_object *lock,
-	    struct bintime bt, struct bintime pr, int flags);
+int	_cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock,
+	    sbintime_t sbt, sbintime_t pr, int flags);
+int	_cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,
+	    sbintime_t sbt, sbintime_t pr, int flags);
 
void	cv_signal(struct cv *cvp);
void	cv_broadcastpri(struct cv *cvp, int pri);
@@ -70,15 +70,15 @@ void	cv_broadcastpri(struct cv *cvp, int pri);
#define	cv_wait_sig(cvp, lock)						\
	_cv_wait_sig((cvp), &(lock)->lock_object)
#define	cv_timedwait(cvp, lock, timo)					\
-	_cv_timedwait_bt((cvp), &(lock)->lock_object,			\
-	    ticks2bintime(timo), zero_bt, C_HARDCLOCK)
-#define	cv_timedwait_bt(cvp, lock, bt, pr, flags)			\
-	_cv_timedwait_bt((cvp), &(lock)->lock_object, (bt), (pr), (flags))
+	_cv_timedwait_sbt((cvp), &(lock)->lock_object,			\
+	    (tick_sbt * (timo)), 0, C_HARDCLOCK)
+#define	cv_timedwait_sbt(cvp, lock, sbt, pr, flags)			\
+	_cv_timedwait_sbt((cvp), &(lock)->lock_object, (sbt), (pr), (flags))
#define	cv_timedwait_sig(cvp, lock, timo)				\
-	_cv_timedwait_sig_bt((cvp), &(lock)->lock_object,		\
-	    ticks2bintime(timo), zero_bt, C_HARDCLOCK)
-#define	cv_timedwait_sig_bt(cvp, lock, bt, pr, flags)			\
-	_cv_timedwait_sig_bt((cvp), &(lock)->lock_object, (bt), (pr), (flags))
+	_cv_timedwait_sig_sbt((cvp), &(lock)->lock_object,		\
+	    (tick_sbt * (timo)), 0, C_HARDCLOCK)
+#define	cv_timedwait_sig_sbt(cvp, lock, sbt, pr, flags)			\
+	_cv_timedwait_sig_sbt((cvp), &(lock)->lock_object, (sbt), (pr), (flags))
 
#define	cv_broadcast(cvp)	cv_broadcastpri(cvp, 0)
Index: sys/sys/mutex.h
===================================================================
--- sys/sys/mutex.h	(revision 246035)
+++ sys/sys/mutex.h	(working copy)
@@ -377,7 +377,7 @@ extern struct mtx_pool *mtxpool_sleep;
 
#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
-	    ticks2bintime(timo), zero_bt, C_HARDCLOCK)
+	    (tick_sbt * (timo)), 0, C_HARDCLOCK)
 
#define	mtx_initialized(m)	lock_initalized(&(m)->lock_object)
Index: sys/sys/rwlock.h
===================================================================
--- sys/sys/rwlock.h	(revision 246035)
+++ sys/sys/rwlock.h	(working copy)
@@ -212,7 +212,7 @@ void	__rw_assert(const volatile uintptr_t *c, int
} while (0)
#define	rw_sleep(chan, rw, pri, wmesg, timo)				\
	_sleep((chan), &(rw)->lock_object, (pri), (wmesg),		\
-	    ticks2bintime(timo), zero_bt, C_HARDCLOCK)
+	    (tick_sbt * (timo)), 0, C_HARDCLOCK)
 
#define	rw_initialized(rw)	lock_initalized(&(rw)->lock_object)
Index: sys/sys/sleepqueue.h
===================================================================
--- sys/sys/sleepqueue.h	(revision 246035)
+++ sys/sys/sleepqueue.h	(working copy)
@@ -108,10 +108,10 @@ struct sleepqueue *sleepq_lookup(void *wchan);
void	sleepq_release(void *wchan);
void	sleepq_remove(struct thread *td, void *wchan);
int	sleepq_signal(void *wchan, int flags, int pri, int queue);
-void	sleepq_set_timeout_bt(void *wchan, struct bintime bt,
-	    struct bintime pr, int flags);
+void	sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt,
+	    sbintime_t pr, int flags);
#define	sleepq_set_timeout(wchan, timo)					\
-	sleepq_set_timeout_bt((wchan), ticks2bintime(timo), zero_bt, C_HARDCLOCK)
+	sleepq_set_timeout_sbt((wchan), (tick_sbt * (timo)), 0, C_HARDCLOCK)
u_int	sleepq_sleepcnt(void *wchan, int queue);
int	sleepq_timedwait(void *wchan, int pri);
int	sleepq_timedwait_sig(void *wchan, int pri);
Index: sys/sys/sx.h
===================================================================
--- sys/sys/sx.h	(revision 246035)
+++ sys/sys/sx.h	(working copy)
@@ -276,7 +276,7 @@ __sx_sunlock(struct sx *sx, const char *file, int
 
#define	sx_sleep(chan, sx, pri, wmesg, timo)				\
	_sleep((chan), &(sx)->lock_object, (pri), (wmesg),		\
-	    ticks2bintime(timo), zero_bt, C_HARDCLOCK)
+	    (tick_sbt * (timo)), 0, C_HARDCLOCK)
 
/*
 * Options passed to sx_init_flags().
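
Note (illustration, not part of the patch): all the legacy tick-based macros
above and below now expand to (tick_sbt * (timo)); tick_sbt is the sbintime_t
length of one hardclock tick, so a plain multiplication replaces the removed
ticks2bintime() long multiply. Sketch of the equivalence; hz and timo are
example values:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;
#define	SBT_1S	((sbintime_t)1 << 32)
#define	SBT_1MS	(SBT_1S / 1000)

int
main(void)
{
	int hz = 1000;				/* example clock rate */
	sbintime_t tick_sbt = SBT_1S / hz;	/* as set up in init_param1() */
	int timo = 250;				/* msleep(..., timo) in ticks */

	printf("timeout: %jd ms\n", (intmax_t)(tick_sbt * timo / SBT_1MS));
	return (0);
}

The product stays within 64 bits for any realistic tick count, since tick_sbt
is only about 2^32 / hz.
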
Index: sys/sys/systm.h
===================================================================
--- sys/sys/systm.h	(revision 246035)
+++ sys/sys/systm.h	(working copy)
@@ -345,26 +345,26 @@ static __inline void		splx(intrmask_t ipl __unused
 * less often.
 */
int	_sleep(void *chan, struct lock_object *lock, int pri, const char *wmesg,
-	    struct bintime bt, struct bintime pr, int flags) __nonnull(1);
+	    sbintime_t sbt, sbintime_t pr, int flags) __nonnull(1);
#define	msleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
-	    ticks2bintime(timo), zero_bt, C_HARDCLOCK)
-#define	msleep_bt(chan, mtx, pri, wmesg, bt, pr, flags)			\
+	    (tick_sbt * (timo)), 0, C_HARDCLOCK)
+#define	msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags)		\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr),	\
	    (flags))
-int	msleep_spin_bt(void *chan, struct mtx *mtx, const char *wmesg,
-	    struct bintime bt, struct bintime pr, int flags) __nonnull(1);
+int	msleep_spin_sbt(void *chan, struct mtx *mtx, const char *wmesg,
+	    sbintime_t sbt, sbintime_t pr, int flags) __nonnull(1);
#define	msleep_spin(chan, mtx, wmesg, timo)				\
-	msleep_spin_bt((chan), (mtx), (wmesg), ticks2bintime(timo),	\
-	    zero_bt, C_HARDCLOCK)
-int	pause_bt(const char *wmesg, struct bintime bt, struct bintime pr,
+	msleep_spin_sbt((chan), (mtx), (wmesg), (tick_sbt * (timo)),	\
+	    0, C_HARDCLOCK)
+int	pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr,
	    int flags);
#define	pause(wmesg, timo)						\
-	pause_bt((wmesg), ticks2bintime(timo), zero_bt, C_HARDCLOCK)
+	pause_sbt((wmesg), (tick_sbt * (timo)), 0, C_HARDCLOCK)
#define	tsleep(chan, pri, wmesg, timo)					\
-	_sleep((chan), NULL, (pri), (wmesg), ticks2bintime(timo),	\
-	    zero_bt, C_HARDCLOCK)
-#define	tsleep_bt(chan, pri, wmesg, bt, pr, flags)			\
+	_sleep((chan), NULL, (pri), (wmesg), (tick_sbt * (timo)),	\
+	    0, C_HARDCLOCK)
+#define	tsleep_sbt(chan, pri, wmesg, bt, pr, flags)			\
	_sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags))
void	wakeup(void *chan) __nonnull(1);
void	wakeup_one(void *chan) __nonnull(1);
Index: sys/sys/time.h
===================================================================
--- sys/sys/time.h	(revision 246071)
+++ sys/sys/time.h	(working copy)
@@ -124,24 +124,46 @@ bintime_shift(struct bintime *bt, int exp)
	    ((a)->frac cmp (b)->frac) :					\
	    ((a)->sec cmp (b)->sec))
 
-#ifdef _KERNEL
-extern struct bintime tick_bt;
-extern struct bintime zero_bt;
+typedef int64_t sbintime_t;
 
+#define	SBT_1S	((sbintime_t)1 << 32)
+#define	SBT_1M	(SBT_1S * 60)
+#define	SBT_1MS	(SBT_1S / 1000)
+#define	SBT_1US	(SBT_1S / 1000000)
+#define	SBT_1NS	(SBT_1S / 1000000000)
+
+static __inline int
+sbintime_getsec(sbintime_t sbt)
+{
+
+	return (int)(sbt >> 32);
+}
+
+static __inline sbintime_t
+bintime2sbintime(const struct bintime bt)
+{
+
+	return (((sbintime_t)bt.sec << 32) + (bt.frac >> 32));
+}
+
static __inline struct bintime
-ticks2bintime(u_int ticks)
+sbintime2bintime(sbintime_t sbt)
{
	struct bintime bt;
-	uint64_t p1, p2;
 
-	p1 = (tick_bt.frac & 0xffffffff) * ticks;
-	p2 = (tick_bt.frac >> 32) * ticks + (p1 >> 32);
-	bt.sec = (p2 >> 32);
-	bt.frac = (p2 << 32) | (p1 & 0xffffffff);
+	bt.sec = sbt >> 32;
+	bt.frac = sbt << 32;
	return (bt);
}
-#endif
 
+#ifdef _KERNEL
+
+extern struct bintime tick_bt;
+extern struct bintime zero_bt;
+extern sbintime_t tick_sbt;
+
+#endif /* _KERNEL */
+
/*-
 * Background information:
 *
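
Note (illustration, not part of the patch): the bintime2sbintime()/
sbintime2bintime() pair above simply truncates or zero-extends the low 32
fraction bits (the (sbintime_t) cast on bt.sec matters: time_t may be 32-bit,
so the shift must happen at 64 bits). The truncation costs at most 2^-32 s
(~233 ps), as a quick userspace roundtrip shows:

#include <stdint.h>
#include <stdio.h>

struct bintime {
	int64_t  sec;			/* time_t in the kernel */
	uint64_t frac;
};

typedef int64_t sbintime_t;

static sbintime_t
bintime2sbintime(const struct bintime bt)
{

	return (((sbintime_t)bt.sec << 32) + (bt.frac >> 32));
}

static struct bintime
sbintime2bintime(sbintime_t sbt)
{
	struct bintime bt;

	bt.sec = sbt >> 32;
	bt.frac = (uint64_t)sbt << 32;
	return (bt);
}

int
main(void)
{
	struct bintime bt = { 1, 0x123456789abcdef0ULL }, rt;

	rt = sbintime2bintime(bintime2sbintime(bt));
	/* The low 32 fraction bits are gone; everything else survives. */
	printf("%jd %016jx -> %jd %016jx\n", (intmax_t)bt.sec,
	    (uintmax_t)bt.frac, (intmax_t)rt.sec, (uintmax_t)rt.frac);
	return (0);
}

233 ps of resolution is far below what any timecounter delivers, which is the
argument for using the cheaper 64-bit type throughout the callout path.
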
@@ -189,6 +211,43 @@ timeval2bintime(const struct timeval *tv, struct b
	/* 18446744073709 = int(2^64 / 1000000) */
	bt->frac = tv->tv_usec * (uint64_t)18446744073709LL;
}
+
+static __inline struct timespec
+sbintime2timespec(sbintime_t sbt)
+{
+	struct timespec ts;
+
+	ts.tv_sec = sbt >> 32;
+	ts.tv_nsec = ((uint64_t)1000000000 * (uint32_t)sbt) >> 32;
+	return (ts);
+}
+
+static __inline sbintime_t
+timespec2sbintime(struct timespec ts)
+{
+
+	return (((sbintime_t)ts.tv_sec << 32) +
+	    (ts.tv_nsec * (((uint64_t)1 << 63) / 500000000) >> 32));
+}
+
+static __inline struct timeval
+sbintime2timeval(sbintime_t sbt)
+{
+	struct timeval tv;
+
+	tv.tv_sec = sbt >> 32;
+	tv.tv_usec = ((uint64_t)1000000 * (uint32_t)sbt) >> 32;
+	return (tv);
+}
+
+static __inline sbintime_t
+timeval2sbintime(struct timeval tv)
+{
+
+	return (((sbintime_t)tv.tv_sec << 32) +
+	    (tv.tv_usec * (((uint64_t)1 << 63) / 500000) >> 32));
+}
+
#endif /* __BSD_VISIBLE */
 
#ifdef _KERNEL
@@ -324,11 +383,15 @@ extern volatile time_t	time_second;
extern volatile time_t	time_uptime;
extern struct bintime boottimebin;
extern struct bintime tc_tick_bt;
+extern sbintime_t tc_tick_sbt;
+extern struct bintime zero_bt;
extern struct timeval boottime;
-extern int tc_timeexp;
+extern int tc_precexp;
extern int tc_timepercentage;
extern struct bintime bt_timethreshold;
extern struct bintime bt_tickthreshold;
+extern sbintime_t sbt_timethreshold;
+extern sbintime_t sbt_tickthreshold;
 
/*
 * Functions for looking at our clock: [get]{bin,nano,micro}[up]time()
@@ -352,6 +415,7 @@ extern struct bintime bt_tickthreshold;
 */
void	binuptime(struct bintime *bt);
+void	sbinuptime(sbintime_t *sbt);
void	nanouptime(struct timespec *tsp);
void	microuptime(struct timeval *tvp);
 
@@ -360,6 +424,7 @@ void	nanotime(struct timespec *tsp);
void	microtime(struct timeval *tvp);
 
void	getbinuptime(struct bintime *bt);
+void	getsbinuptime(sbintime_t *sbt);
void	getnanouptime(struct timespec *tsp);
void	getmicrouptime(struct timeval *tvp);
 
@@ -388,9 +453,9 @@ int	tvtohz(struct timeval *tv);
	(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;	\
}
 
-#define	TIMESEL(bt, bt2)						\
-	((bintime_cmp((bt2), (&bt_timethreshold), >=)) ?		\
-	    (getbinuptime(bt), 1) : (binuptime(bt), 0))
+#define	TIMESEL(sbt, sbt2)						\
+	(((sbt2) >= sbt_timethreshold) ?				\
+	    (getsbinuptime(sbt), 1) : (sbinuptime(sbt), 0))
 
#else /* !_KERNEL */
#include
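
Note (illustration, not part of the patch): about the constants in
timespec2sbintime()/timeval2sbintime() above. The exact scale factor is
2^64 / 10^9 (or 10^6), which does not fit a 64-bit literal, so it is written
as ((uint64_t)1 << 63) / 500000000 — half the numerator over half the
denominator. The multiply must be unsigned: tv_nsec can reach 999999999, and
999999999 * (2^64 / 10^9) sits just below 2^64, which would overflow a signed
sbintime_t intermediate. A quick userspace check of the nanosecond path:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t sbintime_t;

static sbintime_t
timespec2sbintime(struct timespec ts)
{

	return (((sbintime_t)ts.tv_sec << 32) +
	    (ts.tv_nsec * (((uint64_t)1 << 63) / 500000000) >> 32));
}

int
main(void)
{
	struct timespec ts = { 1, 500000000 };	/* 1.5 s */
	sbintime_t sbt = timespec2sbintime(ts);

	printf("sec: %d, frac: %.9f\n", (int)(sbt >> 32),
	    (double)(uint32_t)sbt / 4294967296.0);	/* ~0.500000000 */
	return (0);
}
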