Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 236893)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -896,8 +896,9 @@ callout_reset_bt_on(struct callout *c, struct bint
 		cc->cc_migration_arg = arg;
 		c->c_flags |= CALLOUT_DFRMIGRATION;
 		CTR6(KTR_CALLOUT,
-		    "migration of %p func %p arg %p in %ld %ld to %u deferred",
-		    c, c->c_func, c->c_arg, bt.sec, bt.frac, cpu);
+		    "migration of %p func %p arg %p in %d.%ld to %u deferred",
+		    c, c->c_func, c->c_arg, (int)(bt.sec),
+		    (u_int)(bt.frac >> 32), cpu);
 		CC_UNLOCK(cc);
 		return (cancelled);
 	}
@@ -906,8 +907,9 @@ callout_reset_bt_on(struct callout *c, struct bint
 #endif
 
 	callout_cc_add(c, cc, bt, ftn, arg, cpu, direct);
-	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %ld %ld",
-	    cancelled ? "re" : "", c, c->c_func, c->c_arg, bt.sec, bt.frac);
+	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
+	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(bt.sec),
+	    (u_int)(bt.frac >> 32));
 	CC_UNLOCK(cc);
 
 	return (cancelled);
Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c	(revision 236814)
+++ sys/kern/kern_clocksource.c	(working copy)
@@ -168,8 +168,8 @@ hardclockintr(void)
 	state = DPCPU_PTR(timerstate);
 	now = state->now;
 	CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
-	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
-	    (unsigned int)(now.frac & 0xffffffff));
+	    curcpu, now.sec, (u_int)(now.frac >> 32),
+	    (u_int)(now.frac & 0xffffffff));
 	done = handleevents(&now, 0);
 	return (done ? FILTER_HANDLED : FILTER_STRAY);
 }
@@ -188,8 +188,8 @@ handleevents(struct bintime *now, int fake)
 	int done, runs;
 
 	CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
-	    curcpu, now->sec, (unsigned int)(now->frac >> 32),
-	    (unsigned int)(now->frac & 0xffffffff));
+	    curcpu, now->sec, (u_int)(now->frac >> 32),
+	    (u_int)(now->frac & 0xffffffff));
 	done = 0;
 	if (fake) {
 		frame = NULL;
@@ -339,8 +339,8 @@ getnextevent(struct bintime *event)
 		*event = nexthard;
 	}
 	CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
-	    curcpu, event->sec, (unsigned int)(event->frac >> 32),
-	    (unsigned int)(event->frac & 0xffffffff), c);
+	    curcpu, event->sec, (u_int)(event->frac >> 32),
+	    (u_int)(event->frac & 0xffffffff), c);
 }
 
 /* Hardware timer callback function. */
@@ -372,8 +372,8 @@ timercb(struct eventtimer *et, void *arg)
 	}
 	state->now = now;
 	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
-	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
-	    (unsigned int)(now.frac & 0xffffffff));
+	    curcpu, (int)(now.sec), (u_int)(now.frac >> 32),
+	    (u_int)(now.frac & 0xffffffff));
 
 #ifdef SMP
 	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
@@ -444,8 +444,8 @@ loadtimer(struct bintime *now, int start)
 		if (new.frac < tmp)	/* Left less then passed. */
 			bintime_add(&new, &timerperiod);
 		CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
-		    curcpu, now->sec, (unsigned int)(now->frac >> 32),
-		    new.sec, (unsigned int)(new.frac >> 32));
+		    curcpu, now->sec, (u_int)(now->frac >> 32),
+		    new.sec, (u_int)(new.frac >> 32));
 		*next = new;
 		bintime_add(next, now);
 		et_start(timer, &new, &timerperiod);
@@ -454,8 +454,8 @@ loadtimer(struct bintime *now, int start)
 		getnextevent(&new);
 		eq = bintime_cmp(&new, next, ==);
 		CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
-		    curcpu, new.sec, (unsigned int)(new.frac >> 32),
-		    (unsigned int)(new.frac & 0xffffffff),
+		    curcpu, new.sec, (u_int)(new.frac >> 32),
+		    (u_int)(new.frac & 0xffffffff),
 		    eq);
 		if (!eq) {
 			*next = new;
@@ -788,8 +788,8 @@ cpu_idleclock(void)
 	else
 		binuptime(&now);
 	CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
-	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
-	    (unsigned int)(now.frac & 0xffffffff));
+	    curcpu, now.sec, (u_int)(now.frac >> 32),
+	    (u_int)(now.frac & 0xffffffff));
 	getnextcpuevent(&t, 1);
 	ET_HW_LOCK(state);
 	state->idle = 1;
@@ -817,8 +817,8 @@ cpu_activeclock(void)
 	else
 		binuptime(&now);
 	CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
-	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
-	    (unsigned int)(now.frac & 0xffffffff));
+	    curcpu, now.sec, (u_int)(now.frac >> 32),
+	    (u_int)(now.frac & 0xffffffff));
 	spinlock_enter();
 	td = curthread;
 	td->td_intr_nesting_level++;
@@ -841,11 +841,11 @@ clocksource_cyc_set(const struct bintime *t)
 		binuptime(&now);
 
 	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
-	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
-	    (unsigned int)(now.frac & 0xffffffff));
+	    curcpu, now.sec, (u_int)(now.frac >> 32),
+	    (u_int)(now.frac & 0xffffffff));
 	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
-	    curcpu, t->sec, (unsigned int)(t->frac >> 32),
-	    (unsigned int)(t->frac & 0xffffffff));
+	    curcpu, t->sec, (u_int)(t->frac >> 32),
+	    (u_int)(t->frac & 0xffffffff));
 
 	ET_HW_LOCK(state);
 	if (bintime_cmp(t, &state->nextcyc, ==)) {
@@ -870,9 +870,9 @@ cpu_new_callout(int cpu, struct bintime bt)
 	struct bintime now;
 	struct pcpu_state *state;
 
-	CTR4(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
-	    curcpu, cpu, (unsigned int)(bt.frac >> 32),
-	    (unsigned int)(bt.frac & 0xffffffff));
+	CTR5(KTR_SPARE2, "new co at %d: on %d at %d.%08x%08x",
+	    curcpu, cpu, (int)(bt.sec), (u_int)(bt.frac >> 32),
+	    (u_int)(bt.frac & 0xffffffff));
 	state = DPCPU_ID_PTR(cpu, timerstate);
 	ET_HW_LOCK(state);
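For context on the formatting the patch converges on: struct bintime carries whole seconds in sec and a 64-bit binary fraction of a second in frac, so shifting frac right by 32 and printing the result with %08x shows the fractional part as a 32-bit fixed-point value in hex. The sketch below is not part of the patch; it declares a local stand-in for struct bintime (my_bintime, a hypothetical name) so it builds outside the kernel, and only illustrates the "%d.%08x" arithmetic used in the KTR lines above.

/*
 * Standalone sketch of the "%d.%08x" bintime formatting used in the
 * KTR traces above.  The struct below mirrors FreeBSD's struct bintime
 * (whole seconds plus a 64-bit binary fraction of a second); it is
 * declared locally so the example compiles anywhere.
 */
#include <stdio.h>
#include <stdint.h>

struct my_bintime {
	int64_t		sec;	/* whole seconds */
	uint64_t	frac;	/* fraction of a second, units of 2^-64 s */
};

int
main(void)
{
	struct my_bintime bt;

	/* Encode 3.75 s: 0.75 * 2^32 placed in the upper half of frac. */
	bt.sec = 3;
	bt.frac = (uint64_t)(0.75 * 4294967296.0) << 32;

	/*
	 * Same expression as the patch: seconds as a plain integer and the
	 * top 32 bits of the fraction as 8 hex digits, so 0.75 s prints as
	 * c0000000 (0xc0000000 / 2^32 == 0.75).  Output: "3.c0000000".
	 */
	printf("%d.%08x\n", (int)bt.sec, (unsigned int)(bt.frac >> 32));
	return (0);
}

Dropping the low 32 bits of frac, as the trace lines do, still leaves a resolution of 2^-32 s (roughly 0.23 ns), which is ample for KTR output.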