Index: sys/netinet/tcp_timer.c
===================================================================
--- sys/netinet/tcp_timer.c        (revision 235056)
+++ sys/netinet/tcp_timer.c        (working copy)
@@ -662,21 +662,43 @@
 #define ticks_to_msecs(t)       (1000*(t) / hz)
 
+#define BT2FREQ(bt)                                             \
+        (((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) /   \
+            ((bt)->frac >> 1))
+
 void
 tcp_timer_to_xtimer(struct tcpcb *tp, struct tcp_timer *timer,
     struct xtcp_timer *xtimer)
 {
+        int tmp;
+
         bzero(xtimer, sizeof(struct xtcp_timer));
         if (timer == NULL)
                 return;
-        if (callout_active(&timer->tt_delack))
-                xtimer->tt_delack = ticks_to_msecs(timer->tt_delack.c_time - ticks);
-        if (callout_active(&timer->tt_rexmt))
-                xtimer->tt_rexmt = ticks_to_msecs(timer->tt_rexmt.c_time - ticks);
-        if (callout_active(&timer->tt_persist))
-                xtimer->tt_persist = ticks_to_msecs(timer->tt_persist.c_time - ticks);
-        if (callout_active(&timer->tt_keep))
-                xtimer->tt_keep = ticks_to_msecs(timer->tt_keep.c_time - ticks);
-        if (callout_active(&timer->tt_2msl))
-                xtimer->tt_2msl = ticks_to_msecs(timer->tt_2msl.c_time - ticks);
+
+        if (callout_active(&timer->tt_delack)) {
+                tmp = BT2FREQ(&(timer->tt_delack.c_time));
+                xtimer->tt_delack = ticks_to_msecs(tmp - ticks);
+        }
+
+        if (callout_active(&timer->tt_rexmt)) {
+                tmp = BT2FREQ(&(timer->tt_rexmt.c_time));
+                xtimer->tt_rexmt = ticks_to_msecs(tmp - ticks);
+        }
+
+        if (callout_active(&timer->tt_persist)) {
+                tmp = BT2FREQ(&(timer->tt_persist.c_time));
+                xtimer->tt_persist = ticks_to_msecs(tmp - ticks);
+        }
+
+        if (callout_active(&timer->tt_keep)) {
+                tmp = BT2FREQ(&(timer->tt_keep.c_time));
+                xtimer->tt_keep = ticks_to_msecs(tmp - ticks);
+        }
+
+        if (callout_active(&timer->tt_2msl)) {
+                tmp = BT2FREQ(&(timer->tt_2msl.c_time));
+                xtimer->tt_2msl = ticks_to_msecs(tmp - ticks);
+        }
+
         xtimer->t_rcvtime = ticks_to_msecs(ticks - tp->t_rcvtime);
 }
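The hunk above relies on the fact that a struct bintime carries whole seconds plus a 64-bit binary fraction (units of 2^-64 s), so an interval can be turned into milliseconds directly instead of round-tripping through hz ticks. A minimal userland sketch of that arithmetic follows; bintime_to_msec() is a hypothetical helper written purely for illustration, not something this patch adds:

#include <stdint.h>

/* Minimal stand-in for the kernel's struct bintime (sys/time.h). */
struct bintime {
        int64_t  sec;   /* whole seconds */
        uint64_t frac;  /* fractional seconds, units of 2^-64 s */
};

/*
 * Convert a bintime interval to milliseconds.  frac * 1000 / 2^64 is
 * computed as ((frac >> 32) * 1000) >> 32 so the multiply stays within
 * 64 bits; the truncation this introduces is far below 1 ms.
 */
static uint64_t
bintime_to_msec(const struct bintime *bt)
{
        return ((uint64_t)bt->sec * 1000 + (((bt->frac >> 32) * 1000) >> 32));
}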
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC        (revision 235056)
+++ sys/amd64/conf/GENERIC        (working copy)
@@ -22,7 +22,6 @@
 ident           GENERIC
 
 makeoptions     DEBUG=-g                # Build kernel with gdb(1) debug symbols
-makeoptions     WITH_CTF=1              # Run ctfconvert(1) for DTrace support
 
 options         SCHED_ULE               # ULE scheduler
 options         PREEMPTION              # Enable kernel thread preemption
@@ -59,13 +58,10 @@
 options         _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
 options         PRINTF_BUFR_SIZE=128    # Prevent printf output being interspersed.
 options         KBD_INSTALL_CDEV        # install a CDEV entry in /dev
-options         HWPMC_HOOKS             # Necessary kernel hooks for hwpmc(4)
-options         AUDIT                   # Security event auditing
 options         CAPABILITY_MODE         # Capsicum capability mode
 options         CAPABILITIES            # Capsicum capabilities
 options         MAC                     # TrustedBSD MAC Framework
 options         KDTRACE_FRAME           # Ensure frames are compiled in
-options         KDTRACE_HOOKS           # Kernel DTrace hooks
 options         INCLUDE_CONFIG_FILE     # Include this file in kernel
 
 # Debugging support.  Always need this:
@@ -75,7 +71,6 @@
 # For full debugger support use this instead:
 options         DDB                     # Support DDB.
 options         GDB                     # Support remote GDB.
-options         DDB_CTF                 # kernel ELF linker loads CTF data
 options         DEADLKRES               # Enable the deadlock resolver
 options         INVARIANTS              # Enable calls of extra sanity checking
 options         INVARIANT_SUPPORT       # Extra sanity checks of internal structures, required by INVARIANTS
@@ -86,6 +81,13 @@
 # Make an SMP-capable kernel by default
 options         SMP                     # Symmetric MultiProcessor Kernel
 
+# Options for goodies
+options         BREAK_TO_DEBUGGER
+options         ALT_BREAK_TO_DEBUGGER
+options         KTR
+options         KTR_COMPILE=(KTR_SPARE2|KTR_CALLOUT)
+options         KTR_ENTRIES=131072
+
 # CPU frequency control
 device          cpufreq

Index: sys/sys/_callout.h
===================================================================
--- sys/sys/_callout.h        (revision 235056)
+++ sys/sys/_callout.h        (working copy)
@@ -39,6 +39,7 @@
 #define _SYS__CALLOUT_H
 
 #include
+#include
 
 struct lock_object;
 
@@ -50,7 +51,8 @@
                 SLIST_ENTRY(callout) sle;
                 TAILQ_ENTRY(callout) tqe;
         } c_links;
-        int     c_time;                         /* ticks to the event */
+        TAILQ_ENTRY(callout) c_staiter;
+        struct bintime c_time;                  /* ticks to the event */
         void    *c_arg;                         /* function argument */
         void    (*c_func)(void *);              /* function to call */
         struct lock_object *c_lock;             /* lock to handle */

Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h        (revision 235056)
+++ sys/sys/callout.h        (working copy)
@@ -79,8 +79,8 @@
 #define callout_stop(c)         _callout_stop_safe(c, 0)
 int     _callout_stop_safe(struct callout *, int);
 void    callout_tick(void);
-int     callout_tickstofirst(int limit);
-extern void (*callout_new_inserted)(int cpu, int ticks);
+struct bintime callout_tickstofirst(void);
+extern void (*callout_new_inserted)(int cpu, struct bintime bt);
 
 #endif

Index: sys/kern/kern_clock.c
===================================================================
--- sys/kern/kern_clock.c        (revision 235056)
+++ sys/kern/kern_clock.c        (working copy)
@@ -454,7 +454,6 @@
         if (td->td_intr_frame != NULL)
                 PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
 #endif
-        callout_tick();
 }
 
 /*
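With c_time switched from an int tick count to a struct bintime, and callout_tickstofirst()/callout_new_inserted() re-typed to match, every comparison and subtraction that used to be plain integer arithmetic on ticks now goes through the bintime helpers from sys/time.h. For readers who do not have those memorized, the sketch below shows roughly what bintime_add(), bintime_sub() and the bintime_cmp() macro do; it is a simplified userland restatement, not the kernel's verbatim code:

#include <stdint.h>

struct bintime {
        int64_t  sec;   /* whole seconds */
        uint64_t frac;  /* fraction of a second, in units of 2^-64 s */
};

/* a += b, carrying from the 64-bit fraction into the seconds field */
static void
bintime_add(struct bintime *a, const struct bintime *b)
{
        uint64_t old = a->frac;

        a->frac += b->frac;
        if (a->frac < old)      /* fraction wrapped: carry one second */
                a->sec++;
        a->sec += b->sec;
}

/* a -= b, borrowing one second when the fraction underflows */
static void
bintime_sub(struct bintime *a, const struct bintime *b)
{
        uint64_t old = a->frac;

        a->frac -= b->frac;
        if (a->frac > old)      /* fraction underflowed: borrow */
                a->sec--;
        a->sec -= b->sec;
}

/* bintime_cmp(a, b, <) in the kernel expands to a comparison like this */
static int
bintime_lt(const struct bintime *a, const struct bintime *b)
{
        return (a->sec == b->sec) ? (a->frac < b->frac) : (a->sec < b->sec);
}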
Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c        (revision 235056)
+++ sys/kern/kern_timeout.c        (working copy)
@@ -43,7 +43,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -68,9 +67,6 @@
 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
     "struct callout *");
 
-static int avg_depth;
-SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
-    "Average number of items examined per softclock call. Units = 1/1000");
 static int avg_gcalls;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
     "Average number of Giant callouts made per softclock call. Units = 1/1000");
@@ -94,10 +90,10 @@
  */
 struct cc_mig_ent {
 #ifdef SMP
-        void    (*ce_migration_func)(void *);
-        void    *ce_migration_arg;
-        int     ce_migration_cpu;
-        int     ce_migration_ticks;
+        void    (*ce_migration_func)(void *);
+        void    *ce_migration_arg;
+        int     ce_migration_cpu;
+        struct bintime  ce_migration_time;
 #endif
 };
 
@@ -127,18 +123,19 @@
         struct callout          *cc_next;
         struct callout          *cc_curr;
         void                    *cc_cookie;
-        int                     cc_ticks;
-        int                     cc_softticks;
+        struct bintime          cc_ticks;
+        struct bintime          cc_softticks;
         int                     cc_cancel;
         int                     cc_waiting;
-        int                     cc_firsttick;
+        struct bintime          cc_firsttick;
+        struct callout_tailq    *cc_localexp;
 };
 
 #ifdef SMP
 #define cc_migration_func       cc_migrating_entity.ce_migration_func
 #define cc_migration_arg        cc_migrating_entity.ce_migration_arg
 #define cc_migration_cpu        cc_migrating_entity.ce_migration_cpu
-#define cc_migration_ticks      cc_migrating_entity.ce_migration_ticks
+#define cc_migration_time       cc_migrating_entity.ce_migration_time
 
 struct callout_cpu cc_cpu[MAXCPU];
 #define CPUBLOCK        MAXCPU
@@ -153,26 +150,32 @@
 #define CC_UNLOCK(cc)   mtx_unlock_spin(&(cc)->cc_lock)
 #define CC_LOCK_ASSERT(cc)      mtx_assert(&(cc)->cc_lock, MA_OWNED)
 
+#define FREQ2BT(freq, bt)                                               \
+{                                                                       \
+        (bt)->sec = 0;                                                  \
+        (bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;      \
+}
+
 static int timeout_cpu;
-void (*callout_new_inserted)(int cpu, int ticks) = NULL;
+void (*callout_new_inserted)(int cpu, struct bintime bt) = NULL;
 
 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
 
 /**
  * Locked by cc_lock:
- *   cc_curr         - If a callout is in progress, it is curr_callout.
- *                     If curr_callout is non-NULL, threads waiting in
+ *   cc_curr         - If a callout is in progress, it is cc_curr.
+ *                     If cc_curr is non-NULL, threads waiting in
  *                     callout_drain() will be woken up as soon as the
  *                     relevant callout completes.
- *   cc_cancel       - Changing to 1 with both callout_lock and c_lock held
+ *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
  *                     guarantees that the current callout will not run.
  *                     The softclock() function sets this to 0 before it
- *                     drops callout_lock to acquire c_lock, and it calls
+ *                     drops callout_lock to acquire cc_lock, and it calls
  *                     the handler only if curr_cancelled is still 0 after
- *                     c_lock is successfully acquired.
+ *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
- *                     curr_callout is non-NULL.
+ *                     cc_curr is non-NULL.
 */
 
 /*
@@ -184,7 +187,8 @@
 #ifdef SMP
         cc->cc_migration_cpu = CPUBLOCK;
-        cc->cc_migration_ticks = 0;
+        cc->cc_migration_time.sec = 0;
+        cc->cc_migration_time.frac = 0;
         cc->cc_migration_func = NULL;
         cc->cc_migration_arg = NULL;
 #endif
@@ -230,6 +234,8 @@
         v = (caddr_t)(cc->cc_callout + ncallout);
         cc->cc_callwheel = (struct callout_tailq *)v;
         v = (caddr_t)(cc->cc_callwheel + callwheelsize);
+        cc->cc_localexp = (struct callout_tailq *)v;
+        v = (caddr_t)(cc->cc_localexp + 1);
         return(v);
 }
@@ -244,6 +250,7 @@
         for (i = 0; i < callwheelsize; i++) {
                 TAILQ_INIT(&cc->cc_callwheel[i]);
         }
+        TAILQ_INIT(cc->cc_localexp);
         cc_cme_cleanup(cc);
         if (cc->cc_callout == NULL)
                 return;
@@ -325,6 +332,8 @@
         cc->cc_callwheel = malloc(
             sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT, M_WAITOK);
+        cc->cc_localexp = malloc(
+            sizeof(struct callout_tailq), M_CALLOUT, M_WAITOK);
         callout_cpu_init(cc);
 }
 #endif
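The FREQ2BT() macro added above builds the bintime period of one event at a given frequency: 2^63/freq shifted left once is 2^64/freq computed without needing a 65-bit constant, i.e. 1/freq seconds expressed in 2^-64 units. BT2FREQ() in the tcp_timer.c hunk is the rounded inverse. Both are restated below as plain functions, purely for clarity; the function names are invented here, only the arithmetic mirrors the patch's macros:

#include <stdint.h>

struct bintime {
        int64_t  sec;
        uint64_t frac;
};

/* Period of one event at 'freq' Hz: frac = 2^64 / freq (freq > 1). */
static void
freq_to_bintime(uint64_t freq, struct bintime *bt)
{
        bt->sec = 0;
        bt->frac = ((uint64_t)0x8000000000000000ULL / freq) << 1;
}

/* Inverse: recover the frequency from a sub-second period, with rounding. */
static uint64_t
bintime_to_freq(const struct bintime *bt)
{
        return (((uint64_t)0x8000000000000000ULL + (bt->frac >> 2)) /
            (bt->frac >> 1));
}

For example, freq_to_bintime(1000, &bt) yields frac of roughly 2^64/1000, a 1 ms period, and bintime_to_freq() maps that back to 1000.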
@@ -332,12 +341,26 @@
 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock,
     NULL);
 
+static int
+get_bucket(struct bintime *bt)
+{
+        time_t sec;
+        uint64_t frac;
+        sec = bt->sec;
+        frac = bt->frac;
+        return (int) (((sec<<10)+(frac>>54)) & callwheelmask);
+}
+
 void
 callout_tick(void)
 {
+        struct callout *tmp;
         struct callout_cpu *cc;
+        struct callout_tailq *sc;
+        struct bintime bt;
         int need_softclock;
         int bucket;
+        int first_bucket;
 
         /*
          * Process callouts at a very low cpu priority, so we don't keep the
@@ -346,13 +369,23 @@
         need_softclock = 0;
         cc = CC_SELF();
         mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-        cc->cc_firsttick = cc->cc_ticks = ticks;
-        for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
-                bucket = cc->cc_softticks & callwheelmask;
-                if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
-                        need_softclock = 1;
+        bintime(&bt);
+        cc->cc_firsttick = cc->cc_ticks = bt;
+        first_bucket = get_bucket(&(cc->cc_ticks));
+        bucket = get_bucket(&(cc->cc_softticks));
+        for (;;) {
+                sc = &cc->cc_callwheel[bucket & callwheelmask];
+                TAILQ_FOREACH(tmp, sc, c_staiter) {
+                        if (bintime_cmp(&tmp->c_time, &cc->cc_firsttick, <=)) {
+                                TAILQ_INSERT_TAIL(cc->cc_localexp, tmp, c_staiter);
+                                TAILQ_REMOVE(sc, tmp, c_links.tqe);
+                                need_softclock = 1;
+                                break;
+                        }
+                }
+                bucket++;
+                if ((bucket & callwheelmask) == first_bucket)
                         break;
-                }
         }
         mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
         /*
@@ -363,31 +396,44 @@
                 swi_sched(cc->cc_cookie, 0);
 }
 
-int
-callout_tickstofirst(int limit)
+struct bintime
+callout_tickstofirst(void)
 {
         struct callout_cpu *cc;
         struct callout *c;
         struct callout_tailq *sc;
-        int curticks;
-        int skip = 1;
+        struct bintime curticks;
+        struct bintime ncall;
+        struct bintime skip;
+        struct bintime tmp;
+        int buck;
 
+        skip.sec = 0;
+        skip.frac = 1;
+        FREQ2BT(ncallout, &ncall);
         cc = CC_SELF();
         mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
         curticks = cc->cc_ticks;
-        while( skip < ncallout && skip < limit ) {
-                sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ];
-                /* search scanning ticks */
-                TAILQ_FOREACH( c, sc, c_links.tqe ){
-                        if (c->c_time - curticks <= ncallout)
+        buck = get_bucket(&curticks);
+        for (;;) {
+                sc = &cc->cc_callwheel[buck & callwheelmask];
+                TAILQ_FOREACH( c, sc, c_links.tqe ) {
+                        tmp = c->c_time;
+                        bintime_sub(&tmp, &curticks);
+                        if (bintime_cmp(&tmp, &ncall, <=)) {
+                                tmp = c->c_time;
+                                skip = tmp;
                                 goto out;
+                        }
                 }
-                skip++;
+                buck++;
         }
 out:
-        cc->cc_firsttick = curticks + skip;
+        tmp = curticks;
+        bintime_add(&tmp, &skip);
+        cc->cc_firsttick = tmp;
         mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
-        return (skip);
+        return (tmp);
 }
 
 static struct callout_cpu *
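get_bucket() above hashes an absolute bintime into a callwheel slot: (sec << 10) + (frac >> 54) advances one slot per 1/1024 of a second (frac >> 54 keeps the top 10 bits of the fraction), so neighbouring slots are roughly 0.98 ms apart and the index wraps through callwheelmask. A standalone check of the same hash; the 1024-entry wheel size here is only an example value, the real one comes from the callwheelsize/callwheelmask variables in kern_timeout.c:

#include <stdint.h>
#include <stdio.h>

struct bintime {
        int64_t  sec;
        uint64_t frac;
};

#define CALLWHEEL_SIZE  1024                    /* example value only */
#define CALLWHEEL_MASK  (CALLWHEEL_SIZE - 1)

/* Same hash as the patch: one slot per 1/1024 of a second. */
static int
get_bucket(const struct bintime *bt)
{
        return (int)((((uint64_t)bt->sec << 10) + (bt->frac >> 54)) &
            CALLWHEEL_MASK);
}

int
main(void)
{
        /* 5.5 s: second 5 contributes 5*1024 slots, the 0.5 s adds 512 more */
        struct bintime bt = { 5, 1ULL << 63 };

        printf("bucket %d\n", get_bucket(&bt)); /* (5120 + 512) & 1023 = 512 */
        return (0);
}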
@@ -415,25 +461,35 @@
 }
 
 static void
-callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
-    void (*func)(void *), void *arg, int cpu)
+callout_cc_add(struct callout *c, struct callout_cpu *cc,
+    struct bintime to_bintime, void (*func)(void *), void *arg, int cpu)
 {
+        int bucket;
 
         CC_LOCK_ASSERT(cc);
-        if (to_ticks <= 0)
-                to_ticks = 1;
+        if (bintime_cmp(&to_bintime, &cc->cc_softticks, <)) {
+                to_bintime = cc->cc_softticks;
+        }
         c->c_arg = arg;
         c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
         c->c_func = func;
-        c->c_time = ticks + to_ticks;
-        TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
+        c->c_time = to_bintime;
+        bucket = get_bucket(&c->c_time);
+        TAILQ_INSERT_TAIL(&cc->cc_callwheel[bucket & callwheelmask],
             c, c_links.tqe);
-        if ((c->c_time - cc->cc_firsttick) < 0 &&
+
+        /*
+         * cc->cc_firsttick keeps track of the time at which the
+         * nearest event in the future should be fired.
+         * We don't need to call callout_new_inserted if the time
+         * of the callout we're inserting is greater than this value.
+         */
+        if (bintime_cmp(&c->c_time, &cc->cc_firsttick, <) &&
             callout_new_inserted != NULL) {
                 cc->cc_firsttick = c->c_time;
                 (*callout_new_inserted)(cpu,
-                    to_ticks + (ticks - cc->cc_ticks));
+                    to_bintime);
         }
 }
 
@@ -462,7 +518,8 @@
         struct callout_cpu *new_cc;
         void (*new_func)(void *);
         void *new_arg;
-        int new_cpu, new_ticks;
+        int new_cpu;
+        struct bintime new_time;
 #endif
 #ifdef DIAGNOSTIC
         struct bintime bt1, bt2;
@@ -574,7 +631,7 @@
                  * migration just perform it now.
                  */
                 new_cpu = cc->cc_migration_cpu;
-                new_ticks = cc->cc_migration_ticks;
+                new_time = cc->cc_migration_time;
                 new_func = cc->cc_migration_func;
                 new_arg = cc->cc_migration_arg;
                 cc_cme_cleanup(cc);
@@ -598,7 +655,7 @@
                  * is not easy.
                  */
                 new_cc = callout_cpu_switch(c, cc, new_cpu);
-                callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
+                callout_cc_add(c, new_cc, new_time, new_func, new_arg,
                     new_cpu);
                 CC_UNLOCK(new_cc);
                 CC_LOCK(cc);
@@ -633,10 +690,7 @@
 {
         struct callout_cpu *cc;
         struct callout *c;
-        struct callout_tailq *bucket;
-        int curticks;
         int steps;      /* #steps since we last allowed interrupts */
-        int depth;
         int mpcalls;
         int lockcalls;
         int gcalls;
@@ -648,42 +702,30 @@
         mpcalls = 0;
         lockcalls = 0;
         gcalls = 0;
-        depth = 0;
         steps = 0;
         cc = (struct callout_cpu *)arg;
         CC_LOCK(cc);
-        while (cc->cc_softticks - 1 != cc->cc_ticks) {
-                /*
-                 * cc_softticks may be modified by hard clock, so cache
-                 * it while we work on a given bucket.
-                 */
-                curticks = cc->cc_softticks;
-                cc->cc_softticks++;
-                bucket = &cc->cc_callwheel[curticks & callwheelmask];
-                c = TAILQ_FIRST(bucket);
-                while (c != NULL) {
-                        depth++;
-                        if (c->c_time != curticks) {
-                                c = TAILQ_NEXT(c, c_links.tqe);
-                                ++steps;
-                                if (steps >= MAX_SOFTCLOCK_STEPS) {
-                                        cc->cc_next = c;
-                                        /* Give interrupts a chance. */
-                                        CC_UNLOCK(cc);
-                                        ;       /* nothing */
-                                        CC_LOCK(cc);
-                                        c = cc->cc_next;
-                                        steps = 0;
-                                }
-                        } else {
-                                TAILQ_REMOVE(bucket, c, c_links.tqe);
-                                c = softclock_call_cc(c, cc, &mpcalls,
-                                    &lockcalls, &gcalls);
-                                steps = 0;
-                        }
+
+        c = TAILQ_FIRST(cc->cc_localexp);
+        while (c != NULL) {
+                ++steps;
+                if (steps >= MAX_SOFTCLOCK_STEPS) {
+                        cc->cc_next = c;
+                        /* Give interrupts a chance. */
+                        CC_UNLOCK(cc);
+                        ;       /* nothing */
+                        CC_LOCK(cc);
+                        c = cc->cc_next;
+                        steps = 0;
                 }
+                else {
+                        TAILQ_REMOVE(cc->cc_localexp, c, c_staiter);
+                        c = softclock_call_cc(c, cc, &mpcalls,
+                            &lockcalls, &gcalls);
+                        steps = 0;
+                }
         }
-        avg_depth += (depth * 1000 - avg_depth) >> 8;
+
         avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
         avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
         avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
@@ -781,8 +823,16 @@
     void *arg, int cpu)
 {
         struct callout_cpu *cc;
+        struct bintime bt;
+        struct bintime now;
         int cancelled = 0;
+        int bucket;
 
+        FREQ2BT(hz, &bt);
+        bintime(&now);
+        bintime_mul(&bt, to_ticks);
+        bintime_add(&bt, &now);
+
         /*
          * Don't allow migration of pre-allocated callouts lest they
          * become unbalanced.
@@ -814,7 +864,8 @@
                 if (cc->cc_next == c) {
                         cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
                 }
-                TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
+                bucket = get_bucket(&c->c_time);
+                TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
                     c_links.tqe);
                 cancelled = 1;
@@ -830,13 +881,13 @@
         if (c->c_cpu != cpu) {
                 if (cc->cc_curr == c) {
                         cc->cc_migration_cpu = cpu;
-                        cc->cc_migration_ticks = to_ticks;
+                        cc->cc_migration_time = bt;
                         cc->cc_migration_func = ftn;
                         cc->cc_migration_arg = arg;
                         c->c_flags |= CALLOUT_DFRMIGRATION;
                         CTR5(KTR_CALLOUT,
                             "migration of %p func %p arg %p in %d to %u deferred",
-                            c, c->c_func, c->c_arg, to_ticks, cpu);
+                            c, c->c_func, c->c_arg, bt.frac, cpu);
                         CC_UNLOCK(cc);
                         return (cancelled);
                 }
@@ -844,9 +895,9 @@
         }
 #endif
 
-        callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
+        callout_cc_add(c, cc, bt, ftn, arg, cpu);
         CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
-            cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
+            cancelled ? "re" : "", c, c->c_func, c->c_arg, bt.frac);
         CC_UNLOCK(cc);
 
         return (cancelled);
@@ -874,7 +925,7 @@
 {
         struct callout_cpu *cc, *old_cc;
         struct lock_class *class;
-        int use_lock, sq_locked;
+        int use_lock, sq_locked, bucket;
 
         /*
          * Some old subsystems don't hold Giant while running a callout_stop(),
@@ -1024,7 +1075,8 @@
         CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
             c, c->c_func, c->c_arg);
-        TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
+        bucket = get_bucket(&c->c_time);
+        TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
             c_links.tqe);
         callout_cc_del(c, cc);
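In the callout_reset_on() hunk above, the caller's tick count becomes an absolute bintime deadline: one hardclock period from FREQ2BT(hz), scaled by to_ticks with bintime_mul(), then added to the current time. The sketch below reproduces that computation in one place; bintime_muli() and ticks_to_deadline() are illustrative names only, and the multiply is written out long-hand since it assumes nothing beyond 64-bit arithmetic:

#include <stdint.h>

struct bintime {
        int64_t  sec;
        uint64_t frac;
};

/* bt = bt * n, for a modest non-negative integer n (sketch only). */
static void
bintime_muli(struct bintime *bt, uint32_t n)
{
        uint64_t hi, lo;

        /* split the 64-bit fraction so the partial products cannot overflow */
        lo = (bt->frac & 0xffffffffULL) * n;
        hi = (bt->frac >> 32) * n + (lo >> 32);
        bt->sec = bt->sec * n + (int64_t)(hi >> 32);
        bt->frac = (hi << 32) | (lo & 0xffffffffULL);
}

/* Absolute deadline "now + to_ticks hardclock ticks" as a bintime. */
static struct bintime
ticks_to_deadline(struct bintime now, int to_ticks, int hz)
{
        struct bintime period;

        period.sec = 0;
        period.frac = ((uint64_t)0x8000000000000000ULL / hz) << 1;
        bintime_muli(&period, (uint32_t)to_ticks);
        now.frac += period.frac;
        if (now.frac < period.frac)     /* carry from the fraction */
                now.sec++;
        now.sec += period.sec;
        return (now);
}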
Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c        (revision 235056)
+++ sys/kern/kern_clocksource.c        (working copy)
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -72,7 +73,7 @@
 static void     getnextevent(struct bintime *event);
 static int      handleevents(struct bintime *now, int fake);
 #ifdef SMP
-static void     cpu_new_callout(int cpu, int ticks);
+static void     cpu_new_callout(int cpu, struct bintime bt);
 #endif
 
 static struct mtx       et_hw_mtx;
@@ -135,6 +136,7 @@
         struct bintime  nexthard;       /* Next hardlock() event. */
         struct bintime  nextstat;       /* Next statclock() event. */
         struct bintime  nextprof;       /* Next profclock() event. */
+        struct bintime  nextcall;       /* Next callout event. */
 #ifdef KDTRACE_HOOKS
         struct bintime  nextcyc;        /* Next OpenSolaris cyclics event. */
 #endif
@@ -237,6 +239,11 @@
         } else
                 state->nextprof = state->nextstat;
 
+        if (&state->nextcall != NULL &&
+            bintime_cmp(now, &state->nextcall, >=)) {
+                callout_tick();
+        }
+
 #ifdef KDTRACE_HOOKS
         if (fake == 0 && cyclic_clock_func != NULL &&
             state->nextcyc.sec != -1 &&
@@ -269,22 +276,20 @@
 {
         struct bintime tmp;
         struct pcpu_state *state;
-        int skip;
 
         state = DPCPU_PTR(timerstate);
         /* Handle hardclock() events. */
         *event = state->nexthard;
         if (idle || (!activetick && !profiling &&
             (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
-                skip = idle ? 4 : (stathz / 2);
-                if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
-                        skip = tc_min_ticktock_freq;
-                skip = callout_tickstofirst(hz / skip) - 1;
-                CTR2(KTR_SPARE2, "skip   at %d: %d", curcpu, skip);
                 tmp = hardperiod;
-                bintime_mul(&tmp, skip);
                 bintime_add(event, &tmp);
         }
+
+        state->nextcall = callout_tickstofirst();
+        if (bintime_cmp(event, &state->nextcall, >))
+                *event = state->nextcall;
+
         if (!idle) { /* If CPU is active - handle other types of events. */
                 if (bintime_cmp(event, &state->nextstat, >))
                         *event = state->nextstat;
@@ -858,9 +863,9 @@
 
 #ifdef SMP
 static void
-cpu_new_callout(int cpu, int ticks)
+cpu_new_callout(int cpu, struct bintime bt)
 {
-        struct bintime tmp;
+        struct bintime now;
         struct pcpu_state *state;
 
         CTR3(KTR_SPARE2, "new co at %d:    on %d in %d",
@@ -871,27 +876,23 @@
                 ET_HW_UNLOCK(state);
                 return;
         }
+        binuptime(&now);
         /*
-         * If timer is periodic - just update next event time for target CPU.
-         * If timer is global - there is chance it is already programmed.
-         */
-        if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
-                tmp = hardperiod;
-                bintime_mul(&tmp, ticks - 1);
-                bintime_add(&tmp, &state->nexthard);
-                if (bintime_cmp(&tmp, &state->nextevent, <))
-                        state->nextevent = tmp;
-                if (periodic ||
-                    bintime_cmp(&state->nextevent, &nexttick, >=)) {
-                        ET_HW_UNLOCK(state);
-                        return;
-                }
-        }
-        /*
          * Otherwise we have to wake that CPU up, as we can't get present
          * bintime to reprogram global timer from here. If timer is per-CPU,
          * we by definition can't do it from here.
          */
+        if (bintime_cmp(&bt, &state->nextcall, ==)) {
+                ET_HW_UNLOCK(state);
+                return;
+        }
+        state->nextcall = bt;
+        if (bintime_cmp(&state->nextcall, &state->nextevent, >=)) {
+                ET_HW_UNLOCK(state);
+                return;
+        }
+        state->nextevent = state->nextcall;
+        loadtimer(&now, 0);
         ET_HW_UNLOCK(state);
         if (timer->et_flags & ET_FLAGS_PERCPU) {
                 state->handle = 1;