Index: alpha/alpha/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/mp_machdep.c,v retrieving revision 1.56 diff -u -r1.56 mp_machdep.c --- alpha/alpha/mp_machdep.c 4 Apr 2005 21:53:51 -0000 1.56 +++ alpha/alpha/mp_machdep.c 5 Apr 2006 01:43:27 -0000 @@ -224,7 +224,7 @@ spinlock_exit(); KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); - binuptime(PCPU_PTR(switchtime)); + PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); cpu_throw(NULL, choosethread()); /* doesn't return */ Index: alpha/alpha/trap.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/trap.c,v retrieving revision 1.123.2.1 diff -u -r1.123.2.1 trap.c --- alpha/alpha/trap.c 7 Mar 2006 18:08:08 -0000 1.123.2.1 +++ alpha/alpha/trap.c 5 Apr 2006 01:43:27 -0000 @@ -259,7 +259,6 @@ register struct proc *p; register int i; u_int64_t ucode; - u_int sticks; int user; #ifdef SMP register_t s; @@ -301,12 +300,11 @@ CTR5(KTR_TRAP, "%s trap: pid %d, (%lx, %lx, %lx)", user ? "user" : "kernel", p->p_pid, a0, a1, a2); if (user) { - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = framep; if (td->td_ucred != p->p_ucred) cred_update_thread(td); } else { - sticks = 0; /* XXX bogus -Wuninitialized warning */ KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); } @@ -593,7 +591,7 @@ out: if (user) { framep->tf_regs[FRAME_SP] = alpha_pal_rdusp(); - userret(td, framep, sticks); + userret(td, framep); mtx_assert(&Giant, MA_NOTOWNED); } return; @@ -630,7 +628,6 @@ struct proc *p; int error = 0; u_int64_t opc; - u_int sticks; u_int64_t args[10]; /* XXX */ u_int hidden = 0, nargs; #ifdef SMP @@ -662,7 +659,7 @@ PCPU_LAZY_INC(cnt.v_syscall); td->td_frame = framep; opc = framep->tf_regs[FRAME_PC] - 4; - sticks = td->td_sticks; + td->td_pticks = 0; if (td->td_ucred != p->p_ucred) cred_update_thread(td); if (p->p_flag & P_SA) @@ -771,7 +768,7 @@ if ((callp->sy_narg & SYF_MPSAFE) == 0) mtx_unlock(&Giant); - userret(td, framep, sticks); + userret(td, framep); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) Index: amd64/amd64/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/amd64/amd64/mp_machdep.c,v retrieving revision 1.260.2.5 diff -u -r1.260.2.5 mp_machdep.c --- amd64/amd64/mp_machdep.c 22 Mar 2006 13:45:47 -0000 1.260.2.5 +++ amd64/amd64/mp_machdep.c 5 Apr 2006 01:43:28 -0000 @@ -571,7 +571,7 @@ spinlock_exit(); KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); - binuptime(PCPU_PTR(switchtime)); + PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); cpu_throw(NULL, choosethread()); /* doesn't return */ Index: amd64/amd64/trap.c =================================================================== RCS file: /home/ncvs/src/sys/amd64/amd64/trap.c,v retrieving revision 1.289.2.3 diff -u -r1.289.2.3 trap.c --- amd64/amd64/trap.c 28 Nov 2005 20:03:03 -0000 1.289.2.3 +++ amd64/amd64/trap.c 5 Apr 2006 01:43:28 -0000 @@ -161,7 +161,6 @@ { struct thread *td = curthread; struct proc *p = td->td_proc; - u_int sticks = 0; int i = 0, ucode = 0, type, code; PCPU_LAZY_INC(cnt.v_trap); @@ -241,7 +240,7 @@ if (ISPL(frame.tf_cs) == SEL_UPL) { /* user trap */ - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -487,7 +486,7 @@ #endif user: - userret(td, &frame, sticks); + userret(td, &frame); mtx_assert(&Giant, 
MA_NOTOWNED); userout: out: @@ -694,7 +693,6 @@ struct thread *td = curthread; struct proc *p = td->td_proc; register_t orig_tf_rflags; - u_int sticks; int error; int narg; register_t args[8]; @@ -719,7 +717,7 @@ reg = 0; regcnt = 6; - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -837,7 +835,7 @@ /* * Handle reschedule and other end-of-syscall issues */ - userret(td, &frame, sticks); + userret(td, &frame); CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td, td->td_proc->p_pid, td->td_proc->p_comm, code); Index: amd64/amd64/tsc.c =================================================================== RCS file: /home/ncvs/src/sys/amd64/amd64/tsc.c,v retrieving revision 1.205 diff -u -r1.205 tsc.c --- amd64/amd64/tsc.c 17 Nov 2003 08:58:13 -0000 1.205 +++ amd64/amd64/tsc.c 5 Apr 2006 01:43:28 -0000 @@ -77,6 +77,7 @@ tsc_freq = tscval[1] - tscval[0]; if (bootverbose) printf("TSC clock: %lu Hz\n", tsc_freq); + set_cputicker(rdtsc, tsc_freq, 1); } Index: amd64/ia32/ia32_syscall.c =================================================================== RCS file: /home/ncvs/src/sys/amd64/ia32/ia32_syscall.c,v retrieving revision 1.8 diff -u -r1.8 ia32_syscall.c --- amd64/ia32/ia32_syscall.c 12 Apr 2005 23:18:53 -0000 1.8 +++ amd64/ia32/ia32_syscall.c 5 Apr 2006 01:43:28 -0000 @@ -95,7 +95,6 @@ struct thread *td = curthread; struct proc *p = td->td_proc; register_t orig_tf_rflags; - u_int sticks; int error; int narg; u_int32_t args[8]; @@ -108,7 +107,7 @@ */ PCPU_LAZY_INC(cnt.v_syscall); - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -233,7 +232,7 @@ /* * Handle reschedule and other end-of-syscall issues */ - userret(td, &frame, sticks); + userret(td, &frame); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) Index: arm/arm/trap.c =================================================================== RCS file: /home/ncvs/src/sys/arm/arm/trap.c,v retrieving revision 1.17 diff -u -r1.17 trap.c --- arm/arm/trap.c 23 Jun 2005 11:39:18 -0000 1.17 +++ arm/arm/trap.c 5 Apr 2006 01:43:28 -0000 @@ -228,7 +228,6 @@ vm_prot_t ftype; void *onfault; vm_offset_t va; - u_int sticks = 0; int error = 0; struct ksig ksig; struct proc *p; @@ -255,7 +254,8 @@ user = TRAP_USERMODE(tf); if (user) { - sticks = td->td_sticks; td->td_frame = tf; + td->td_pticks = 0; + td->td_frame = tf; if (td->td_ucred != td->td_proc->p_ucred) cred_update_thread(td); if (td->td_pflags & TDP_SA) @@ -458,7 +458,7 @@ out: /* If returning to user mode, make sure to invoke userret() */ if (user) - userret(td, tf, sticks); + userret(td, tf); } /* @@ -700,7 +700,6 @@ struct vm_map *map; vm_offset_t fault_pc, va; int error = 0; - u_int sticks = 0; struct ksig ksig; @@ -747,7 +746,7 @@ /* Prefetch aborts cannot happen in kernel mode */ if (__predict_false(!TRAP_USERMODE(tf))) dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); - sticks = td->td_sticks; + td->td_pticks = 0; /* Ok validate the address, can only execute in USER space */ @@ -802,7 +801,7 @@ call_trapsignal(td, ksig.signb, ksig.code); out: - userret(td, tf, sticks); + userret(td, tf); } @@ -864,10 +863,9 @@ register_t *ap, *args, copyargs[MAXARGS]; struct sysent *callp; int locked = 0; - u_int sticks = 0; PCPU_LAZY_INC(cnt.v_syscall); - sticks = td->td_sticks; + td->td_pticks = 0; if (td->td_ucred != td->td_proc->p_ucred) cred_update_thread(td); switch (insn & SWI_OS_MASK) { @@ -876,11 +874,11 @@ break; default: trapsignal(td, SIGILL, 
0); - userret(td, frame, td->td_sticks); + userret(td, frame); return; } code = insn & 0x000fffff; - sticks = td->td_sticks; + td->td_pticks = 0; ap = &frame->tf_r0; if (code == SYS_syscall) { code = *ap++; @@ -964,7 +962,7 @@ mtx_unlock(&Giant); - userret(td, frame, sticks); + userret(td, frame); CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td, td->td_proc->p_pid, td->td_proc->p_comm, code); @@ -986,6 +984,7 @@ td->td_frame = frame; + td->td_pticks = 0; if (td->td_proc->p_flag & P_SA) thread_user_enter(td); /* @@ -994,7 +993,7 @@ */ if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) { trapsignal(td, SIGILL, 0); - userret(td, frame, td->td_sticks); + userret(td, frame); return; } insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE); Index: i386/i386/geode.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/geode.c,v retrieving revision 1.5.8.1 diff -u -r1.5.8.1 geode.c --- i386/i386/geode.c 16 Aug 2005 22:47:14 -0000 1.5.8.1 +++ i386/i386/geode.c 5 Apr 2006 01:43:33 -0000 @@ -110,6 +110,20 @@ 1000 }; +static uint64_t +geode_cputicks(void) +{ + unsigned c; + static unsigned last; + static uint64_t offset; + + c = inl(geode_counter); + if (c < last) + offset += (1LL << 32); + last = c; + return (offset | c); +} + /* * The GEODE watchdog runs from a 32kHz frequency. One period of that is * 31250 nanoseconds which we round down to 2^14 nanoseconds. The watchdog @@ -176,6 +190,7 @@ tc_init(&geode_timecounter); EVENTHANDLER_REGISTER(watchdog_list, geode_watchdog, NULL, 0); + set_cputicker(geode_cputicks, 27000000, 0); } } else if (pci_get_devid(self) == 0x0510100b) { gpio = pci_read_config(self, PCIR_BAR(0), 4); Index: i386/i386/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/mp_machdep.c,v retrieving revision 1.252.2.5 diff -u -r1.252.2.5 mp_machdep.c --- i386/i386/mp_machdep.c 14 Mar 2006 21:07:34 -0000 1.252.2.5 +++ i386/i386/mp_machdep.c 5 Apr 2006 01:43:33 -0000 @@ -641,7 +641,7 @@ spinlock_exit(); KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); - binuptime(PCPU_PTR(switchtime)); + PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); cpu_throw(NULL, choosethread()); /* doesn't return */ Index: i386/i386/trap.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/trap.c,v retrieving revision 1.277.2.3 diff -u -r1.277.2.3 trap.c --- i386/i386/trap.c 28 Nov 2005 20:03:04 -0000 1.277.2.3 +++ i386/i386/trap.c 5 Apr 2006 01:43:33 -0000 @@ -175,7 +175,6 @@ { struct thread *td = curthread; struct proc *p = td->td_proc; - u_int sticks = 0; int i = 0, ucode = 0, type, code; vm_offset_t eva; #ifdef POWERFAIL_NMI @@ -274,7 +273,7 @@ !(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) { /* user trap */ - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -649,7 +648,7 @@ #endif user: - userret(td, &frame, sticks); + userret(td, &frame); mtx_assert(&Giant, MA_NOTOWNED); userout: out: @@ -879,7 +878,6 @@ struct thread *td = curthread; struct proc *p = td->td_proc; register_t orig_tf_eflags; - u_int sticks; int error; int narg; int args[8]; @@ -900,7 +898,7 @@ } #endif - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = &frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -1028,7 +1026,7 @@ /* * Handle reschedule and other end-of-syscall issues */ - userret(td, &frame, sticks); 
+ userret(td, &frame); CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td, td->td_proc->p_pid, td->td_proc->p_comm, code); Index: i386/i386/tsc.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/tsc.c,v retrieving revision 1.204 diff -u -r1.204 tsc.c --- i386/i386/tsc.c 21 Oct 2003 18:28:34 -0000 1.204 +++ i386/i386/tsc.c 5 Apr 2006 01:43:33 -0000 @@ -86,6 +86,7 @@ tsc_freq = tscval[1] - tscval[0]; if (bootverbose) printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq); + set_cputicker(rdtsc, tsc_freq, 1); } Index: ia64/ia32/ia32_trap.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia32/ia32_trap.c,v retrieving revision 1.5 diff -u -r1.5 ia32_trap.c --- ia64/ia32/ia32_trap.c 12 Apr 2005 23:18:54 -0000 1.5 +++ ia64/ia32/ia32_trap.c 5 Apr 2006 01:43:33 -0000 @@ -200,7 +200,6 @@ struct thread *td; uint64_t ucode; int sig; - u_int sticks; KASSERT(TRAPF_USERMODE(tf), ("%s: In kernel mode???", __func__)); @@ -209,7 +208,7 @@ td = curthread; td->td_frame = tf; - sticks = td->td_sticks; + td->td_pticks = 0; p = td->td_proc; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -287,7 +286,7 @@ trapsignal(td, sig, ucode); out: - userret(td, tf, sticks); + userret(td, tf); mtx_assert(&Giant, MA_NOTOWNED); do_ast(tf); } Index: ia64/ia64/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/mp_machdep.c,v retrieving revision 1.55.2.2 diff -u -r1.55.2.2 mp_machdep.c --- ia64/ia64/mp_machdep.c 14 Feb 2006 03:40:49 -0000 1.55.2.2 +++ ia64/ia64/mp_machdep.c 5 Apr 2006 01:43:33 -0000 @@ -136,7 +136,7 @@ mtx_lock_spin(&sched_lock); - binuptime(PCPU_PTR(switchtime)); + PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); ia64_set_tpr(0); Index: ia64/ia64/trap.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/trap.c,v retrieving revision 1.113.2.1 diff -u -r1.113.2.1 trap.c --- ia64/ia64/trap.c 13 Sep 2005 21:07:14 -0000 1.113.2.1 +++ ia64/ia64/trap.c 5 Apr 2006 01:43:33 -0000 @@ -360,7 +360,6 @@ struct thread *td; uint64_t ucode; int error, sig, user; - u_int sticks; user = TRAPF_USERMODE(tf) ? 
1 : 0; @@ -372,12 +371,11 @@ if (user) { ia64_set_fpsr(IA64_FPSR_DEFAULT); - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = tf; if (td->td_ucred != p->p_ucred) cred_update_thread(td); } else { - sticks = 0; /* XXX bogus -Wuninitialized warning */ KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); #ifdef KDB @@ -873,7 +871,7 @@ out: if (user) { - userret(td, tf, sticks); + userret(td, tf); mtx_assert(&Giant, MA_NOTOWNED); do_ast(tf); } @@ -939,7 +937,6 @@ struct thread *td; uint64_t *args; int code, error; - u_int sticks; ia64_set_fpsr(IA64_FPSR_DEFAULT); @@ -952,7 +949,7 @@ td->td_frame = tf; p = td->td_proc; - sticks = td->td_sticks; + td->td_pticks = 0; if (td->td_ucred != p->p_ucred) cred_update_thread(td); if (p->p_flag & P_SA) @@ -1026,7 +1023,7 @@ } } - userret(td, tf, sticks); + userret(td, tf); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) Index: kern/init_main.c =================================================================== RCS file: /home/ncvs/src/sys/kern/init_main.c,v retrieving revision 1.256.2.2 diff -u -r1.256.2.2 init_main.c --- kern/init_main.c 5 Oct 2005 10:31:03 -0000 1.256.2.2 +++ kern/init_main.c 5 Apr 2006 01:43:34 -0000 @@ -452,11 +452,10 @@ sx_slock(&allproc_lock); LIST_FOREACH(p, &allproc, p_list) { microuptime(&p->p_stats->p_start); - p->p_rux.rux_runtime.sec = 0; - p->p_rux.rux_runtime.frac = 0; + p->p_rux.rux_runtime = 0; } sx_sunlock(&allproc_lock); - binuptime(PCPU_PTR(switchtime)); + PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); /* Index: kern/kern_clock.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_clock.c,v retrieving revision 1.178.2.3 diff -u -r1.178.2.3 kern_clock.c --- kern/kern_clock.c 10 Mar 2006 19:37:33 -0000 1.178.2.3 +++ kern/kern_clock.c 5 Apr 2006 01:43:34 -0000 @@ -421,7 +421,7 @@ */ if (p->p_flag & P_SA) thread_statclock(1); - p->p_rux.rux_uticks++; + td->td_uticks++; if (p->p_nice > NZERO) cp_time[CP_NICE]++; else @@ -441,13 +441,13 @@ */ if ((td->td_pflags & TDP_ITHREAD) || td->td_intr_nesting_level >= 2) { - p->p_rux.rux_iticks++; + td->td_iticks++; cp_time[CP_INTR]++; } else { if (p->p_flag & P_SA) thread_statclock(0); + td->td_pticks++; td->td_sticks++; - p->p_rux.rux_sticks++; if (td != PCPU_GET(idlethread)) cp_time[CP_SYS]++; else Index: kern/kern_fork.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_fork.c,v retrieving revision 1.252.2.1 diff -u -r1.252.2.1 kern_fork.c --- kern/kern_fork.c 18 Mar 2006 23:37:36 -0000 1.252.2.1 +++ kern/kern_fork.c 5 Apr 2006 01:43:34 -0000 @@ -831,7 +831,7 @@ struct trapframe *frame; { - userret(td, frame, 0); + userret(td, frame); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) ktrsysret(SYS_fork, 0, 0); Index: kern/kern_proc.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_proc.c,v retrieving revision 1.230.2.3 diff -u -r1.230.2.3 kern_proc.c --- kern/kern_proc.c 5 Jan 2006 20:23:10 -0000 1.230.2.3 +++ kern/kern_proc.c 5 Apr 2006 01:43:34 -0000 @@ -611,7 +611,6 @@ struct thread *td0; struct tty *tp; struct session *sp; - struct timeval tv; struct ucred *cred; struct sigacts *ps; @@ -682,8 +681,7 @@ kp->ki_swtime = p->p_swtime; kp->ki_pid = p->p_pid; kp->ki_nice = p->p_nice; - bintime2timeval(&p->p_rux.rux_runtime, &tv); - kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec; + kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime); mtx_unlock_spin(&sched_lock); 
if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) { kp->ki_start = p->p_stats->p_start; Index: kern/kern_resource.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_resource.c,v retrieving revision 1.148.2.1 diff -u -r1.148.2.1 kern_resource.c --- kern/kern_resource.c 28 Dec 2005 17:35:55 -0000 1.148.2.1 +++ kern/kern_resource.c 5 Apr 2006 01:43:34 -0000 @@ -69,8 +69,6 @@ static LIST_HEAD(uihashhead, uidinfo) *uihashtbl; static u_long uihash; /* size of hash table - 1 */ -static void calcru1(struct proc *p, struct rusage_ext *ruxp, - struct timeval *up, struct timeval *sp); static int donice(struct thread *td, struct proc *chgp, int n); static struct uidinfo *uilookup(uid_t uid); @@ -694,52 +692,6 @@ return (error); } -/* - * Transform the running time and tick information in proc p into user, - * system, and interrupt time usage. - */ -void -calcru(p, up, sp) - struct proc *p; - struct timeval *up; - struct timeval *sp; -{ - struct bintime bt; - struct rusage_ext rux; - struct thread *td; - int bt_valid; - - PROC_LOCK_ASSERT(p, MA_OWNED); - mtx_assert(&sched_lock, MA_NOTOWNED); - bt_valid = 0; - mtx_lock_spin(&sched_lock); - rux = p->p_rux; - FOREACH_THREAD_IN_PROC(p, td) { - if (TD_IS_RUNNING(td)) { - /* - * Adjust for the current time slice. This is - * actually fairly important since the error here is - * on the order of a time quantum which is much - * greater than the precision of binuptime(). - */ - KASSERT(td->td_oncpu != NOCPU, - ("%s: running thread has no CPU", __func__)); - if (!bt_valid) { - binuptime(&bt); - bt_valid = 1; - } - bintime_add(&rux.rux_runtime, &bt); - bintime_sub(&rux.rux_runtime, - &pcpu_find(td->td_oncpu)->pc_switchtime); - } - } - mtx_unlock_spin(&sched_lock); - calcru1(p, &rux, up, sp); - p->p_rux.rux_uu = rux.rux_uu; - p->p_rux.rux_su = rux.rux_su; - p->p_rux.rux_iu = rux.rux_iu; -} - void calccru(p, up, sp) struct proc *p; @@ -748,37 +700,52 @@ { PROC_LOCK_ASSERT(p, MA_OWNED); - calcru1(p, &p->p_crux, up, sp); + calcru(p, up, sp); } -static void -calcru1(p, ruxp, up, sp) - struct proc *p; - struct rusage_ext *ruxp; - struct timeval *up; - struct timeval *sp; +/* + * Transform the running time and tick information in proc p into user, + * system, and interrupt time usage. If appropriate, include the current + * time slice on this CPU. 
+ */ + +void +calcru(struct proc *p, struct timeval *up, struct timeval *sp) { - struct timeval tv; + struct thread *td; + struct rusage_ext *ruxp = &p->p_rux; + uint64_t u; /* {user, system, interrupt, total} {ticks, usec}; previous tu: */ u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu; + PROC_LOCK_ASSERT(p, MA_OWNED); + mtx_assert(&sched_lock, MA_NOTOWNED); + mtx_lock_spin(&sched_lock); + if (curthread->td_proc == p) { + td = curthread; + u = cpu_ticks(); + ruxp->rux_runtime += (u - PCPU_GET(switchtime)); + PCPU_SET(switchtime, u); + ruxp->rux_uticks += td->td_uticks; + td->td_uticks = 0; + ruxp->rux_iticks += td->td_iticks; + td->td_iticks = 0; + ruxp->rux_sticks += td->td_sticks; + td->td_sticks = 0; + } + ut = ruxp->rux_uticks; st = ruxp->rux_sticks; it = ruxp->rux_iticks; + tu = ruxp->rux_runtime; + mtx_unlock_spin(&sched_lock); + tu = cputick2usec(tu); tt = ut + st + it; if (tt == 0) { st = 1; tt = 1; } - bintime2timeval(&ruxp->rux_runtime, &tv); - tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec; ptu = ruxp->rux_uu + ruxp->rux_su + ruxp->rux_iu; - if (tu < ptu) { - printf( -"calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n", - (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm); - tu = ptu; - } if ((int64_t)tu < 0) { printf("calcru: negative runtime of %jd usec for pid %d (%s)\n", (intmax_t)tu, p->p_pid, p->p_comm); @@ -789,7 +756,17 @@ uu = (tu * ut) / tt; su = (tu * st) / tt; iu = tu - uu - su; - + if (tu < ptu) { + printf( +"calcru: runtime went backwards from %ju usec to %ju usec for pid %d (%s)\n", + (uintmax_t)ptu, (uintmax_t)tu, p->p_pid, p->p_comm); + printf("u %ju:%ju/%ju s %ju:%ju/%ju i %ju:%ju/%ju\n", + (uintmax_t)ut, (uintmax_t)ruxp->rux_uu, uu, + (uintmax_t)st, (uintmax_t)ruxp->rux_su, su, + (uintmax_t)it, (uintmax_t)ruxp->rux_iu, iu); + tu = ptu; + } +#if 0 /* Enforce monotonicity. */ if (uu < ruxp->rux_uu || su < ruxp->rux_su || iu < ruxp->rux_iu) { if (uu < ruxp->rux_uu) @@ -811,6 +788,9 @@ KASSERT(iu >= ruxp->rux_iu, ("calcru: monotonisation botch 2")); } + KASSERT(uu + su + iu <= tu, + ("calcru: monotisation botch 3")); +#endif ruxp->rux_uu = uu; ruxp->rux_su = su; ruxp->rux_iu = iu; @@ -884,7 +864,7 @@ register long *ip, *ip2; register int i; - bintime_add(&rux->rux_runtime, &rux2->rux_runtime); + rux->rux_runtime += rux2->rux_runtime; rux->rux_uticks += rux2->rux_uticks; rux->rux_sticks += rux2->rux_sticks; rux->rux_iticks += rux2->rux_iticks; Index: kern/kern_synch.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_synch.c,v retrieving revision 1.270.2.4 diff -u -r1.270.2.4 kern_synch.c --- kern/kern_synch.c 2 Apr 2006 00:14:57 -0000 1.270.2.4 +++ kern/kern_synch.c 5 Apr 2006 01:43:34 -0000 @@ -253,7 +253,7 @@ void mi_switch(int flags, struct thread *newtd) { - struct bintime new_switchtime; + uint64_t new_switchtime; struct thread *td; struct proc *p; @@ -282,9 +282,14 @@ * Compute the amount of time during which the current * process was running, and add that to its total so far. 
*/ - binuptime(&new_switchtime); - bintime_add(&p->p_rux.rux_runtime, &new_switchtime); - bintime_sub(&p->p_rux.rux_runtime, PCPU_PTR(switchtime)); + new_switchtime = cpu_ticks(); + p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime)); + p->p_rux.rux_uticks += td->td_uticks; + td->td_uticks = 0; + p->p_rux.rux_iticks += td->td_iticks; + td->td_iticks = 0; + p->p_rux.rux_sticks += td->td_sticks; + td->td_sticks = 0; td->td_generation++; /* bump preempt-detect counter */ @@ -303,7 +308,7 @@ * it reaches the max, arrange to kill the process in ast(). */ if (p->p_cpulimit != RLIM_INFINITY && - p->p_rux.rux_runtime.sec >= p->p_cpulimit) { + p->p_rux.rux_runtime >= p->p_cpulimit * cpu_tickrate()) { p->p_sflag |= PS_XCPU; td->td_flags |= TDF_ASTPENDING; } Index: kern/kern_tc.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_tc.c,v retrieving revision 1.164 diff -u -r1.164 kern_tc.c --- kern/kern_tc.c 26 Mar 2005 20:04:28 -0000 1.164 +++ kern/kern_tc.c 5 Apr 2006 01:43:34 -0000 @@ -116,6 +116,7 @@ #undef TC_STATS static void tc_windup(void); +static void cpu_tick_calibrate(int); static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS) @@ -131,6 +132,7 @@ #endif return SYSCTL_OUT(req, &boottime, sizeof(boottime)); } + /* * Return the difference between the timehands' counter value now and what * was when we copied it to the timehands' offset_count. @@ -363,6 +365,7 @@ struct timespec ts2; struct bintime bt, bt2; + cpu_tick_calibrate(1); nsetclock++; binuptime(&bt2); timespec2bintime(ts, &bt); @@ -379,6 +382,7 @@ (intmax_t)ts2.tv_sec, ts2.tv_nsec, (intmax_t)ts->tv_sec, ts->tv_nsec); } + cpu_tick_calibrate(1); } /* @@ -475,8 +479,8 @@ * x = a * 2^32 / 10^9 = a * 4.294967296 * * The range of th_adjustment is +/- 5000PPM so inside a 64bit int - * we can only multiply by about 850 without overflowing, but that - * leaves suitably precise fractions for multiply before divide. + * we can only multiply by about 850 without overflowing, that + * leaves no suitably precise fractions for multiply before divide. * * Divide before multiply with a fraction of 2199/512 results in a * systematic undercompensation of 10PPM of th_adjustment. On a @@ -749,11 +753,16 @@ tc_ticktock(void) { static int count; + static time_t last_calib; if (++count < tc_tick) return; count = 0; tc_windup(); + if (time_uptime != last_calib && !(time_uptime & 0xf)) { + cpu_tick_calibrate(0); + last_calib = time_uptime; + } } static void @@ -782,3 +791,143 @@ } SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL) + +/* Cpu tick handling -------------------------------------------------*/ + +static int cpu_tick_variable; +static uint64_t cpu_tick_frequency; + +static +uint64_t +tc_cpu_ticks(void) +{ + static uint64_t base; + static unsigned last; + unsigned u; + struct timecounter *tc; + + tc = timehands->th_counter; + u = tc->tc_get_timecount(tc) & tc->tc_counter_mask; + if (u < last) + base += tc->tc_counter_mask + 1; + last = u; + return (u + base); +} + +/* + * This function gets called every 16 seconds on only one designated + * CPU in the system from hardclock() via tc_ticktock(). + * + * Whenever the real time clock is stepped we get called with reset=1 + * to make sure we handle suspend/resume and similar events correctly.
+ */ + +static void +cpu_tick_calibrate(int reset) +{ + static uint64_t c_last; + uint64_t c_this, c_delta; + static struct bintime t_last; + struct bintime t_this, t_delta; + + if (reset) { + /* The clock was stepped, abort & reset */ + t_last.sec = 0; + return; + } + + /* we don't calibrate fixed rate cputicks */ + if (!cpu_tick_variable) + return; + + getbinuptime(&t_this); + c_this = cpu_ticks(); + if (t_last.sec != 0) { + c_delta = c_this - c_last; + t_delta = t_this; + bintime_sub(&t_delta, &t_last); + if (0 && bootverbose) { + struct timespec ts; + bintime2timespec(&t_delta, &ts); + printf("%ju %ju.%016jx %ju.%09ju", + (uintmax_t)c_delta >> 4, + (uintmax_t)t_delta.sec, (uintmax_t)t_delta.frac, + (uintmax_t)ts.tv_sec, (uintmax_t)ts.tv_nsec); + } + /* + * Validate that 16 +/- 1/256 seconds passed. + * After division by 16 this gives us a precision of + * roughly 250PPM which is sufficient + */ + if (t_delta.sec > 16 || ( + t_delta.sec == 16 && t_delta.frac >= (0x01LL << 56))) { + /* too long */ + if (0 && bootverbose) + printf("\ttoo long\n"); + } else if (t_delta.sec < 15 || + (t_delta.sec == 15 && t_delta.frac <= (0xffLL << 56))) { + /* too short */ + if (0 && bootverbose) + printf("\ttoo short\n"); + } else { + /* just right */ + c_delta >>= 4; + if (c_delta > cpu_tick_frequency) { + if (0 && bootverbose) + printf("\thigher\n"); + cpu_tick_frequency = c_delta; + } else { + if (0 && bootverbose) + printf("\tlower\n"); + } + } + } + c_last = c_this; + t_last = t_this; +} + +void +set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var) +{ + + if (func == NULL) { + cpu_ticks = tc_cpu_ticks; + } else { + cpu_tick_frequency = freq; + cpu_tick_variable = var; + cpu_ticks = func; + } +} + +uint64_t +cpu_tickrate(void) +{ + + if (cpu_ticks == tc_cpu_ticks) + return (tc_getfrequency()); + return (cpu_tick_frequency); +} + +/* + * We need to be slightly careful converting cputicks to microseconds. + * There is plenty of margin in 64 bits of microseconds (half a million + * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply + * before divide conversion (to retain precision) we find that the + * margin shrinks to 1.5 hours (one millionth of 146y). + * With a three prong approach we never lose significant bits, no + * matter what the cputick rate and length of the time interval is. + */ + +uint64_t +cputick2usec(uint64_t tick) +{ + + if (tick > 18446744073709551LL) /* floor(2^64 / 1000) */ + return (tick / (cpu_tickrate() / 1000000LL)); + else if (tick > 18446744073709LL) /* floor(2^64 / 1000000) */ + return ((tick * 1000LL) / (cpu_tickrate() / 1000LL)); + else + return ((tick * 1000000LL) / cpu_tickrate()); +} + +cpu_tick_f *cpu_ticks = tc_cpu_ticks; Index: kern/kern_thread.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_thread.c,v retrieving revision 1.216.2.4 diff -u -r1.216.2.4 kern_thread.c --- kern/kern_thread.c 26 Mar 2006 01:29:55 -0000 1.216.2.4 +++ kern/kern_thread.c 5 Apr 2006 01:43:34 -0000 @@ -440,7 +440,7 @@ void thread_exit(void) { - struct bintime new_switchtime; + uint64_t new_switchtime; struct thread *td; struct proc *p; struct ksegrp *kg; @@ -480,9 +480,11 @@ sched_thread_exit(td); /* Do the same timestamp bookkeeping that mi_switch() would do.
*/ - binuptime(&new_switchtime); - bintime_add(&p->p_rux.rux_runtime, &new_switchtime); - bintime_sub(&p->p_rux.rux_runtime, PCPU_PTR(switchtime)); + new_switchtime = cpu_ticks(); + p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime)); + p->p_rux.rux_uticks += td->td_uticks; + p->p_rux.rux_sticks += td->td_sticks; + p->p_rux.rux_iticks += td->td_iticks; PCPU_SET(switchtime, new_switchtime); PCPU_SET(switchticks, ticks); cnt.v_swtch++; Index: kern/subr_trap.c =================================================================== RCS file: /home/ncvs/src/sys/kern/subr_trap.c,v retrieving revision 1.281 diff -u -r1.281 subr_trap.c --- kern/subr_trap.c 28 Mar 2005 12:52:46 -0000 1.281 +++ kern/subr_trap.c 5 Apr 2006 01:43:35 -0000 @@ -74,10 +74,7 @@ * MPSAFE */ void -userret(td, frame, oticks) - struct thread *td; - struct trapframe *frame; - u_int oticks; +userret(struct thread *td, struct trapframe *frame) { struct proc *p = td->td_proc; @@ -123,10 +120,8 @@ * Charge system time if profiling. */ if (p->p_flag & P_PROFIL) { - quad_t ticks; - ticks = td->td_sticks - oticks; - addupc_task(td, TRAPF_PC(frame), (u_int)ticks * psratio); + addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio); } /* @@ -149,7 +144,6 @@ struct proc *p; struct ksegrp *kg; struct rlimit rlim; - u_int sticks; int sflag; int flags; int sig; @@ -168,7 +162,7 @@ mtx_assert(&Giant, MA_NOTOWNED); mtx_assert(&sched_lock, MA_NOTOWNED); td->td_frame = framep; - sticks = td->td_sticks; + td->td_pticks = 0; if ((p->p_flag & P_SA) && (td->td_mailbox == NULL)) thread_user_enter(td); @@ -230,7 +224,7 @@ PROC_LOCK(p); lim_rlimit(p, RLIMIT_CPU, &rlim); mtx_lock_spin(&sched_lock); - if (p->p_rux.rux_runtime.sec >= rlim.rlim_max) { + if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) { mtx_unlock_spin(&sched_lock); killproc(p, "exceeded maximum CPU limit"); } else { @@ -268,6 +262,6 @@ PROC_UNLOCK(p); } - userret(td, framep, sticks); + userret(td, framep); mtx_assert(&Giant, MA_NOTOWNED); } Index: powerpc/powerpc/trap.c =================================================================== RCS file: /home/ncvs/src/sys/powerpc/powerpc/trap.c,v retrieving revision 1.54.2.1 diff -u -r1.54.2.1 trap.c --- powerpc/powerpc/trap.c 8 Aug 2005 07:02:12 -0000 1.54.2.1 +++ powerpc/powerpc/trap.c 5 Apr 2006 01:43:43 -0000 @@ -143,7 +143,7 @@ struct thread *td; struct proc *p; int sig, type, user; - u_int sticks, ucode; + u_int ucode; PCPU_LAZY_INC(cnt.v_trap); @@ -153,13 +153,12 @@ type = ucode = frame->exc; sig = 0; user = frame->srr1 & PSL_PR; - sticks = 0; CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm, trapname(type), user ? 
"user" : "kernel"); if (user) { - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -257,7 +256,7 @@ trapsignal(td, sig, ucode); } - userret(td, frame, sticks); + userret(td, frame); mtx_assert(&Giant, MA_NOTOWNED); } Index: sparc64/sparc64/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/sparc64/sparc64/mp_machdep.c,v retrieving revision 1.29.2.1 diff -u -r1.29.2.1 mp_machdep.c --- sparc64/sparc64/mp_machdep.c 31 Mar 2006 23:48:12 -0000 1.29.2.1 +++ sparc64/sparc64/mp_machdep.c 5 Apr 2006 01:43:43 -0000 @@ -363,7 +363,7 @@ /* ok, now grab sched_lock and enter the scheduler */ mtx_lock_spin(&sched_lock); spinlock_exit(); - binuptime(PCPU_PTR(switchtime)); + PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); cpu_throw(NULL, choosethread()); /* doesn't return */ } Index: sparc64/sparc64/tick.c =================================================================== RCS file: /home/ncvs/src/sys/sparc64/sparc64/tick.c,v retrieving revision 1.16.2.1 diff -u -r1.16.2.1 tick.c --- sparc64/sparc64/tick.c 31 Mar 2006 23:38:29 -0000 1.16.2.1 +++ sparc64/sparc64/tick.c 5 Apr 2006 01:43:43 -0000 @@ -64,6 +64,13 @@ static void tick_hardclock(struct clockframe *); +static uint64_t +tick_cputicks(void) +{ + + return (rd(tick)); +} + void cpu_initclocks(void) { @@ -148,6 +155,8 @@ * handled. */ tick_stop(); + + set_cputicker(tick_cputicks, tick_freq, 0); } void Index: sparc64/sparc64/trap.c =================================================================== RCS file: /home/ncvs/src/sys/sparc64/sparc64/trap.c,v retrieving revision 1.74 diff -u -r1.74 trap.c --- sparc64/sparc64/trap.c 12 Apr 2005 23:18:54 -0000 1.74 +++ sparc64/sparc64/trap.c 5 Apr 2006 01:43:43 -0000 @@ -230,7 +230,6 @@ { struct thread *td; struct proc *p; - u_int sticks; int error; int sig; @@ -247,7 +246,7 @@ KASSERT(td->td_proc != NULL, ("trap: curproc NULL")); p = td->td_proc; - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = tf; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -287,7 +286,7 @@ trapsignal(td, sig, tf->tf_type); } - userret(td, tf, sticks); + userret(td, tf); mtx_assert(&Giant, MA_NOTOWNED); } else { KASSERT((tf->tf_type & T_KERNEL) != 0, @@ -496,7 +495,6 @@ register_t args[8]; register_t *argp; struct proc *p; - u_int sticks; u_long code; u_long tpc; int reg; @@ -517,7 +515,7 @@ reg = 0; regcnt = REG_MAXARGS; - sticks = td->td_sticks; + td->td_pticks = 0; td->td_frame = tf; if (td->td_ucred != p->p_ucred) cred_update_thread(td); @@ -640,7 +638,7 @@ /* * Handle reschedule and other end-of-syscall issues */ - userret(td, tf, sticks); + userret(td, tf); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) Index: sys/pcpu.h =================================================================== RCS file: /home/ncvs/src/sys/sys/pcpu.h,v retrieving revision 1.16 diff -u -r1.16 pcpu.h --- sys/pcpu.h 26 Apr 2005 17:07:40 -0000 1.16 +++ sys/pcpu.h 5 Apr 2006 01:43:43 -0000 @@ -60,7 +60,7 @@ struct thread *pc_fpcurthread; /* Fp state owner */ struct thread *pc_deadthread; /* Zombie thread or NULL */ struct pcb *pc_curpcb; /* Current pcb */ - struct bintime pc_switchtime; + uint64_t pc_switchtime; int pc_switchticks; u_int pc_cpuid; /* This cpu number */ cpumask_t pc_cpumask; /* This cpu mask */ Index: sys/proc.h =================================================================== RCS file: /home/ncvs/src/sys/sys/proc.h,v retrieving revision 1.432.2.5 diff -u -r1.432.2.5 proc.h 
--- sys/proc.h 10 Mar 2006 19:37:35 -0000 1.432.2.5 +++ sys/proc.h 5 Apr 2006 01:43:43 -0000 @@ -276,7 +276,10 @@ struct ucred *td_ucred; /* (k) Reference to credentials. */ struct thread *td_standin; /* (k + a) Use this for an upcall. */ struct kse_upcall *td_upcall; /* (k + j) Upcall structure. */ - u_int64_t td_sticks; /* (k) Statclock hits in system mode. */ + u_int td_pticks; /* (k) Statclock hits for profiling */ + u_int td_sticks; /* (k) Statclock hits in system mode. */ + u_int td_iticks; /* (k) Statclock hits in intr mode. */ + u_int td_uticks; /* (k) Statclock hits in user mode. */ u_int td_uuticks; /* (k) Statclock hits (usr), for UTS. */ u_int td_usticks; /* (k) Statclock hits (sys), for UTS. */ int td_intrval; /* (j) Return value of TDF_INTERRUPT. */ @@ -498,7 +501,7 @@ * Locking: (cj) means (j) for p_rux and (c) for p_crux. */ struct rusage_ext { - struct bintime rux_runtime; /* (cj) Real time. */ + u_int64_t rux_runtime; /* (cj) Real time. */ u_int64_t rux_uticks; /* (cj) Statclock hits in user mode. */ u_int64_t rux_sticks; /* (cj) Statclock hits in sys mode. */ u_int64_t rux_iticks; /* (cj) Statclock hits in intr mode. */ @@ -898,7 +901,7 @@ void cpu_switch(struct thread *old, struct thread *new); void cpu_throw(struct thread *old, struct thread *new) __dead2; void unsleep(struct thread *); -void userret(struct thread *, struct trapframe *, u_int); +void userret(struct thread *, struct trapframe *); void cpu_exit(struct thread *); void exit1(struct thread *, int) __dead2; Index: sys/systm.h =================================================================== RCS file: /home/ncvs/src/sys/sys/systm.h,v retrieving revision 1.234.2.4 diff -u -r1.234.2.4 systm.h --- sys/systm.h 2 Apr 2006 00:14:57 -0000 1.234.2.4 +++ sys/systm.h 5 Apr 2006 01:43:43 -0000 @@ -239,6 +239,12 @@ int unsetenv(const char *name); int testenv(const char *name); +typedef uint64_t (cpu_tick_f)(void); +void set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var); +extern cpu_tick_f *cpu_ticks; +uint64_t cpu_tickrate(void); +uint64_t cputick2usec(uint64_t tick); + #ifdef APM_FIXUP_CALLTODO struct timeval; void adjust_timeout_calltodo(struct timeval *time_change);
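For reference, the two building blocks of the cputick code above can be sketched in isolation. This is only an illustration of the technique, not part of the patch; read_counter(), sample_ticks() and ticks_to_usec() are placeholder names invented for the sketch, and the real kernel code uses the timecounter hardware, rdtsc() and cpu_tickrate() as in the hunks above.

#include <stdint.h>

/* Hypothetical narrow counter read, standing in for rdtsc()/tc_get_timecount(). */
extern uint32_t read_counter(void);

/*
 * Fold a wrapping 32-bit counter into a monotonic 64-bit tick count,
 * the same pattern used by tc_cpu_ticks() and geode_cputicks() above.
 * Assumes it is called at least once per counter wrap period.
 */
static uint64_t
sample_ticks(void)
{
	static uint64_t base;		/* accumulated wrap offset */
	static uint32_t last;		/* previous raw sample */
	uint32_t c;

	c = read_counter();
	if (c < last)			/* counter wrapped since last call */
		base += (uint64_t)1 << 32;
	last = c;
	return (base + c);
}

/*
 * Three-range tick -> microsecond conversion, mirroring cputick2usec():
 * divide first for huge counts, split the scaling for mid-range counts,
 * multiply first (best precision) for small counts.  "rate" is the tick
 * frequency in Hz, i.e. what cpu_tickrate() returns in the patch.
 */
static uint64_t
ticks_to_usec(uint64_t tick, uint64_t rate)
{
	if (tick > 18446744073709551ULL)	/* floor(2^64 / 10^3) */
		return (tick / (rate / 1000000));
	else if (tick > 18446744073709ULL)	/* floor(2^64 / 10^6) */
		return ((tick * 1000) / (rate / 1000));
	else
		return ((tick * 1000000) / rate);
}

Multiply-before-divide is only safe while tick * 10^6 still fits in 64 bits, which at a 4 GHz tick rate is on the order of an hour of accumulated runtime; the three-way split keeps full precision for short intervals while remaining correct over the whole 64-bit range.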