linux/arch/x86/include/asm/timer.h
Peter Zijlstra 84599f8a59 sched, x86: Fix cpufreq + sched_clock() TSC scaling
For frequency-dependent TSCs we only scale the cycles; we do not account
for the discrepancy in absolute value.

Our current formula is: time = cycles * mult

(where mult is a function of the cpu-speed on variable tsc machines)

Suppose our current cycle count is 10, and we have a multiplier of 5,
then our time value would end up being 50.

Now cpufreq comes along and changes the multiplier to, say, 3 or 7,
which would result in our time being 30 or 70 respectively.

That means that we can observe random jumps in the time value due to
frequency changes, in both the forward and backward direction.

So what this patch does is change the formula to:

  time = cycles * frequency + offset

And we calculate offset so that time_before == time_after, thereby
ridding us of these jumps in time.
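
With the numbers above: at cycle count 10 and multiplier 5 the time is 50;
when the multiplier drops to 3 we pick offset = 50 - 10 * 3 = 20, so
10 * 3 + 20 is still 50 and the clock stays continuous. A minimal sketch of
that idea in C (the sketch_* names are hypothetical; the actual patch updates
the per-cpu cyc2ns/cyc2ns_offset pair declared in the header below):

  static unsigned long long mult;   /* current cycles -> time multiplier */
  static unsigned long long offset; /* keeps time continuous across changes */

  static unsigned long long sketch_time(unsigned long long cycles)
  {
          return cycles * mult + offset;
  }

  static void sketch_set_mult(unsigned long long cycles_now,
                              unsigned long long new_mult)
  {
          /* choose offset so that time_before == time_after */
          offset = sketch_time(cycles_now) - cycles_now * new_mult;
          mult = new_mult;
  }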

[ Impact: fix/reduce sched_clock() jumps across frequency changing events ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Chucked-on-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2009-06-17 16:03:54 +02:00

#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>

#define TICK_SIZE (tick_nsec / 1000)

unsigned long long native_sched_clock(void);
unsigned long native_calibrate_tsc(void);

#ifdef CONFIG_X86_32
extern int timer_ack;
extern irqreturn_t timer_interrupt(int irq, void *dev_id);
#endif /* CONFIG_X86_32 */

extern int recalibrate_cpu_khz(void);

extern int no_timer_check;

#ifndef CONFIG_PARAVIRT
#define calibrate_tsc() native_calibrate_tsc()
#endif

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *      ns = cycles / (freq / ns_per_sec)
 *      ns = cycles * (ns_per_sec / freq)
 *      ns = cycles * (10^9 / (cpu_khz * 10^3))
 *      ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *      ns = cycles * (10^6 * SC / cpu_khz) / SC
 *      ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use khz divisor instead of mhz to keep a better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
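
/*
 * Worked example (illustrative numbers, not part of the original comment):
 * with cpu_khz = 2,000,000 (a 2 GHz TSC) and SC = 2^10,
 *      cyc2ns_scale = 10^6 * 1024 / 2,000,000 = 512
 * so 1000 cycles => (1000 * 512) >> 10 = 500 ns, i.e. 0.5 ns per cycle.
 */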

DECLARE_PER_CPU(unsigned long, cyc2ns);
DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

/*
 * Convert a cycle count into nanoseconds using this CPU's scale and offset.
 * Must be called with interrupts disabled so we stay on one CPU and read a
 * consistent (cyc2ns, cyc2ns_offset) pair.
 */
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
        int cpu = smp_processor_id();
        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
        ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
        return ns;
}

/* Same conversion, but safe to call with interrupts enabled. */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        unsigned long long ns;
        unsigned long flags;

        local_irq_save(flags);
        ns = __cycles_2_ns(cyc);
        local_irq_restore(flags);

        return ns;
}

#endif /* _ASM_X86_TIMER_H */
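
The per-cpu cyc2ns_offset added here is the commit message's "offset" in
practice: when cpufreq changes the TSC frequency, the updater in
arch/x86/kernel/tsc.c (set_cyc2ns_scale()) recomputes the per-cpu scale from
the new cpu_khz and picks the offset so the clock reads the same value just
before and just after the switch. A rough sketch of that update, assuming it
runs on the CPU being updated with interrupts disabled (the sketch_* name is
illustrative, not the kernel's exact code):

  static void sketch_set_cyc2ns_scale(unsigned long new_cpu_khz, int cpu,
                                      unsigned long long tsc_now)
  {
          /* time_before, computed with the old (scale, offset) pair */
          unsigned long long ns_now = __cycles_2_ns(tsc_now);

          if (new_cpu_khz) {
                  /* cyc2ns_scale = 10^6 * SC / cpu_khz, SC = 2^CYC2NS_SCALE_FACTOR */
                  per_cpu(cyc2ns, cpu) =
                          (1000000UL << CYC2NS_SCALE_FACTOR) / new_cpu_khz;

                  /* choose the offset so that time_before == time_after */
                  per_cpu(cyc2ns_offset, cpu) = ns_now -
                          ((tsc_now * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
          }
  }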