#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			    "cannot disable TSC completely.\n");
	mark_tsc_unstable("user disabled TSC");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use khz divisor instead of mhz to keep a better precision, since
 *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *		-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
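
/*
 * Worked example (illustrative, assuming CYC2NS_SCALE_FACTOR == 10):
 * a 2 GHz CPU has cpu_khz = 2,000,000, so
 *	cyc2ns_scale = (10^6 << 10) / 2,000,000 = 512
 * and a reading of 4,000,000 cycles converts to
 *	ns = (4,000,000 * 512) >> 10 = 2,000,000 ns,
 * i.e. 0.5 ns per cycle, as expected for a 2 GHz clock.
 */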

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

	/*
	 * Start smoothly with the new frequency:
	 */
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(!tsc_enabled && !tsc_unstable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
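	/*
	 * Illustrative: with HZ == 250 the fallback above advances in
	 * 1,000,000,000 / 250 = 4,000,000 ns (4 ms) steps, i.e. at jiffy
	 * rather than TSC resolution.
	 */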

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm and to get an accurate reading */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);

		/*
		 * Error: ECTCNEVERSET
		 * The CTC wasn't reliable: we got a hit on the very first read,
		 * or the CPU was so fast/slow that the quotient wouldn't fit in
		 * 32 bits.
		 */
		if (count <= 1)
			continue;

		/* cpu freq too slow: */
		if ((end - start) <= CALIBRATE_TIME_MSEC)
			continue;

		/*
		 * We want the minimum time of all runs in case one of them
		 * is inaccurate due to SMI or other delay
		 */
		delta64 = min(delta64, (end - start));
	}

	/* cpu freq too fast (or every run was bad): */
	if (delta64 > (1ULL<<32))
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);
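
	/*
	 * Illustrative (assuming CALIBRATE_TIME_MSEC == 30): a 2 GHz CPU
	 * counts roughly 60,000,000 cycles during the 30 ms calibration
	 * window, and 60,000,000 / 30 = 2,000,000 kHz.
	 */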

	local_irq_restore(flags);
	return (unsigned long)delta64;

err:
	local_irq_restore(flags);
	return 0;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
				      cpu_khz_old, cpu_khz);
		return 0;
	}
	return -ENODEV;
#else
	return -ENODEV;
#endif
}
EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
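
/*
 * Illustrative: cpufreq_scale(old, ref, new) computes old * new / ref,
 * so a loops_per_jiffy of 4,000,000 calibrated at 2,000,000 kHz is
 * rescaled to 2,000,000 when the CPU drops to 1,000,000 kHz.
 */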

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
					      ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
							ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz, freq->cpu);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz;
static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
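
/*
 * Illustrative: if CPU0 has just set cycle_last to 1000 and CPU1's
 * slightly lagging TSC reads 998, returning the raw 998 would make
 * the timekeeping core compute the unsigned delta 998 - 1000, a huge
 * positive value; clamping to cycle_last below avoids the jump.
 */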

static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		tsc_enabled = 0;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback	= dmi_mark_tsc_unstable,
		.ident		= "IBM Thinkpad 380XD",
		.matches	= {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

	/* Anything with constant TSC should be synchronized */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP	0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
	int cpu;

	if (!cpu_has_tsc)
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		goto out_no_tsc;
	}

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						    clocksource_tsc.shift);
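	/*
	 * Illustrative: with .shift == 22 and a 2,000,000 kHz TSC,
	 * clocksource_khz2mult() gives (10^6 << 22) / 2,000,000 =
	 * 2,097,152, so ns = (cycles * mult) >> shift = cycles / 2.
	 */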
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	} else
		tsc_enabled = 1;

	clocksource_register(&clocksource_tsc);

	return;

out_no_tsc:
	setup_clear_cpu_cap(X86_FEATURE_TSC);
}