sched: make early bootup sched_clock() use safer
[wrt350n-kernel.git] / arch/x86/kernel/tsc_32.c
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"
static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
               "cannot disable TSC.\n");
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *      ns = cycles / (freq / ns_per_sec)
 *      ns = cycles * (ns_per_sec / freq)
 *      ns = cycles * (10^9 / (cpu_khz * 10^3))
 *      ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *      ns = cycles * (10^6 * SC / cpu_khz) / SC
 *      ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *      -johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
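/*
 * Worked example (illustrative CPU speed, not measured here): with
 * SC = 2^10 as noted above and a 2 GHz CPU (cpu_khz = 2,000,000),
 *
 *      cyc2ns_scale = (10^6 * 2^10) / 2,000,000 = 512
 *      ns = (cycles * 512) >> 10 = cycles / 2
 *
 * i.e. 0.5 ns per cycle, exactly what a 2 GHz clock should give.
 */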
DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long flags, prev_scale, *scale;
        unsigned long long tsc_now, ns_now;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        scale = &per_cpu(cyc2ns, cpu);

        rdtscll(tsc_now);
        ns_now = __cycles_2_ns(tsc_now);

        prev_scale = *scale;
        if (cpu_khz)
                *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

        /*
         * Start smoothly with the new frequency:
         */
        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(!tsc_enabled && !tsc_unstable))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}
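/*
 * On the jiffies fallback above: one jiffy is 10^9/HZ ns, so with the
 * common HZ=250 configuration each tick advances sched_clock() by
 * 4,000,000 ns. Subtracting INITIAL_JIFFIES makes the clock start
 * near zero at boot rather than at the jiffies wraparound-test offset.
 */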
/*
 * We need to define a real function for sched_clock, to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
        __attribute__((alias("native_sched_clock")));
#endif
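/*
 * (The alias above makes sched_clock() just another name for
 * native_sched_clock() at link time, displacing the
 * __attribute__((weak)) default that the scheduler core provides.)
 */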
unsigned long native_calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);

                /*
                 * Error: ECTCNEVERSET
                 * The CTC wasn't reliable: we got a hit on the very first read,
                 * or the CPU was so fast/slow that the quotient wouldn't fit in
                 * 32 bits.
                 */
                if (count <= 1)
                        continue;

                /* cpu freq too slow: */
                if ((end - start) <= CALIBRATE_TIME_MSEC)
                        continue;

                /*
                 * We want the minimum time of all runs in case one of them
                 * is inaccurate due to SMI or other delay
                 */
                delta64 = min(delta64, (end - start));
        }

        /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL<<32))
                goto err;

        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64, CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;

err:
        local_irq_restore(flags);
        return 0;
}
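/*
 * On the arithmetic above: delta64 holds the TSC cycles that elapsed
 * while the PIT counted down for CALIBRATE_TIME_MSEC milliseconds, so
 * cycles / milliseconds is the frequency in kHz directly. Illustrative
 * numbers (CALIBRATE_TIME_MSEC itself comes from mach_timer.h): a 2 GHz
 * CPU over a 50 ms window accumulates ~100,000,000 cycles, and
 * 100,000,000 / 50 = 2,000,000 kHz.
 */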
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                      cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}
EXPORT_SYMBOL(recalibrate_cpu_khz);
#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (!ref_freq) {
                if (!freq->old) {
                        ref_freq = freq->new;
                        return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                              ref_freq, freq->new);

                if (cpu_khz) {
                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                        ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                preempt_disable();
                                set_cyc2ns_scale(cpu_khz, smp_processor_id());
                                preempt_enable();
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable("cpufreq changes");
                        }
                }
        }

        return 0;
}
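/*
 * cpufreq_scale() used above rescales a reference value linearly with
 * frequency: new = ref * freq_new / freq_ref. Illustrative numbers: a
 * loops_per_jiffy calibrated at 2,000,000 kHz is halved when the
 * governor drops the core to 1,000,000 kHz, keeping TSC-based delay
 * loops roughly accurate across the transition.
 */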
static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        return cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif
/* clock source code */

static unsigned long current_tsc_khz = 0;

static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret;
}

static struct clocksource clocksource_tsc = {
        .name           = "tsc",
        .rating         = 300,
        .read           = read_tsc,
        .mask           = CLOCKSOURCE_MASK(64),
        .mult           = 0, /* to be set */
        .shift          = 22,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS |
                          CLOCK_SOURCE_MUST_VERIFY,
};
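/*
 * The clocksource core converts cycles with ns = (cycles * mult) >> shift;
 * tsc_init() below fills in mult via clocksource_khz2mult(tsc_khz, 22),
 * roughly (10^6 << 22) / tsc_khz. Worked example (illustrative speed): at
 * exactly 1 GHz, tsc_khz = 1,000,000, so mult = 2^22 and
 * ns = (cycles << 22) >> 22 = cycles, i.e. one nanosecond per cycle.
 */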
void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                tsc_enabled = 0;
                printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
               d->ident);
        tsc_unstable = 1;
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
                .callback = dmi_mark_tsc_unstable,
                .ident = "IBM Thinkpad 380XD",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                },
        },
        {}
};
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;

        /* Anything with constant TSC should be synchronized */
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;

        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        tsc_unstable = 1;
        }
        return tsc_unstable;
}
/*
 * Geode_LX - the OLPC CPU has possibly a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        if (res_low & RTSC_SUSP)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif
void __init tsc_init(void)
{
        int cpu;

        if (!cpu_has_tsc)
                goto out_no_tsc;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz)
                goto out_no_tsc;

        printk("Detected %lu.%03lu MHz processor.\n",
               (unsigned long)cpu_khz / 1000,
               (unsigned long)cpu_khz % 1000);

        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
         * speed as the bootup CPU. (cpufreq notifiers will fix this
         * up if their speed diverges)
         */
        for_each_possible_cpu(cpu)
                set_cyc2ns_scale(cpu_khz, cpu);

        use_tsc_delay();

        /* Check and install the TSC clocksource */
        dmi_check_system(bad_tsc_dmi_table);

        unsynchronized_tsc();
        check_geode_tsc_reliable();
        current_tsc_khz = tsc_khz;
        clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                    clocksource_tsc.shift);
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        } else
                tsc_enabled = 1;

        clocksource_register(&clocksource_tsc);

        return;

out_no_tsc:
        setup_clear_cpu_cap(X86_FEATURE_TSC);
}