arch/x86/kernel/tsc_sync.c

/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSC's synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static __cpuinitdata atomic_t start_count;
static __cpuinitdata atomic_t stop_count;

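/*
 * start_count gates entry into check_tsc_warp(), stop_count gates the
 * final printout and the target CPU's return to the bootup path.
 */
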
/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;

static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
static __cpuinitdata int nr_warps;

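/*
 * last_tsc is the most recent TSC reading taken by either CPU, max_warp
 * the largest backwards step seen so far and nr_warps the number of
 * warps observed; all three are protected by sync_lock.
 */
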
/*
 * TSC-warp measurement loop running on both CPUs:
 */
static __cpuinit void check_tsc_warp(void)
{
	cycles_t start, now, prev, end;
	int i;

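	/*
	 * rdtsc_barrier() keeps the CPU from speculating RDTSC past the
	 * surrounding instructions, so the timestamp is taken exactly
	 * where it appears in program order:
	 */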
	rdtsc_barrier();
	start = get_cycles();
	rdtsc_barrier();

	/*
	 * The measurement runs for 20 msecs (tsc_khz is the TSC
	 * frequency in kHz, i.e. cycles per millisecond):
	 */
	end = start + tsc_khz * 20ULL;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		__raw_spin_lock(&sync_lock);
		prev = last_tsc;
		rdtsc_barrier();
		now = get_cycles();
		rdtsc_barrier();
		last_tsc = now;
		__raw_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
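		/*
		 * !(i & 7) is true on every 8th iteration, keeping the
		 * cpu_relax()/watchdog overhead off the hot path while
		 * still checking the exit conditions regularly:
		 */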
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
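		/*
		 * Both CPUs can detect a warp in the same iteration, so
		 * the max_warp/nr_warps update has to be made under the
		 * lock to avoid a race:
		 */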
		if (unlikely(prev > now)) {
			__raw_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			__raw_spin_unlock(&sync_lock);
		}
	}

	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
		now-start, end-start);
}

/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
	int cpus = 2;

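	/*
	 * Only two CPUs ever participate: the source spins below until
	 * start_count == cpus-1 (the target has arrived), and its own
	 * increment then releases the target's spin on start_count == cpus.
	 */
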
	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized:
	 */
	if (unsynchronized_tsc())
		return;

	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
		return;
	}

	pr_info("checking TSC synchronization [CPU#%d -> CPU#%d]:",
		smp_processor_id(), cpu);

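	/*
	 * Note: the message above deliberately has no trailing newline,
	 * so the verdict (" passed." or the warp warning) is appended to
	 * the same console line below.
	 */
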
	/*
	 * Reset it - in case this is a second bootup:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Wait for the target to arrive:
	 */
	while (atomic_read(&start_count) != cpus-1)
		cpu_relax();
	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp();

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	if (nr_warps) {
		printk("\n");
		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
			   "turning off TSC clock.\n", max_warp);
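		/*
		 * This marks the TSC clocksource unstable, so timekeeping
		 * falls back to a stable clocksource (e.g. HPET or the
		 * ACPI PM timer):
		 */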
		mark_tsc_unstable("check_tsc_sync_source failed");
	} else {
		printk(" passed.\n");
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);
}

/*
 * Freshly booted CPUs call into this:
 */
void __cpuinit check_tsc_sync_target(void)
{
	int cpus = 2;

	if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		return;

	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

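	/*
	 * The source CPU's atomic_inc() is what terminates the spin
	 * above; both CPUs then enter check_tsc_warp() at (nearly) the
	 * same time.
	 */
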
	check_tsc_warp();

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();
}