/*
 * linux/arch/x86_64/nmi.c
 *
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 * Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 * Pavel Machek and
 * Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 *
 * NOTE(review): web-scrape artifacts removed here (an unrelated patch
 * title and a gitweb blob hash that did not belong to this file).
 */
#include <linux/config.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/smp_lock.h>
#include <linux/sysctl.h>
#include <linux/sysdev.h>

#include <asm/kdebug.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/smp.h>
38 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
39 * - it may be reserved by some other driver, or not
40 * - when not reserved by some other driver, it may be used for
41 * the NMI watchdog, or not
43 * This is maintained separately from nmi_active because the NMI
44 * watchdog may also be driven from the I/O APIC timer.
46 static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
47 static unsigned int lapic_nmi_owner;
48 #define LAPIC_NMI_WATCHDOG (1<<0)
49 #define LAPIC_NMI_RESERVED (1<<1)
51 /* nmi_active:
52 * +1: the lapic NMI watchdog is active, but can be disabled
53 * 0: the lapic NMI watchdog has not been set up, and cannot
54 * be enabled
55 * -1: the lapic NMI watchdog is disabled, but can be enabled
57 int nmi_active; /* oprofile uses this */
58 int panic_on_timeout;
60 unsigned int nmi_watchdog = NMI_DEFAULT;
61 static unsigned int nmi_hz = HZ;
62 unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
64 /* Note that these events don't tick when the CPU idles. This means
65 the frequency varies with CPU load. */
67 #define K7_EVNTSEL_ENABLE (1 << 22)
68 #define K7_EVNTSEL_INT (1 << 20)
69 #define K7_EVNTSEL_OS (1 << 17)
70 #define K7_EVNTSEL_USR (1 << 16)
71 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
72 #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
74 #define P6_EVNTSEL0_ENABLE (1 << 22)
75 #define P6_EVNTSEL_INT (1 << 20)
76 #define P6_EVNTSEL_OS (1 << 17)
77 #define P6_EVNTSEL_USR (1 << 16)
78 #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79
79 #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED
81 /* Run after command line and cpu_init init, but before all other checks */
82 void __init nmi_watchdog_default(void)
84 if (nmi_watchdog != NMI_DEFAULT)
85 return;
87 /* For some reason the IO APIC watchdog doesn't work on the AMD
88 8111 chipset. For now switch to local APIC mode using
89 perfctr0 there. On Intel CPUs we don't have code to handle
90 the perfctr and the IO-APIC seems to work, so use that. */
92 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
93 nmi_watchdog = NMI_LOCAL_APIC;
94 printk(KERN_INFO
95 "Using local APIC NMI watchdog using perfctr0\n");
96 } else {
97 printk(KERN_INFO "Using IO APIC NMI watchdog\n");
98 nmi_watchdog = NMI_IO_APIC;
102 /* Why is there no CPUID flag for this? */
103 static __init int cpu_has_lapic(void)
105 switch (boot_cpu_data.x86_vendor) {
106 case X86_VENDOR_INTEL:
107 case X86_VENDOR_AMD:
108 return boot_cpu_data.x86 >= 6;
109 /* .... add more cpus here or find a different way to figure this out. */
110 default:
111 return 0;
115 static int __init check_nmi_watchdog (void)
117 int counts[NR_CPUS];
118 int cpu;
120 if (nmi_watchdog == NMI_NONE)
121 return 0;
123 if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) {
124 nmi_watchdog = NMI_NONE;
125 return -1;
128 printk(KERN_INFO "Testing NMI watchdog ... ");
130 for (cpu = 0; cpu < NR_CPUS; cpu++)
131 counts[cpu] = cpu_pda[cpu].__nmi_count;
132 local_irq_enable();
133 mdelay((10*1000)/nmi_hz); // wait 10 ticks
135 for (cpu = 0; cpu < NR_CPUS; cpu++) {
136 if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
137 printk("CPU#%d: NMI appears to be stuck (%d)!\n",
138 cpu,
139 cpu_pda[cpu].__nmi_count);
140 nmi_active = 0;
141 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
142 return -1;
145 printk("OK.\n");
147 /* now that we know it works we can reduce NMI frequency to
148 something more reasonable; makes a difference in some configs */
149 if (nmi_watchdog == NMI_LOCAL_APIC)
150 nmi_hz = 1;
152 return 0;
154 /* Have this called later during boot so counters are updating */
155 late_initcall(check_nmi_watchdog);
157 int __init setup_nmi_watchdog(char *str)
159 int nmi;
161 if (!strncmp(str,"panic",5)) {
162 panic_on_timeout = 1;
163 str = strchr(str, ',');
164 if (!str)
165 return 1;
166 ++str;
169 get_option(&str, &nmi);
171 if (nmi >= NMI_INVALID)
172 return 0;
173 nmi_watchdog = nmi;
174 return 1;
177 __setup("nmi_watchdog=", setup_nmi_watchdog);
179 static void disable_lapic_nmi_watchdog(void)
181 if (nmi_active <= 0)
182 return;
183 switch (boot_cpu_data.x86_vendor) {
184 case X86_VENDOR_AMD:
185 wrmsr(MSR_K7_EVNTSEL0, 0, 0);
186 break;
187 case X86_VENDOR_INTEL:
188 wrmsr(MSR_IA32_EVNTSEL0, 0, 0);
189 break;
191 nmi_active = -1;
192 /* tell do_nmi() and others that we're not active any more */
193 nmi_watchdog = 0;
196 static void enable_lapic_nmi_watchdog(void)
198 if (nmi_active < 0) {
199 nmi_watchdog = NMI_LOCAL_APIC;
200 setup_apic_nmi_watchdog();
204 int reserve_lapic_nmi(void)
206 unsigned int old_owner;
208 spin_lock(&lapic_nmi_owner_lock);
209 old_owner = lapic_nmi_owner;
210 lapic_nmi_owner |= LAPIC_NMI_RESERVED;
211 spin_unlock(&lapic_nmi_owner_lock);
212 if (old_owner & LAPIC_NMI_RESERVED)
213 return -EBUSY;
214 if (old_owner & LAPIC_NMI_WATCHDOG)
215 disable_lapic_nmi_watchdog();
216 return 0;
219 void release_lapic_nmi(void)
221 unsigned int new_owner;
223 spin_lock(&lapic_nmi_owner_lock);
224 new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
225 lapic_nmi_owner = new_owner;
226 spin_unlock(&lapic_nmi_owner_lock);
227 if (new_owner & LAPIC_NMI_WATCHDOG)
228 enable_lapic_nmi_watchdog();
231 void disable_timer_nmi_watchdog(void)
233 if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
234 return;
236 disable_irq(0);
237 unset_nmi_callback();
238 nmi_active = -1;
239 nmi_watchdog = NMI_NONE;
242 void enable_timer_nmi_watchdog(void)
244 if (nmi_active < 0) {
245 nmi_watchdog = NMI_IO_APIC;
246 touch_nmi_watchdog();
247 nmi_active = 1;
248 enable_irq(0);
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

/* Remember the watchdog state and quiesce it across a suspend. */
static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = nmi_active;
	disable_lapic_nmi_watchdog();
	return 0;
}

/* Restore the watchdog on resume only if it was running before suspend. */
static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

/* Register the suspend/resume hooks when the lapic watchdog is in use. */
static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
299 * Activate the NMI watchdog via the local APIC.
300 * Original code written by Keith Owens.
303 static void setup_k7_watchdog(void)
305 int i;
306 unsigned int evntsel;
308 /* No check, so can start with slow frequency */
309 nmi_hz = 1;
311 /* XXX should check these in EFER */
313 nmi_perfctr_msr = MSR_K7_PERFCTR0;
315 for(i = 0; i < 4; ++i) {
316 /* Simulator may not support it */
317 if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL))
318 return;
319 wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
322 evntsel = K7_EVNTSEL_INT
323 | K7_EVNTSEL_OS
324 | K7_EVNTSEL_USR
325 | K7_NMI_EVENT;
327 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
328 wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz*1000) / nmi_hz);
329 apic_write(APIC_LVTPC, APIC_DM_NMI);
330 evntsel |= K7_EVNTSEL_ENABLE;
331 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
334 void setup_apic_nmi_watchdog(void)
336 switch (boot_cpu_data.x86_vendor) {
337 case X86_VENDOR_AMD:
338 if (boot_cpu_data.x86 != 15)
339 return;
340 if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
341 return;
342 setup_k7_watchdog();
343 break;
344 default:
345 return;
347 lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
348 nmi_active = 1;
352 * the best way to detect whether a CPU has a 'hard lockup' problem
353 * is to check it's local APIC timer IRQ counts. If they are not
354 * changing then that CPU has some problem.
356 * as these watchdog NMI IRQs are generated on every CPU, we only
357 * have to check the current processor.
359 * since NMIs don't listen to _any_ locks, we have to be extremely
360 * careful not to rely on unsafe variables. The printk might lock
361 * up though, so we have to break up any console locks first ...
362 * [when there will be more tty-related locks, break them up
363 * here too!]
366 static unsigned int
367 last_irq_sums [NR_CPUS],
368 alert_counter [NR_CPUS];
370 void touch_nmi_watchdog (void)
372 int i;
375 * Just reset the alert counters, (other CPUs might be
376 * spinning on locks we hold):
378 for (i = 0; i < NR_CPUS; i++)
379 alert_counter[i] = 0;
382 void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
384 int sum, cpu;
386 cpu = safe_smp_processor_id();
387 sum = read_pda(apic_timer_irqs);
388 if (last_irq_sums[cpu] == sum) {
390 * Ayiee, looks like this CPU is stuck ...
391 * wait a few IRQs (5 seconds) before doing the oops ...
393 alert_counter[cpu]++;
394 if (alert_counter[cpu] == 5*nmi_hz) {
395 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
396 == NOTIFY_STOP) {
397 alert_counter[cpu] = 0;
398 return;
400 die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
402 } else {
403 last_irq_sums[cpu] = sum;
404 alert_counter[cpu] = 0;
406 if (nmi_perfctr_msr)
407 wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
410 static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
412 return 0;
415 static nmi_callback_t nmi_callback = dummy_nmi_callback;
417 asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
419 int cpu = safe_smp_processor_id();
421 nmi_enter();
422 add_pda(__nmi_count,1);
423 if (!nmi_callback(regs, cpu))
424 default_do_nmi(regs);
425 nmi_exit();
428 void set_nmi_callback(nmi_callback_t callback)
430 nmi_callback = callback;
433 void unset_nmi_callback(void)
435 nmi_callback = dummy_nmi_callback;
#ifdef CONFIG_SYSCTL

/* Panic via die_nmi() when an NMI arrives without a known hardware reason. */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	/* bits 6/7 of the NMI reason port flag parity / IOCHK errors */
	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf,regs);
	}
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	/* NOTE(review): proc_dointvec() return value is ignored here —
	 * a failed write silently leaves unknown_nmi_panic unchanged. */
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}

#endif
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);