/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/event selections may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
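
/*
 * check_bit is the sign bit of the (negative) value programmed into
 * perfctr_msr: as long as it is still set when an NMI arrives, the counter
 * has not overflowed yet, so the NMI was not raised by the watchdog
 * (see nmi_watchdog_tick() below).
 */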
/* local prototypes */
static void stop_apic_nmi_watchdog(void *unused);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
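
/*
 * Illustrative use of the reservation API by a perfctr user such as a
 * profiling driver (the in-file users are setup_k7_watchdog() and
 * setup_p4_watchdog() below); sketch only, not taken from this file:
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return -EBUSY;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return -EBUSY;
 *	}
 *	... program the counter ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 */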
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		return boot_cpu_data.x86 == 15;
	}
	return 0;
}
/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;

	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;  /* no lapic support */

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}
static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	}
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = atomic_read(&nmi_active);
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}
static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};
static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
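
/*
 * The perfctr is programmed with a negative count, so it overflows (and
 * raises an NMI through the local APIC LVT performance counter entry)
 * after roughly cpu_khz * 1000 / nmi_hz occurrences of the selected
 * event, i.e. about 1/nmi_hz seconds on a busy CPU for the cycle events
 * used here.
 */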
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  //unused
	wd->check_bit = 1ULL << 63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
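
/*
 * With COMPARE and COMPLEMENT set and the threshold at its maximum (15),
 * the threshold test is inverted, so the filter is satisfied on every
 * cycle and IQ_COUNTER0 simply counts cycles, regardless of which event
 * CRU_ESCR0 nominally selects.  That is the "clock" behaviour described
 * in the comment above.
 */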
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1;
	atomic_inc(&nmi_active);
}
static void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0;
	atomic_dec(&nmi_active);
}
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
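
/*
 * last_irq_sum remembers the local APIC timer interrupt count seen at the
 * previous watchdog NMI, alert_counter counts consecutive NMIs for which
 * that count did not change, and nmi_touch lets other CPUs request that
 * the check be restarted (see touch_nmi_watchdog() below).
 */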
void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}
asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count, 1);
	default_do_nmi(regs);
	nmi_exit();
}
int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
	return 0;
}
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);