/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>

#define MISC_MCELOG_MINOR 227

static int mce_dont_init;

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
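/*
 * Note: tolerant can also be adjusted at runtime through the per CPU sysfs
 * attribute (/sys/devices/system/machinecheck/machinecheckN/tolerant, see
 * the ACCESSOR() definitions below) or at boot time with mce=<level>.
 */
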
static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int mce_bootlog = 1;
static atomic_t mce_events;
static unsigned long rip_msr;
atomic_t mce_entry;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break them. It
 * also keeps MCEs separate from regular kernel messages to avoid bogus bug
 * reports.
 */

static struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	atomic_inc(&mce_events);
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		/* When the buffer fills up discard new entries. Assume
		   that the earlier errors are the more interesting. */
		if (entry >= MCE_LOG_LEN) {
			set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
			return;
		}
		/* Old left over entry. Skip. */
		if (mcelog.entry[entry].finished) {
			entry++;
			continue;
		}
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &notify_user);
}
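/*
 * Note on the logging scheme above: mce_log() can be called from the machine
 * check exception itself, so it never takes a lock. A slot is reserved by
 * advancing mcelog.next with cmpxchg(), the record is copied in, and only
 * then is ->finished set, so readers never see a half-written entry. When
 * the buffer is full the overflow is recorded in mcelog.flags instead of
 * blocking.
 */
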
static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
	       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
	       m->cs, m->ip);
	if (m->cs == __KERNEL_CS)
		print_symbol("{%s}", m->ip);
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	printk("ADDR %Lx ", m->addr);
	printk("MISC %Lx ", m->misc);
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	}

	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
	}
}

/*
 * The actual machine check handler
 */
void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if (regs
	    && notify_die(DIE_NMI, "machine check", regs, error_code,
			  18, SIGKILL) == NOTIFY_STOP)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);

	for (i = 0; i < banks; i++) {
		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble.  If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done.  Try to kill as little as possible.  If we can kill just
	 * one task, do that.  If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS.  Otherwise, panic if tolerance is low.
		 *
		 * do_exit() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			force_sig(SIGBUS, current);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				  &panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
out2:
	atomic_dec(&mce_entry);
}
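/*
 * Summary of the policy above: no_way_out (PCC set, invalid restart IP, or
 * an uncorrected error with overflow) panics unless tolerant >= 3; kill_it
 * tries to confine the damage to the offending user task with a SIGBUS;
 * everything else is only logged and userspace is poked via TIF_MCE_NOTIFY.
 */
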
#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce m;

	memset(&m, 0, sizeof(m));
	m.cpu = cpu;
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}

static void mcheck_timer(struct work_struct *work)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);

	/*
	 * Alert userspace if needed.  If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval * 2,
				(int)round_jiffies_relative(check_interval*HZ));
	}

	schedule_delayed_work(&mcheck_work, next_interval);
}
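/*
 * In other words, the polling period halves (down to HZ/100, i.e. 10ms)
 * while errors keep being found and doubles again (up to check_interval
 * seconds) once they stop.
 */
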
/*
 * This is only called from process context.  This is where we do
 * anything we need to alert userspace about new MCEs.  This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		wake_up_interruptible(&mce_wait);
		if (trigger[0])
			call_usermodehelper(trigger, trigger_argv, NULL,
					    UMH_NO_WAIT);

		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}
/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);

/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > NR_BANKS) {
		printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
		banks = NR_BANKS;
	}
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}
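/*
 * The +4*i addressing works because each bank owns four consecutive MSRs
 * (CTL, STATUS, ADDR, MISC) starting at MSR_IA32_MC0_CTL, so bank i's
 * registers live at MSR_IA32_MC0_CTL + 4*i and following.
 */
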
/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
		/* disable GART TBL walk error reporting, which trips off
		   incorrectly with the IOMMU & 3ware & Cerberus. */
		clear_bit(10, &bank[4]);
		/* Lots of broken BIOS around that don't clear them
		   by default and leave crap in there. Don't log. */
		mce_bootlog = 0;
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	static cpumask_t mce_cpus = CPU_MASK_NONE;

	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DEFINE_MUTEX(mce_read_mutex);
	unsigned next;
	char __user *buf = ubuf;
	int i, err = 0;

	cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
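/*
 * Illustration only (not part of this file): a minimal sketch, assuming the
 * MCE_* ioctl numbers and struct mce layout are visible to userspace, of how
 * a consumer such as mcelog(8) might drain /dev/mcelog. mce_read() rejects
 * partial reads, so the buffer must hold MCE_GET_LOG_LEN records of
 * MCE_GET_RECORD_LEN bytes each; a successful read() then returns
 * n / record_len finished records:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int record_len, log_len;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &record_len);
 *	ioctl(fd, MCE_GET_LOG_LEN, &log_len);
 *	char *buf = malloc(record_len * log_len);
 *	ssize_t n = read(fd, buf, record_len * log_len);
 */
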
static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.unlocked_ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

static unsigned long old_cr4 __initdata;

void __init stop_mce(void)
{
	old_cr4 = read_cr4();
	clear_in_cr4(X86_CR4_MCE);
}

void __init restart_mce(void)
{
	if (old_cr4 & X86_CR4_MCE)
		set_in_cr4(X86_CR4_MCE);
}

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}

/* mce=off disables machine check. Note you can re-enable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk("mce= argument %s ignored. Please use /sys\n", str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);
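/*
 * Examples: booting with "mce=2" sets tolerant to 2, "mce=nobootlog"
 * suppresses logging of errors left over from before boot, and "nomce"
 * disables the machine check handler completely.
 */
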
/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get readded later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (next_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1, 1);
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	.name = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
		return sprintf(buf, "%lx\n", (unsigned long)var); \
	} \
	static ssize_t set_ ## name(struct sys_device *s, const char *buf, size_t siz) { \
		char *end; \
		unsigned long new = simple_strtoul(buf, &end, 0); \
		if (end == buf) return -EINVAL; \
		var = new; \
		start; \
		return end-buf; \
	} \
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
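/*
 * For example, ACCESSOR(bank0ctl, bank[0], mce_restart()) below expands to
 * show_bank0ctl()/set_bank0ctl() plus an attr_bank0ctl sysdev attribute, so
 * writing a new control mask to the sysfs file updates bank[0] and then
 * reinitializes machine checks on all CPUs via mce_restart().
 */
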
/* TBD should generate these dynamically based on number of available banks */
ACCESSOR(bank0ctl,bank[0],mce_restart())
ACCESSOR(bank1ctl,bank[1],mce_restart())
ACCESSOR(bank2ctl,bank[2],mce_restart())
ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
ACCESSOR(bank5ctl,bank[5],mce_restart())

static ssize_t show_trigger(struct sys_device *s, char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, const char *buf, size_t siz)
{
	char *p;
	int len;

	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');
	if (p)
		*p = 0;
	return len;
}
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
ACCESSOR(tolerant,tolerant,)
ACCESSOR(check_interval,check_interval,mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant, &attr_check_interval, &attr_trigger,
	NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per cpu sysdev init.  All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce,cpu).id = cpu;
	per_cpu(device_mce,cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce,cpu));
	if (err)
		return err;

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce,cpu),
					 mce_attributes[i]);
		if (err)
			goto error;
	}
	cpu_set(cpu, mce_device_initialized);

	return 0;
error:
	while (i--)
		sysdev_remove_file(&per_cpu(device_mce,cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce,cpu));

	return err;
}

static void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpu_isset(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce,cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce,cpu));
	cpu_clear(cpu, mce_device_initialized);
}

/* Get notified when a cpu comes on/off.  Be hotplug friendly. */
static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mce_remove_device(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);