/*
 *  (c) 2005-2015 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
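
/*
 * Summary of the MCi_MISC high-word layout implied by the masks above
 * (derived from the mask definitions; bit positions follow the mask values):
 *   [31]    Valid      (MASK_VALID_HI)
 *   [30]    CntP       (MASK_CNTP_HI)    - threshold counter present
 *   [29]    Locked     (MASK_LOCKED_HI)
 *   [23:20] LvtOffset  (MASK_LVTOFF_HI)
 *   [19]    CntEn      (MASK_COUNT_EN_HI)
 *   [18:17] IntType    (MASK_INT_TYPE_HI)
 *   [16]    Overflow   (MASK_OVERFLOW_HI)
 *   [11:0]  ErrCount   (MASK_ERR_COUNT_HI)
 * The low word's BLKPTR field (MASK_BLKPTR_LO) points to the extended
 * block MSRs starting at MCG_XBLK_ADDR.
 */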
/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2
static const char * const th_names[] = {

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */
static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}

void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};
static inline bool is_shared_bank(int bank)
{
	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}
static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
}
static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}
/*
 * Called via smp_call_function_single(), must be called with correct
 * cpu affinity.
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi = (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		     (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
				(tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		     (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit = THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}
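
/*
 * Reserve an extended APIC LVT entry for the threshold vector. If an
 * entry was already reserved (reserved >= 0), keep using it; otherwise
 * try to claim the requested offset and return it on success.
 */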
static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}
static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}
static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	struct threshold_block b;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1, new;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MCx_MISC(bank);
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);

			memset(&b, 0, sizeof(b));
			b.cpu			= cpu;
			b.bank			= bank;
			b.block			= block;
			b.address		= address;
			b.interrupt_capable	= lvt_interrupt_supported(bank, high);

			if (!b.interrupt_capable)
				goto init;

			b.interrupt_enable = 1;
			new	= (high & MASK_LVTOFF_HI) >> 20;
			offset	= setup_APIC_mce_threshold(offset, new);

			if ((offset == new) &&
			    (mce_threshold_vector != amd_threshold_interrupt))
				mce_threshold_vector = amd_threshold_interrupt;

init:
			mce_threshold_block_init(&b, offset);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}
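
/*
 * Build an mce record for the given bank, attach the MISC value for
 * threshold errors, log it and clear the bank's status register.
 */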
static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
{
	struct mce m;
	u64 status;

	rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
	if (!(status & MCI_STATUS_VAL))
		return;

	mce_setup(&m);

	m.status = status;
	m.bank = bank;

	if (threshold_err)
		m.misc = misc;

	if (m.status & MCI_STATUS_ADDRV)
		rdmsrl(MSR_IA32_MCx_ADDR(bank), m.addr);

	mce_log(&m);

	wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
}
static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}

asmlinkage __visible void smp_deferred_error_interrupt(void)
{
	__smp_deferred_error_interrupt();
}

asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
{
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
}
/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	u64 status;
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		rdmsrl(MSR_IA32_MCx_STATUS(bank), status);

		if (!(status & MCI_STATUS_VAL) ||
		    !(status & MCI_STATUS_DEFERRED))
			continue;

		__log_error(bank, false, 0);
		break;
	}
}
/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */

static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	int cpu = smp_processor_id();
	unsigned int bank, block;

	/* assume first bank caused it */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0) {
				address = MSR_IA32_MCx_MISC(bank);
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			if (high & MASK_OVERFLOW_HI)
				goto log;
		}
	}
	return;

log:
	__log_error(bank, true, ((u64)high << 32) | low);
}
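
/*
 * Sysfs Interface
 *
 * Each valid threshold bank gets a directory (named after the bank) under
 * the per-CPU mce device; every block in the bank is a kobject exposing
 * error_count, threshold_limit and, when supported, interrupt_enable.
 */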
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
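
/*
 * The hardware counter is preloaded with THRESHOLD_MAX - threshold_limit
 * and counts up, so the number of errors seen so far is the current
 * count minus that bias.
 */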
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}
static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};
#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};
#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}
static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};
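
/*
 * Allocate a threshold_block for (cpu, bank, block), hook it into the
 * bank's block list and register its kobject, then recurse to the next
 * block by following the BLKPTR chain in the MISC register.
 */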
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   (bank == 4 ? bank4_names(b) : th_names[bank]));
	if (err)
		goto out_free;
recurse:
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;

		address += MCG_XBLK_ADDR;
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
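
/*
 * For a bank shared between CPUs on a node: re-add the already allocated
 * block kobjects under this CPU's bank kobject.
 */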
static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}
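
/*
 * Create the sysfs directory and blocks for one bank on one CPU. The
 * shared northbridge bank (bank 4) is allocated once per node and only
 * linked into the other CPUs on that node.
 */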
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = th_names[bank];
	int err = 0;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, MSR_IA32_MCx_MISC(bank));
	if (!err)
		goto out;

out_free:
	kfree(b);

out:
	return err;
}
/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}
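
/*
 * Drop all block kobjects of a bank on this CPU and free the block list.
 */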
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
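
/*
 * Unregister the block kobjects of a shared bank from this CPU without
 * freeing them; the node still owns the descriptor.
 */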
static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}
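
/*
 * Tear down one bank for a CPU. For the shared bank, only unlink it unless
 * this is the last CPU on the node still using it.
 */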
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
}
/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}
/*
 * There are three functions which need to be _initcalled in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration when running under Xen;
 *
 * mcheck_init_device should be initialized before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause a panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under Xen, the initcall order is 1, 2, 3;
 * on bare metal we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);