/*
 * (c) 2005-2012 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support: borislav.petkov@amd.com
 *
 * - added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
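/*
 * The MASK_* constants above decode the high 32 bits of an MCi_MISC
 * thresholding register: bit 31 is the valid bit, bit 30 indicates a
 * counter is present, bit 29 locks the register against changes, bits
 * 23:20 hold the APIC LVT offset, bit 19 enables counting, bits 18:17
 * select the interrupt type, bit 16 is the overflow flag and bits 11:0
 * hold the 12-bit error count.
 */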
static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"",
	"northbridge",
	"execution_unit",
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */
static void amd_threshold_interrupt(void);
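/*
 * CPU Initialization
 */

/*
 * Carries everything threshold_restart_bank() needs when it runs on the
 * target CPU via smp_call_function_single(): the block to reprogram,
 * whether to reset the error counter, whether/where to (re)program the
 * LVT offset, and the previous limit when only the limit changes.
 */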
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};
static const char *bank4_names(struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};
static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};
/*
 * Called via smp_call_function_single(), must be called with correct
 * cpu affinity.
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
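/*
 * Initial programming of a block: point it at the given LVT offset and
 * start the limit at the hardware maximum, i.e. a freshly zeroed counter.
 */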
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};
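/*
 * Reserve the threshold LVT offset once per CPU: if no offset has been
 * claimed yet (reserved < 0) and the EILVT entry can be programmed, the
 * new offset becomes the reserved one; otherwise the existing
 * reservation is kept.
 */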
static int setup_APIC_mce(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	struct threshold_block b;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);

			memset(&b, 0, sizeof(b));
			b.cpu			= cpu;
			b.bank			= bank;
			b.block			= block;
			b.address		= address;
			b.interrupt_capable	= lvt_interrupt_supported(bank, high);

			if (b.interrupt_capable) {
				int new = (high & MASK_LVTOFF_HI) >> 20;
				offset  = setup_APIC_mce(offset, new);
			}

			mce_threshold_block_init(&b, offset);
			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}
/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					   &__get_cpu_var(mce_poll_banks));

			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}
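/*
 * Sysfs Interface
 *
 * Each valid thresholding block is exported as a kobject directory under
 * the per-CPU machinecheck device, with error_count and threshold_limit
 * files, plus interrupt_enable on blocks that can fire an LVT interrupt.
 */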
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
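/*
 * The hardware counter starts at (THRESHOLD_MAX - threshold_limit) and
 * interrupts when it overflows past THRESHOLD_MAX, so the error count
 * reported to userspace is the raw count minus that starting offset.
 */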
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}
static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)						\
static struct threshold_attr val = {				\
	.attr	= {.name = __stringify(val), .mode = 0644 },	\
	.show	= show_## val,					\
	.store	= store_## val,					\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}
static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};
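/*
 * allocate_threshold_blocks() below recurses down a bank's chain of
 * thresholding blocks, following the block pointer in MCi_MISC, and
 * patches threshold_ktype.default_attrs[2] per block so that
 * interrupt_enable only appears for blocks which are interrupt-capable.
 */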
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable)
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
	else
		threshold_ktype.default_attrs[2] = NULL;

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   (bank == 4 ? bank4_names(b) : th_names[bank]));
	if (err)
		goto out_free;
recurse:
	address = (low & MASK_BLKPTR_LO) >> 21;
	if (!address)
		return 0;

	address += MCG_XBLK_ADDR;

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}
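/*
 * MC4 (bank 4) is shared by all cores on a node: the first CPU to create
 * the bank hangs the descriptor off the node's amd_northbridge, and
 * every other CPU on that node merely links its sysfs objects to it and
 * takes a reference.
 */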
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = th_names[bank];
	int err = 0;

	if (shared_bank[bank]) {

		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (shared_bank[bank]) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0,
					MSR_IA32_MC0_MISC + bank * 4);
	if (!err)
		goto out;

out_free:
	kfree(b);

out:
	return err;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	int err = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (shared_bank[bank]) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
}
/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}
static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}

	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}
/*
 * there are 3 funcs which need to be _initcalled in a logic sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under xen platform;
 *
 * mcheck_init_device must be initialized before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * so we use the following _initcalls
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * when running under xen, the initcall order is 1,2,3;
 * on baremetal, we skip 1 and we do only 2 and 3.
 */
late_initcall(threshold_init_device);