/*
 * (c) 2005, 2006 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support : jacob.shin@amd.com
 *
 * - added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
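/*
 * The MASK_*_HI masks above decode the high 32 bits of an MCi_MISCj
 * threshold register: the valid and counter-present bits, a locked bit
 * (firmware owns the register), the LVT offset used to deliver the
 * APIC interrupt, the counter-enable and interrupt-type fields, the
 * overflow bit, and the 12-bit error count itself.  MASK_BLKPTR_LO
 * selects the block-pointer field in the low half, which chains to
 * additional blocks starting at MCG_XBLK_ADDR.
 */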
struct threshold_block {
        unsigned int            block;                  /* number within bank */
        unsigned int            bank;                   /* MCA bank */
        unsigned int            cpu;                    /* CPU that owns this block */
        u32                     address;                /* MSR address of the block */
        u16                     interrupt_enable;       /* enable APIC interrupt */
        u16                     threshold_limit;        /* error count that triggers it */
        struct kobject          kobj;                   /* sysfs object */
        struct list_head        miscj;                  /* sibling blocks in this bank */
};
struct threshold_bank {
        struct kobject          *kobj;
        struct threshold_block  *blocks;
        cpumask_var_t           cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
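/*
 * For a bank marked shared below, sibling cores point their
 * threshold_banks entry at the same object: the first core owns the
 * sysfs directory and the others get symlinks (see
 * threshold_create_bank()).
 */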
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1   /* only bank 4 (MC4, the northbridge) is shared */
};

static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
static void amd_threshold_interrupt(void);
/*
 * CPU Initialization
 */

struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        int                     set_lvt_off;
        int                     lvt_off;
        u16                     old_limit;
};
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
}
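/*
 * The hardware counter is programmed to THRESHOLD_MAX - threshold_limit
 * and counts up, so it overflows (and raises the interrupt) after
 * exactly threshold_limit errors; error_count is recovered later as
 * current_count - (THRESHOLD_MAX - threshold_limit), see
 * local_error_count_handler().
 */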
/* must be called with correct cpu affinity */
/* Called via smp_call_function_single() */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 lo, hi;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi = (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                     (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                                (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                     (new_count & THRESHOLD_MAX);
        }

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        tr->b->interrupt_enable ?
            (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
            (hi &= ~MASK_INT_TYPE_HI);

        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b              = b,
                .set_lvt_off    = 1,
                .lvt_off        = offset,
        };

        b->threshold_limit = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
}
static int setup_APIC_mce(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}
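/*
 * Per-bank MISC MSRs sit 4 apart (MC0_MISC, MC1_MISC, ...).  Block 0 of
 * a bank is its MCi_MISC register; if block 0's low word carries a
 * block pointer (MASK_BLKPTR_LO), the remaining blocks live
 * consecutively in the extended MSR space at MCG_XBLK_ADDR.  Both the
 * init loop below and the interrupt handler walk blocks this way.
 */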
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        struct threshold_block b;
        unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        int offset = -1;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;

                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);

                        if (shared_bank[bank] && c->cpu_core_id)
                                break;

                        offset = setup_APIC_mce(offset,
                                                (high & MASK_LVTOFF_HI) >> 20);

                        memset(&b, 0, sizeof(b));
                        b.cpu           = cpu;
                        b.bank          = bank;
                        b.block         = block;
                        b.address       = address;

                        mce_threshold_block_init(&b, offset);
                        mce_threshold_vector = amd_threshold_interrupt;
                }
        }
}
/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct mce m;

        mce_setup(&m);

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0) {
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        } else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else {
                                ++address;
                        }

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
                                           &__get_cpu_var(mce_poll_banks));

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
                                return;
                        }
                }
        }
}
/*
 * Sysfs Interface
 */

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);         \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}
struct threshold_block_cross_cpu {
        struct threshold_block  *tb;
        long                    retval;
};
static void local_error_count_handler(void *_tbcc)
{
        struct threshold_block_cross_cpu *tbcc = _tbcc;
        struct threshold_block *b = tbcc->tb;
        u32 low, high;

        rdmsr(b->address, low, high);
        tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}
static ssize_t
show_error_count(struct threshold_block *b, char *buf)
{
        struct threshold_block_cross_cpu tbcc = { .tb = b, };

        smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
        return sprintf(buf, "%lx\n", tbcc.retval);
}
static ssize_t
store_error_count(struct threshold_block *b,
                  const char *buf, size_t count)
{
        struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
        return 1;
}
#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                   \
        .attr   = {.name = __stringify(val), .mode = 0644 },           \
        .show   = show_## val,                                         \
        .store  = store_## val,                                        \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);
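/*
 * These attributes appear per block under each bank's sysfs directory,
 * e.g. (path is illustrative, assuming the usual mce device root):
 *
 *   /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/
 *       error_count       - errors observed so far
 *       threshold_limit   - errors before an interrupt is raised
 *       interrupt_enable  - write 1 to enable the APIC interrupt
 */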
static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};
#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)
static ssize_t
show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}
static ssize_t
store(struct kobject *kobj, struct attribute *attr,
      const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}
static const struct sysfs_ops threshold_ops = {
        .show           = show,
        .store          = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops      = &threshold_ops,
        .default_attrs  = default_attrs,
};
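/*
 * allocate_threshold_blocks() below discovers a bank's blocks by
 * following the block pointer chain (see the comment above
 * mce_amd_feature_init()) and recursing, creating a "misc%i" kobject
 * for every valid, unlocked block it finds.
 */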
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
                                               unsigned int bank,
                                               unsigned int block,
                                               u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->threshold_limit      = THRESHOLD_MAX;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   "misc%i", block);
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else {
                ++address;
        }

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}
static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
        return allocate_threshold_blocks(cpu, bank, 0,
                                         MSR_IA32_MC0_MISC + bank * 4);
}
/* symlinks sibling shared banks to first core. first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        int i, err = 0;
        struct threshold_bank *b = NULL;
        struct device *dev = mce_device[cpu];
        char name[32];

        sprintf(name, "threshold_bank%i", bank);

        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = cpumask_first(cpu_llc_shared_mask(cpu));

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
                        goto out;

                /* already linked */
                if (per_cpu(threshold_banks, cpu)[bank])
                        goto out;

                b = per_cpu(threshold_banks, i)[bank];
                if (!b)
                        goto out;

                err = sysfs_create_link(&dev->kobj, b->kobj, name);
                if (err)
                        goto out;

                cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;

                goto out;
        }

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }
        if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &dev->kobj);
        if (!b->kobj)
                goto out_free;

#ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
#else
        cpumask_set_cpu(cpu, b->cpus);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

        err = local_allocate_threshold_blocks(cpu, bank);
        if (err)
                goto out_free;

        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                dev = mce_device[i];
                if (dev)
                        err = sysfs_create_link(&dev->kobj, b->kobj, name);
                if (err)
                        goto out;

                per_cpu(threshold_banks, i)[bank] = b;
        }

        goto out;

out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
        free_cpumask_var(b->cpus);
        kfree(b);
out:
        return err;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        return err;
        }

        return err;
}
/*
 * let's be hotplug friendly.
 * in case of multiple core processors, the first core always takes ownership
 * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
 */
static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct threshold_bank *b;
        struct device *dev;
        char name[32];
        int i = 0;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;
        if (!b->blocks)
                goto free_out;

        sprintf(name, "threshold_bank%i", bank);

        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&mce_device[cpu]->kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;

                return;
        }

        /* remove all sibling symlinks before unregistering */
        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                dev = mce_device[i];
                if (dev)
                        sysfs_remove_link(&dev->kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}
/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}
static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
device_initcall(threshold_init_device);