/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"
/*
 * Support for Intel Correct Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
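/*
 * Note: a bank is "owned" by this CPU iff its bit is set in this per-cpu
 * bitmap. Owned banks are removed from the generic mce_poll_banks set in
 * cmci_discover() below, so the poll timer does not scan them twice.
 */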
/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
#define CMCI_THRESHOLD          1
#define CMCI_POLL_INTERVAL      (30 * HZ)
#define CMCI_STORM_INTERVAL     (1 * HZ)
#define CMCI_STORM_THRESHOLD    15
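/*
 * Storm heuristic: more than CMCI_STORM_THRESHOLD interrupts within one
 * CMCI_STORM_INTERVAL on a CPU is treated as a storm. The CPU then
 * disables CMCI on the banks it owns and falls back to polling every
 * CMCI_POLL_INTERVAL until all CPUs have calmed down again.
 */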
static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
        CMCI_STORM_NONE,
        CMCI_STORM_ACTIVE,
        CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;
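/*
 * Per-cpu state machine: NONE -> ACTIVE when cmci_storm_detect() trips,
 * ACTIVE -> SUBSIDED once the poll timer stops recording events, and
 * SUBSIDED -> NONE (back to interrupt mode) only after cmci_storm_on_cpus
 * drops to zero, i.e. after every storming CPU has subsided.
 */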
static int cmci_supported(int *banks)
{
        u64 cap;

        if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
                return 0;

        /*
         * Vendor check is not strictly needed, but the initial
         * initialization is vendor keyed and this
         * makes sure none of the backdoors are entered otherwise.
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return 0;
        if (!cpu_has_apic || lapic_get_maxlvt() < 6)
                return 0;
        rdmsrl(MSR_IA32_MCG_CAP, cap);
        *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
        return !!(cap & MCG_CMCI_P);
}
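/*
 * Note on the checks above: bits 7:0 of IA32_MCG_CAP hold the bank count
 * (clamped to MAX_NR_BANKS) and MCG_CMCI_P advertises CMCI capability,
 * while the CMCI LVT is APIC entry 6, hence the lapic_get_maxlvt() check.
 */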
void mce_intel_cmci_poll(void)
{
        if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
                return;
        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
}
void mce_intel_hcpu_update(unsigned long cpu)
{
        if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
                atomic_dec(&cmci_storm_on_cpus);

        per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}
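/*
 * mce_intel_adjust_timer() below is called from the generic MCE poll
 * timer. While a storm is in progress it pins the timer to
 * CMCI_POLL_INTERVAL; interrupts are only re-enabled (via
 * cmci_reenable()/cmci_recheck()) once every CPU has left ACTIVE state.
 */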
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
        int r;

        if (interval < CMCI_POLL_INTERVAL)
                return interval;

        switch (__this_cpu_read(cmci_storm_state)) {
        case CMCI_STORM_ACTIVE:
                /*
                 * We switch back to interrupt mode once the poll timer has
                 * silenced itself. That means no events recorded and the
                 * timer interval is back to our poll interval.
                 */
                __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
                r = atomic_sub_return(1, &cmci_storm_on_cpus);
                if (r == 0)
                        pr_notice("CMCI storm subsided: switching to interrupt mode\n");
                /* FALLTHROUGH */

        case CMCI_STORM_SUBSIDED:
                /*
                 * We wait for all cpus to go back to SUBSIDED
                 * state. When that happens we switch back to
                 * interrupt mode.
                 */
                if (!atomic_read(&cmci_storm_on_cpus)) {
                        __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
                        cmci_reenable();
                        cmci_recheck();
                }
                return CMCI_POLL_INTERVAL;
        default:
                /*
                 * We have shiny weather. Let the poll do whatever it
                 * thinks.
                 */
                return interval;
        }
}
static bool cmci_storm_detect(void)
{
        unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
        unsigned long ts = __this_cpu_read(cmci_time_stamp);
        unsigned long now = jiffies;
        int r;

        if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
                return true;

        if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
                cnt++;
        } else {
                cnt = 1;
                __this_cpu_write(cmci_time_stamp, now);
        }
        __this_cpu_write(cmci_storm_cnt, cnt);

        if (cnt <= CMCI_STORM_THRESHOLD)
                return false;

        cmci_clear();
        __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
        r = atomic_add_return(1, &cmci_storm_on_cpus);
        mce_timer_kick(CMCI_POLL_INTERVAL);

        if (r == 1)
                pr_notice("CMCI storm detected: switching to poll mode\n");
        return true;
}
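/*
 * Example (assuming HZ=1000, a common config): the 16th interrupt landing
 * within 1000 jiffies of the first trips the storm path above;
 * cmci_clear() immediately releases our banks so the flood stops, and
 * the kicked poll timer takes over the logging.
 */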
/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
        if (cmci_storm_detect())
                return;
        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
        mce_notify_irq();
}
/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
        unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
        unsigned long flags;
        int i;
        int bios_wrong_thresh = 0;

        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        for (i = 0; i < banks; i++) {
                u64 val;
                int bios_zero_thresh = 0;

                if (test_bit(i, owned))
                        continue;

                /* Skip banks in firmware first mode */
                if (test_bit(i, mce_banks_ce_disabled))
                        continue;

                rdmsrl(MSR_IA32_MCx_CTL2(i), val);

                /* Already owned by someone else? */
                if (val & MCI_CTL2_CMCI_EN) {
                        clear_bit(i, owned);
                        __clear_bit(i, __get_cpu_var(mce_poll_banks));
                        continue;
                }

                if (!mca_cfg.bios_cmci_threshold) {
                        val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
                        val |= CMCI_THRESHOLD;
                } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
                        /*
                         * If bios_cmci_threshold boot option was specified
                         * but the threshold is zero, we'll try to initialize
                         * it to 1.
                         */
                        bios_zero_thresh = 1;
                        val |= CMCI_THRESHOLD;
                }

                val |= MCI_CTL2_CMCI_EN;
                wrmsrl(MSR_IA32_MCx_CTL2(i), val);
                rdmsrl(MSR_IA32_MCx_CTL2(i), val);

                /* Did the enable bit stick? -- the bank supports CMCI */
                if (val & MCI_CTL2_CMCI_EN) {
                        set_bit(i, owned);
                        __clear_bit(i, __get_cpu_var(mce_poll_banks));
                        /*
                         * We are able to set thresholds for some banks that
                         * had a threshold of 0. This means the BIOS has not
                         * set the thresholds properly or does not work with
                         * this boot option. Note down now and report later.
                         */
                        if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
                                        (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
                                bios_wrong_thresh = 1;
                } else {
                        WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
                }
        }
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
        if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
                pr_info_once(
                        "bios_cmci_threshold: Some banks do not have valid thresholds set\n");
                pr_info_once(
                        "bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
        }
}
/*
 * Just in case we missed an event during initialization check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
        unsigned long flags;
        int banks;

        if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
                return;
        local_irq_save(flags);
        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
        local_irq_restore(flags);
}
/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
        u64 val;

        if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
                return;
        rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
        val &= ~MCI_CTL2_CMCI_EN;
        wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
        __clear_bit(bank, __get_cpu_var(mce_banks_owned));
}
/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
        unsigned long flags;
        int i;
        int banks;

        if (!cmci_supported(&banks))
                return;
        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        for (i = 0; i < banks; i++)
                __cmci_disable_bank(i);
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
static void cmci_rediscover_work_func(void *arg)
{
        int banks;

        /* Recheck banks in case CPUs don't all have the same */
        if (cmci_supported(&banks))
                cmci_discover(banks);
}
/* After a CPU went down cycle through all the others and rediscover */
void cmci_rediscover(void)
{
        int banks;

        if (!cmci_supported(&banks))
                return;

        on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}
/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
        int banks;

        if (cmci_supported(&banks))
                cmci_discover(banks);
}
void cmci_disable_bank(int bank)
{
        int banks;
        unsigned long flags;

        if (!cmci_supported(&banks))
                return;

        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        __cmci_disable_bank(bank);
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
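/*
 * intel_init_cmci() below hooks the shared machine check threshold
 * vector (the same mce_threshold_vector used by the AMD threshold code)
 * and points the local APIC LVT CMCI entry at it with fixed delivery.
 */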
static void intel_init_cmci(void)
{
        int banks;

        if (!cmci_supported(&banks))
                return;

        mce_threshold_vector = intel_threshold_interrupt;
        cmci_discover(banks);
        /*
         * For CPU #0 this runs with still disabled APIC, but that's
         * ok because only the vector is set up. We still do another
         * check for the banks later for CPU #0 just to make sure
         * to not miss any events.
         */
        apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
        cmci_recheck();
}
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
        intel_init_thermal(c);
        intel_init_cmci();
}