/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_counter.h>
struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
        unsigned int perfctr_msr;       /* the MSR to reset in NMI handler */
        unsigned int evntsel_msr;       /* the MSR to select the events to handle */
};
/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
        int (*reserve)(void);
        void (*unreserve)(void);
        int (*setup)(unsigned nmi_hz);
        void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
        void (*stop)(void);
        unsigned perfctr;
        unsigned evntsel;
        u64 checkbit;
};

static const struct wd_ops *wd_ops;
/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
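/*
 * Worked example (editorial note; the MSR values are taken as assumptions
 * from Intel's MSR list): MSR_P4_CRU_ESCR5 (0x3e1) - MSR_P4_BSU_ESCR0 (0x3a0)
 * is 65, so 66 bits are enough to give every reservable register its own
 * slot in the bitmaps below.
 */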
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * - different performance counters/event selection registers may be reserved
 *   by different subsystems; this reservation system just tries to coordinate
 *   things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
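/*
 * Usage sketch (editorial note, not in the original): another user such as
 * oprofile is expected to call reserve_perfctr_nmi()/reserve_evntsel_nmi()
 * with the MSR it wants before programming it, and the matching release_*()
 * helpers when done, so two subsystems never drive the same counter.
 */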
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_PERFCTR0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_PERFCTR0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_PERFCTR0);
                case 15:
                        return (msr - MSR_P4_BPU_PERFCTR0);
                }
        }
        return 0;
}
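/*
 * Example (editorial note): on an AMD CPU, MSR_K7_PERFCTR1 maps to bit 1,
 * so each counter register gets its own slot in perfctr_nmi_owner.
 */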
/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the event selection register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_EVNTSEL0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_EVENTSEL0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_EVNTSEL0);
                case 15:
                        return (msr - MSR_P4_BSU_ESCR0);
                }
        }
        return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
int reserve_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, perfctr_nmi_owner))
                return 1;
        return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);
void release_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);
int reserve_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, evntsel_nmi_owner))
                return 1;
        return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);
void release_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
void disable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        if (atomic_read(&nmi_active) <= 0)
                return;

        on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);

        if (wd_ops)
                wd_ops->unreserve();

        BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        /* are we already enabled */
        if (atomic_read(&nmi_active) != 0)
                return;

        /* are we lapic aware */
        if (!wd_ops)
                return;
        if (!wd_ops->reserve()) {
                printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
                return;
        }

        on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
        touch_nmi_watchdog();
}
/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
        u64 counter_val;
        unsigned int retval = hz;

        /*
         * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
         * are writable, with higher bits sign extending from bit 31.
         * So we can only program the counter with 31 bit values; the
         * 32nd bit must be 1 so that bits 33.. sign-extend to 1.
         * Find the appropriate nmi_hz.
         */
        counter_val = (u64)cpu_khz * 1000;
        do_div(counter_val, retval);
        if (counter_val > 0x7fffffffULL) {
                u64 count = (u64)cpu_khz * 1000;

                do_div(count, 0x7fffffffUL);
                retval = count + 1;
        }
        return retval;
}
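/*
 * Worked example (editorial note): on a hypothetical 3 GHz CPU with hz == 1,
 * counter_val is about 3e9, which exceeds 0x7fffffff, so retval becomes
 * 3e9 / 0x7fffffff + 1 = 2 and the watchdog ticks twice per second instead.
 */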
static void write_watchdog_counter(unsigned int perfctr_msr,
                                   const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                pr_debug("setting %s to -0x%08Lx\n", descr, count);
        wrmsrl(perfctr_msr, 0 - count);
}
static void write_watchdog_counter32(unsigned int perfctr_msr,
                                     const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                pr_debug("setting %s to -0x%08Lx\n", descr, count);
        wrmsr(perfctr_msr, (u32)(-count), 0);
}
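/*
 * Editorial note: the two helpers above differ only in write width. The
 * 64-bit variant uses wrmsrl() for counters that accept a full negative
 * value, while the 32-bit variant writes only the low word for the
 * P6/ARCH_PERFMON counters that sign-extend from bit 31 (see
 * adjust_for_32bit_ctr() above).
 */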
/*
 * AMD K7/K8/Family10h/Family11h support.
 * AMD keeps this interface nicely stable so there is not much variety
 */
#define K7_EVNTSEL_ENABLE       (1 << 22)
#define K7_EVNTSEL_INT          (1 << 20)
#define K7_EVNTSEL_OS           (1 << 17)
#define K7_EVNTSEL_USR          (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
#define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
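/*
 * Editorial note: event 0x76 counts cycles while the processor is running
 * (not halted), which is why the header comment warns that the watchdog
 * frequency varies with CPU load.
 */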
static int setup_k7_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);

        /* initialize the wd struct before enabling */
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;               /* unused */

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        return 1;
}
static void single_msr_stop_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->evntsel_msr, 0, 0);
}
static int single_msr_reserve(void)
{
        if (!reserve_perfctr_nmi(wd_ops->perfctr))
                return 0;

        if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
                release_perfctr_nmi(wd_ops->perfctr);
                return 0;
        }
        return 1;
}
static void single_msr_unreserve(void)
{
        release_evntsel_nmi(wd_ops->evntsel);
        release_perfctr_nmi(wd_ops->perfctr);
}
static void __kprobes
single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops k7_wd_ops = {
        .reserve        = single_msr_reserve,
        .unreserve      = single_msr_unreserve,
        .setup          = setup_k7_watchdog,
        .rearm          = single_msr_rearm,
        .stop           = single_msr_stop_watchdog,
        .perfctr        = MSR_K7_PERFCTR0,
        .evntsel        = MSR_K7_EVNTSEL0,
        .checkbit       = 1ULL << 47,
};
/*
 * Intel Model 6 (PPro+,P2,P3,P-M,Core1)
 */
#define P6_EVNTSEL0_ENABLE      (1 << 22)
#define P6_EVNTSEL_INT          (1 << 20)
#define P6_EVNTSEL_OS           (1 << 17)
#define P6_EVNTSEL_USR          (1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED  0x79
#define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        /* KVM doesn't implement this MSR */
        if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
                return 0;

        evntsel = P6_EVNTSEL_INT
                | P6_EVNTSEL_OS
                | P6_EVNTSEL_USR
                | P6_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);

        /* initialize the wd struct before enabling */
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;               /* unused */

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= P6_EVNTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        return 1;
}
static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /*
         * P6 based Pentium M needs to re-unmask
         * the apic vector, but it doesn't hurt
         * other P6 variants.
         * ArchPerfmon/Core Duo also needs this.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        /* P6/ARCH_PERFMON has 32 bit counter write */
        write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops p6_wd_ops = {
        .reserve        = single_msr_reserve,
        .unreserve      = single_msr_unreserve,
        .setup          = setup_p6_watchdog,
        .rearm          = p6_rearm,
        .stop           = single_msr_stop_watchdog,
        .perfctr        = MSR_P6_PERFCTR0,
        .evntsel        = MSR_P6_EVNTSEL0,
        .checkbit       = 1ULL << 39,
};
/*
 * Intel P4 performance counters.
 * By far the most complicated of all.
 */
#define MSR_P4_MISC_ENABLE_PERF_AVAIL   (1 << 7)
#define P4_ESCR_EVENT_SELECT(N)         ((N) << 25)
#define P4_ESCR_OS                      (1 << 3)
#define P4_ESCR_USR                     (1 << 2)
#define P4_CCCR_OVF_PMI0                (1 << 26)
#define P4_CCCR_OVF_PMI1                (1 << 27)
#define P4_CCCR_THRESHOLD(N)            ((N) << 20)
#define P4_CCCR_COMPLEMENT              (1 << 19)
#define P4_CCCR_COMPARE                 (1 << 18)
#define P4_CCCR_REQUIRED                (3 << 16)
#define P4_CCCR_ESCR_SELECT(N)          ((N) << 13)
#define P4_CCCR_ENABLE                  (1 << 12)
#define P4_CCCR_OVF                     (1 << 31)
#define P4_CONTROLS 18
static unsigned int p4_controls[18] = {
        MSR_P4_BPU_CCCR0,
        MSR_P4_BPU_CCCR1,
        MSR_P4_BPU_CCCR2,
        MSR_P4_BPU_CCCR3,
        MSR_P4_MS_CCCR0,
        MSR_P4_MS_CCCR1,
        MSR_P4_MS_CCCR2,
        MSR_P4_MS_CCCR3,
        MSR_P4_FLAME_CCCR0,
        MSR_P4_FLAME_CCCR1,
        MSR_P4_FLAME_CCCR2,
        MSR_P4_FLAME_CCCR3,
        MSR_P4_IQ_CCCR0,
        MSR_P4_IQ_CCCR1,
        MSR_P4_IQ_CCCR2,
        MSR_P4_IQ_CCCR3,
        MSR_P4_IQ_CCCR4,
        MSR_P4_IQ_CCCR5,
};
/*
 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
 * CRU_ESCR0 (with any non-null event selector) through a complemented
 * max threshold. [IA32-Vol3, Section 14.9.9]
 */
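/*
 * Editorial note: with P4_CCCR_COMPARE and P4_CCCR_COMPLEMENT set and the
 * threshold at its maximum of 15, the "event count <= threshold" test passes
 * on every cycle, so IQ_COUNTER0 effectively counts clock ticks no matter
 * which event CRU_ESCR0 selects.
 */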
static int setup_p4_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr, cccr_msr;
        unsigned int evntsel, cccr_val;
        unsigned int misc_enable, dummy;
        unsigned int ht_num;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
        if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
                return 0;

#ifdef CONFIG_SMP
        /* detect which hyperthread we are on */
        if (smp_num_siblings == 2) {
                unsigned int ebx, apicid;

                ebx = cpuid_ebx(1);
                apicid = (ebx >> 24) & 0xff;
                ht_num = apicid & 1;
        } else
#endif
                ht_num = 0;

        /*
         * performance counters are shared resources
         * assign each hyperthread its own set
         * (re-use the ESCR0 register, seems safe
         * and keeps the cccr_val the same)
         */
        if (!ht_num) {
                /* logical cpu 0 */
                perfctr_msr = MSR_P4_IQ_PERFCTR0;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR0;
                cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);

                /*
                 * If we're on the kdump kernel or other situation, we may
                 * still have other performance counter registers set to
                 * interrupt and they'll keep interrupting forever because
                 * of the P4_CCCR_OVF quirk. So we need to ACK all the
                 * pending interrupts and disable all the registers here,
                 * before reenabling the NMI delivery. Refer to p4_rearm()
                 * about the P4_CCCR_OVF quirk.
                 */
                if (reset_devices) {
                        unsigned int low, high;
                        int i;

                        for (i = 0; i < P4_CONTROLS; i++) {
                                rdmsr(p4_controls[i], low, high);
                                low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
                                wrmsr(p4_controls[i], low, high);
                        }
                }
        } else {
                /* logical cpu 1 */
                perfctr_msr = MSR_P4_IQ_PERFCTR1;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR1;

                /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
                if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
                        cccr_val = P4_CCCR_OVF_PMI0;
                else
                        cccr_val = P4_CCCR_OVF_PMI1;
                cccr_val |= P4_CCCR_ESCR_SELECT(4);
        }

        evntsel = P4_ESCR_EVENT_SELECT(0x3F)
                | P4_ESCR_OS
                | P4_ESCR_USR;

        cccr_val |= P4_CCCR_THRESHOLD(15)
                 | P4_CCCR_COMPLEMENT
                 | P4_CCCR_COMPARE
                 | P4_CCCR_REQUIRED;

        wrmsr(evntsel_msr, evntsel, 0);
        wrmsr(cccr_msr, cccr_val, 0);
        write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = cccr_msr;

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        cccr_val |= P4_CCCR_ENABLE;
        wrmsr(cccr_msr, cccr_val, 0);

        return 1;
}
static void stop_p4_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->cccr_msr, 0, 0);
        wrmsr(wd->evntsel_msr, 0, 0);
}
static int p4_reserve(void)
{
        if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
                return 0;
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
                goto fail1;
#endif
        if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
                goto fail2;
        /* RED-PEN why is ESCR1 not reserved here? */
        return 1;
 fail2:
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
 fail1:
#endif
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
        return 0;
}
static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
        release_evntsel_nmi(MSR_P4_CRU_ESCR0);
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}
static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        unsigned dummy;
        /*
         * P4 quirks:
         * - An overflown perfctr will assert its interrupt
         *   until the OVF flag in its CCCR is cleared.
         * - LVTPC is masked on interrupt and must be
         *   unmasked by the LVTPC handler.
         */
        rdmsrl(wd->cccr_msr, dummy);
        dummy &= ~P4_CCCR_OVF;
        wrmsrl(wd->cccr_msr, dummy);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops p4_wd_ops = {
        .reserve        = p4_reserve,
        .unreserve      = p4_unreserve,
        .setup          = setup_p4_watchdog,
        .rearm          = p4_rearm,
        .stop           = stop_p4_watchdog,
        /* RED-PEN this is wrong for the other sibling */
        .perfctr        = MSR_P4_BPU_PERFCTR0,
        .evntsel        = MSR_P4_BSU_ESCR0,
        .checkbit       = 1ULL << 39,
};
/*
 * Watchdog using the Intel architected PerfMon.
 * Used for Core2 and hopefully all future Intel CPUs.
 */
#define ARCH_PERFMON_NMI_EVENT_SEL      ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK    ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static struct wd_ops intel_arch_wd_ops;
static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
        unsigned int ebx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        /*
         * Check whether the Architectural PerfMon supports
         * Unhalted Core Cycles Event or not.
         * NOTE: Corresponding bit = 0 in ebx indicates event present.
         */
        cpuid(10, &(eax.full), &ebx, &unused, &unused);
        if ((eax.split.mask_length <
                        (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
            (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                return 0;

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = ARCH_PERFMON_EVENTSEL_INT
                | ARCH_PERFMON_EVENTSEL_OS
                | ARCH_PERFMON_EVENTSEL_USR
                | ARCH_PERFMON_NMI_EVENT_SEL
                | ARCH_PERFMON_NMI_EVENT_UMASK;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;               /* unused */

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);
        intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);

        return 1;
}
static struct wd_ops intel_arch_wd_ops __read_mostly = {
        .reserve        = single_msr_reserve,
        .unreserve      = single_msr_unreserve,
        .setup          = setup_intel_arch_watchdog,
        .rearm          = p6_rearm,
        .stop           = single_msr_stop_watchdog,
        .perfctr        = MSR_ARCH_PERFMON_PERFCTR1,
        .evntsel        = MSR_ARCH_PERFMON_EVENTSEL1,
};
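/*
 * Editorial note: .checkbit is absent from this initializer on purpose;
 * setup_intel_arch_watchdog() fills it in at runtime from the counter bit
 * width reported by CPUID leaf 10 (eax.split.bit_width above).
 */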
static void probe_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
                    boot_cpu_data.x86 != 16)
                        return;
                wd_ops = &k7_wd_ops;
                break;
        case X86_VENDOR_INTEL:
                /* Work around where perfctr1 doesn't have a working enable
                 * bit as described in the following errata:
                 * AE49 Core Duo and Intel Core Solo 65 nm
                 * AN49 Intel Pentium Dual-Core
                 * AF49 Dual-Core Intel Xeon Processor LV
                 */
                if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) ||
                    ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 &&
                     boot_cpu_data.x86_mask == 4))) {
                        intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
                        intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
                }
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        wd_ops = &intel_arch_wd_ops;
                        break;
                }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 13)
                                return;

                        wd_ops = &p6_wd_ops;
                        break;
                case 15:
                        wd_ops = &p4_wd_ops;
                        break;
                }
                break;
        }
}
/* Interface to nmi.c */

int lapic_watchdog_init(unsigned nmi_hz)
{
        if (!wd_ops) {
                probe_nmi_watchdog();
                if (!wd_ops) {
                        printk(KERN_INFO "NMI watchdog: CPU not supported\n");
                        return -1;
                }

                if (!wd_ops->reserve()) {
                        printk(KERN_ERR
                                "NMI watchdog: cannot reserve perfctrs\n");
                        return -1;
                }
        }

        if (!(wd_ops->setup(nmi_hz))) {
                printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
                       raw_smp_processor_id());
                return -1;
        }

        return 0;
}
void lapic_watchdog_stop(void)
{
        if (wd_ops)
                wd_ops->stop();
}
unsigned lapic_adjust_nmi_hz(unsigned hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
            wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
                hz = adjust_for_32bit_ctr(hz);
        return hz;
}
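/*
 * Editorial note: the counters are programmed with a negative value (see
 * write_watchdog_counter()), so while a counter is still counting up toward
 * zero its high "checkbit" stays set. lapic_wd_event() below uses that bit
 * to decide whether this NMI came from the watchdog counter overflowing,
 * and only then rearms the counter.
 */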
int __kprobes lapic_wd_event(unsigned nmi_hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        u64 ctr;

        rdmsrl(wd->perfctr_msr, ctr);
        if (ctr & wd_ops->checkbit) /* perfctr still running? */
                return 0;

        wd_ops->rearm(wd, nmi_hz);
        return 1;
}