/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
        unsigned int perfctr_msr;       /* the MSR to reset in NMI handler */
        unsigned int evntsel_msr;       /* the MSR to select the events to handle */
};
/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
        int (*reserve)(void);
        void (*unreserve)(void);
        int (*setup)(unsigned nmi_hz);
        void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
        void (*stop)(void);
        unsigned perfctr;
        unsigned evntsel;
        u64 checkbit;
};

static const struct wd_ops *wd_ops;
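
/*
 * Each supported CPU family provides one wd_ops instance (k7_wd_ops,
 * p6_wd_ops, p4_wd_ops and intel_arch_wd_ops below). probe_nmi_watchdog()
 * selects the matching one, and the generic code above and below only ever
 * works through this wd_ops pointer.
 */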
/*
 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 *  - different performance counters / event selects may be reserved by
 *    different subsystems; this reservation system just tries to coordinate
 *    things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
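
/*
 * Typical use of the reservation API by a perfctr client (a sketch only;
 * the MSRs are just an example, here AMD counter/event-select 0):
 *
 *      if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *              return 0;               // counter already owned by someone
 *      if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *              release_perfctr_nmi(MSR_K7_PERFCTR0);
 *              return 0;
 *      }
 *      // ... program and use the counter ...
 *      release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *      release_perfctr_nmi(MSR_K7_PERFCTR0);
 *
 * single_msr_reserve()/single_msr_unreserve() further down follow exactly
 * this pattern.
 */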
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return msr - MSR_K7_PERFCTR0;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return msr - MSR_ARCH_PERFMON_PERFCTR0;

                switch (boot_cpu_data.x86) {
                case 6:
                        return msr - MSR_P6_PERFCTR0;
                case 15:
                        return msr - MSR_P4_BPU_PERFCTR0;
                }
        }
        return 0;
}
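
/*
 * Example: the AMD counter MSRs are consecutive, so MSR_K7_PERFCTR2 maps to
 * reservation bit 2. The ARCH_PERFMON, P6 and P4 cases work the same way
 * relative to their respective base MSR.
 */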
/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the event selection register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return msr - MSR_K7_EVNTSEL0;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return msr - MSR_ARCH_PERFMON_EVENTSEL0;

                switch (boot_cpu_data.x86) {
                case 6:
                        return msr - MSR_P6_EVNTSEL0;
                case 15:
                        return msr - MSR_P4_BSU_ESCR0;
                }
        }
        return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
int reserve_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, perfctr_nmi_owner))
                return 1;
        return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);
void release_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);
int reserve_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return 1;

        if (!test_and_set_bit(counter, evntsel_nmi_owner))
                return 1;
        return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);
void release_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        /* register not managed by the allocator? */
        if (counter > NMI_MAX_COUNTER_BITS)
                return;

        clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
void disable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        if (atomic_read(&nmi_active) <= 0)
                return;

        on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);

        if (wd_ops)
                wd_ops->unreserve();

        BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        /* are we already enabled */
        if (atomic_read(&nmi_active) != 0)
                return;

        /* are we lapic aware */
        if (!wd_ops)
                return;
        if (!wd_ops->reserve()) {
                printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
                return;
        }

        on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
        touch_nmi_watchdog();
}
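
/*
 * setup_apic_nmi_watchdog() and stop_apic_nmi_watchdog() are the per-CPU
 * handlers on the nmi.c side of this interface (see "Interface to nmi.c"
 * below), which in turn use lapic_watchdog_init()/lapic_watchdog_stop().
 * The final touch_nmi_watchdog() refreshes the per-CPU alert state so the
 * freshly enabled watchdog does not trigger spuriously right away.
 */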
/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
        u64 counter_val;
        unsigned int retval = hz;

        /*
         * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
         * are writable, with higher bits sign extending from bit 31.
         * So we can only program the counter with values whose magnitude
         * fits in 31 bits; bit 31 (and, by sign extension, everything
         * above it) must be 1. Find the appropriate nmi_hz.
         */
        counter_val = (u64)cpu_khz * 1000;
        do_div(counter_val, retval);
        if (counter_val > 0x7fffffffULL) {
                u64 count = (u64)cpu_khz * 1000;
                do_div(count, 0x7fffffffUL);
                retval = count + 1;
        }
        return retval;
}
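
/*
 * Worked example, assuming a hypothetical 3 GHz CPU (cpu_khz == 3000000)
 * and hz == 1: counter_val is 3,000,000,000, which exceeds 0x7fffffff
 * (2,147,483,647), so retval becomes 3e9 / 0x7fffffff + 1 = 2. At 2 NMIs
 * per second the per-period count is 1,500,000,000, which does fit in 31
 * bits and can be written into the 32-bit sign-extended counter.
 */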
static void write_watchdog_counter(unsigned int perfctr_msr,
                                   const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                pr_debug("setting %s to -0x%08Lx\n", descr, count);
        wrmsrl(perfctr_msr, 0 - count);
}
static void write_watchdog_counter32(unsigned int perfctr_msr,
                                     const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                pr_debug("setting %s to -0x%08Lx\n", descr, count);
        wrmsr(perfctr_msr, (u32)(-count), 0);
}
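
/*
 * Both helpers program the counter with the negated period: the counter
 * counts up and the overflow from -count to 0 raises the performance
 * counter interrupt, which the LVTPC entry (set to APIC_DM_NMI in the
 * setup routines below) delivers as an NMI. On a fully busy CPU that gives
 * nmi_hz overflows per second; on an idle CPU the selected events do not
 * tick, so the rate drops, as noted at the top of this file.
 */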
/*
 * AMD K7/K8/Family10h/Family11h support.
 * AMD keeps this interface nicely stable so there is not much variety.
 */
#define K7_EVNTSEL_ENABLE       (1 << 22)
#define K7_EVNTSEL_INT          (1 << 20)
#define K7_EVNTSEL_OS           (1 << 17)
#define K7_EVNTSEL_USR          (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
#define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);

        /* initialize the wd struct before enabling */
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;               /* unused */

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        return 1;
}
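
/*
 * Note the ordering above: the event select and counter are programmed
 * with the enable bit still clear, the per-CPU ctlblk is filled in, the
 * CPU is marked watchdog-enabled, LVTPC is pointed at NMI delivery, and
 * only then is K7_EVNTSEL_ENABLE set, so the first overflow already
 * arrives as an NMI with all bookkeeping in place. The P6, P4 and
 * architectural PerfMon setup routines below follow the same sequence.
 */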
static void single_msr_stop_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->evntsel_msr, 0, 0);
}
static int single_msr_reserve(void)
{
        if (!reserve_perfctr_nmi(wd_ops->perfctr))
                return 0;

        if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
                release_perfctr_nmi(wd_ops->perfctr);
                return 0;
        }
        return 1;
}
static void single_msr_unreserve(void)
{
        release_evntsel_nmi(wd_ops->evntsel);
        release_perfctr_nmi(wd_ops->perfctr);
}
static void __kprobes
single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops k7_wd_ops = {
        .reserve        = single_msr_reserve,
        .unreserve      = single_msr_unreserve,
        .setup          = setup_k7_watchdog,
        .rearm          = single_msr_rearm,
        .stop           = single_msr_stop_watchdog,
        .perfctr        = MSR_K7_PERFCTR0,
        .evntsel        = MSR_K7_EVNTSEL0,
        .checkbit       = 1ULL << 47,
};
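
/*
 * .checkbit is bit 47 because the K7/K8 counters are 48 bits wide: while
 * the (negatively programmed) counter has not yet overflowed, its top bit
 * is still set, which is how lapic_wd_event() at the bottom of this file
 * tells a watchdog NMI apart from an unrelated one.
 */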
/*
 * Intel Model 6 (PPro+, P2, P3, P-M, Core1)
 */
#define P6_EVNTSEL0_ENABLE      (1 << 22)
#define P6_EVNTSEL_INT          (1 << 20)
#define P6_EVNTSEL_OS           (1 << 17)
#define P6_EVNTSEL_USR          (1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED  0x79
#define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        /* KVM doesn't implement this MSR */
        if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
                return 0;

        evntsel = P6_EVNTSEL_INT
                | P6_EVNTSEL_OS
                | P6_EVNTSEL_USR
                | P6_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);

        /* initialize the wd struct before enabling */
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;               /* unused */

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= P6_EVNTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        return 1;
}
static void __kprobes
p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /*
         * The P6 based Pentium M needs to re-unmask the apic vector,
         * and it doesn't hurt the other P6 variants.
         * ArchPerfmon/Core Duo also needs this.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        /* P6/ARCH_PERFMON has 32 bit counter write */
        write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops p6_wd_ops = {
        .reserve        = single_msr_reserve,
        .unreserve      = single_msr_unreserve,
        .setup          = setup_p6_watchdog,
        .rearm          = p6_rearm,
        .stop           = single_msr_stop_watchdog,
        .perfctr        = MSR_P6_PERFCTR0,
        .evntsel        = MSR_P6_EVNTSEL0,
        .checkbit       = 1ULL << 39,
};
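
/*
 * Here .checkbit is bit 39, matching the 40-bit P6-family counters; see
 * the note after k7_wd_ops for how the bit is used.
 */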
/*
 * Intel P4 performance counters.
 * By far the most complicated of all.
 */
#define MSR_P4_MISC_ENABLE_PERF_AVAIL   (1 << 7)
#define P4_ESCR_EVENT_SELECT(N) ((N) << 25)
#define P4_ESCR_OS              (1 << 3)
#define P4_ESCR_USR             (1 << 2)
#define P4_CCCR_OVF_PMI0        (1 << 26)
#define P4_CCCR_OVF_PMI1        (1 << 27)
#define P4_CCCR_THRESHOLD(N)    ((N) << 20)
#define P4_CCCR_COMPLEMENT      (1 << 19)
#define P4_CCCR_COMPARE         (1 << 18)
#define P4_CCCR_REQUIRED        (3 << 16)
#define P4_CCCR_ESCR_SELECT(N)  ((N) << 13)
#define P4_CCCR_ENABLE          (1 << 12)
#define P4_CCCR_OVF             (1 << 31)

#define P4_CONTROLS 18
static unsigned int p4_controls[18] = {
        MSR_P4_BPU_CCCR0,
        MSR_P4_BPU_CCCR1,
        MSR_P4_BPU_CCCR2,
        MSR_P4_BPU_CCCR3,
        MSR_P4_MS_CCCR0,
        MSR_P4_MS_CCCR1,
        MSR_P4_MS_CCCR2,
        MSR_P4_MS_CCCR3,
        MSR_P4_FLAME_CCCR0,
        MSR_P4_FLAME_CCCR1,
        MSR_P4_FLAME_CCCR2,
        MSR_P4_FLAME_CCCR3,
        MSR_P4_IQ_CCCR0,
        MSR_P4_IQ_CCCR1,
        MSR_P4_IQ_CCCR2,
        MSR_P4_IQ_CCCR3,
        MSR_P4_IQ_CCCR4,
        MSR_P4_IQ_CCCR5,
};
/*
 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
 * CRU_ESCR0 (with any non-null event selector) through a complemented
 * max threshold. [IA32-Vol3, Section 14.9.9]
 */
static int setup_p4_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr, cccr_msr;
        unsigned int evntsel, cccr_val;
        unsigned int misc_enable, dummy;
        unsigned int ht_num;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
        if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
                return 0;

#ifdef CONFIG_SMP
        /* detect which hyperthread we are on */
        if (smp_num_siblings == 2) {
                unsigned int ebx, apicid;

                ebx = cpuid_ebx(1);
                apicid = (ebx >> 24) & 0xff;
                ht_num = apicid & 1;
        } else
#endif
                ht_num = 0;

        /*
         * performance counters are shared resources
         * assign each hyperthread its own set
         * (re-use the ESCR0 register, seems safe
         * and keeps the cccr_val the same)
         */
        if (!ht_num) {
                /* logical cpu 0 */
                perfctr_msr = MSR_P4_IQ_PERFCTR0;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR0;
                cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);

                /*
                 * If we're on the kdump kernel or other situation, we may
                 * still have other performance counter registers set to
                 * interrupt and they'll keep interrupting forever because
                 * of the P4_CCCR_OVF quirk. So we need to ACK all the
                 * pending interrupts and disable all the registers here,
                 * before reenabling the NMI delivery. Refer to p4_rearm()
                 * about the P4_CCCR_OVF quirk.
                 */
                if (reset_devices) {
                        unsigned int low, high;
                        int i;

                        for (i = 0; i < P4_CONTROLS; i++) {
                                rdmsr(p4_controls[i], low, high);
                                low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
                                wrmsr(p4_controls[i], low, high);
                        }
                }
        } else {
                /* logical cpu 1 */
                perfctr_msr = MSR_P4_IQ_PERFCTR1;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR1;

                /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
                if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
                        cccr_val = P4_CCCR_OVF_PMI0;
                else
                        cccr_val = P4_CCCR_OVF_PMI1;
                cccr_val |= P4_CCCR_ESCR_SELECT(4);
        }

        evntsel = P4_ESCR_EVENT_SELECT(0x3F)
                | P4_ESCR_OS
                | P4_ESCR_USR;

        cccr_val |= P4_CCCR_THRESHOLD(15)
                 | P4_CCCR_COMPLEMENT
                 | P4_CCCR_COMPARE
                 | P4_CCCR_REQUIRED;

        wrmsr(evntsel_msr, evntsel, 0);
        wrmsr(cccr_msr, cccr_val, 0);
        write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = cccr_msr;

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        cccr_val |= P4_CCCR_ENABLE;
        wrmsr(cccr_msr, cccr_val, 0);

        return 1;
}
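
/*
 * How the "behave like a clock" trick works: the ESCR selects event 0x3F
 * with no mask bits, and the CCCR compares the per-cycle event count
 * against THRESHOLD(15) with both COMPARE and COMPLEMENT set. The
 * complemented comparison is true on every cycle, so IQ_COUNTER0/1
 * effectively increments once per clock and overflows after the period
 * programmed by write_watchdog_counter(), independent of what the selected
 * event actually does.
 */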
static void stop_p4_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->cccr_msr, 0, 0);
        wrmsr(wd->evntsel_msr, 0, 0);
}
static int p4_reserve(void)
{
        if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
                return 0;
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
                goto fail1;
#endif
        if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
                goto fail2;
        /* RED-PEN why is ESCR1 not reserved here? */
        return 1;
fail2:
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
fail1:
#endif
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
        return 0;
}
static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
        release_evntsel_nmi(MSR_P4_CRU_ESCR0);
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}
static void __kprobes
p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        unsigned dummy;
        /*
         * P4 quirks:
         * - An overflown perfctr will assert its interrupt
         *   until the OVF flag in its CCCR is cleared.
         * - LVTPC is masked on interrupt and must be
         *   unmasked by the LVTPC handler.
         */
        rdmsrl(wd->cccr_msr, dummy);
        dummy &= ~P4_CCCR_OVF;
        wrmsrl(wd->cccr_msr, dummy);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops p4_wd_ops = {
        .reserve        = p4_reserve,
        .unreserve      = p4_unreserve,
        .setup          = setup_p4_watchdog,
        .rearm          = p4_rearm,
        .stop           = stop_p4_watchdog,
        /* RED-PEN this is wrong for the other sibling */
        .perfctr        = MSR_P4_BPU_PERFCTR0,
        .evntsel        = MSR_P4_BSU_ESCR0,
        .checkbit       = 1ULL << 39,
};
/*
 * Watchdog using the Intel architected PerfMon.
 * Used for Core2 and hopefully all future Intel CPUs.
 */
#define ARCH_PERFMON_NMI_EVENT_SEL      ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK    ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static struct wd_ops intel_arch_wd_ops;
static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
        unsigned int ebx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        /*
         * Check whether the Architectural PerfMon supports
         * Unhalted Core Cycles Event or not.
         * NOTE: Corresponding bit = 0 in ebx indicates event present.
         */
        cpuid(10, &(eax.full), &ebx, &unused, &unused);
        if ((eax.split.mask_length <
             (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX + 1)) ||
            (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                return 0;

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = ARCH_PERFMON_EVENTSEL_INT
                | ARCH_PERFMON_EVENTSEL_OS
                | ARCH_PERFMON_EVENTSEL_USR
                | ARCH_PERFMON_NMI_EVENT_SEL
                | ARCH_PERFMON_NMI_EVENT_UMASK;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;               /* unused */

        /* ok, everything is initialized, announce that we're set */
        cpu_nmi_set_wd_enabled();

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);
        intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);

        return 1;
}
static struct wd_ops intel_arch_wd_ops __read_mostly = {
        .reserve        = single_msr_reserve,
        .unreserve      = single_msr_unreserve,
        .setup          = setup_intel_arch_watchdog,
        .rearm          = p6_rearm,
        .stop           = single_msr_stop_watchdog,
        .perfctr        = MSR_ARCH_PERFMON_PERFCTR1,
        .evntsel        = MSR_ARCH_PERFMON_EVENTSEL1,
};
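
/*
 * Unlike the other wd_ops, intel_arch_wd_ops is not const and has no
 * static .checkbit: setup_intel_arch_watchdog() fills checkbit in at run
 * time from the counter bit width reported by CPUID leaf 0xA, and
 * probe_nmi_watchdog() may retarget .perfctr/.evntsel to counter 0 for the
 * errata CPUs listed there. Defaulting to PERFCTR1/EVENTSEL1 presumably
 * leaves counter 0 free for other perfctr users such as oprofile.
 */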
static void probe_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 == 6 ||
                    (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15))
                        wd_ops = &k7_wd_ops;
                return;
        case X86_VENDOR_INTEL:
                /* Work around where perfctr1 doesn't have a working enable
                 * bit as described in the following errata:
                 * AE49 Core Duo and Intel Core Solo 65 nm
                 * AN49 Intel Pentium Dual-Core
                 * AF49 Dual-Core Intel Xeon Processor LV
                 */
                if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) ||
                    ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 &&
                      boot_cpu_data.x86_mask == 4))) {
                        intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
                        intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
                }

                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        wd_ops = &intel_arch_wd_ops;
                        break;
                }

                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 13)
                                return;

                        wd_ops = &p6_wd_ops;
                        break;
                case 15:
                        wd_ops = &p4_wd_ops;
                        break;
                }
                break;
        }
}
/* Interface to nmi.c */

int lapic_watchdog_init(unsigned nmi_hz)
{
        if (!wd_ops) {
                probe_nmi_watchdog();
                if (!wd_ops) {
                        printk(KERN_INFO "NMI watchdog: CPU not supported\n");
                        return -1;
                }

                if (!wd_ops->reserve()) {
                        printk(KERN_ERR
                                "NMI watchdog: cannot reserve perfctrs\n");
                        return -1;
                }
        }

        if (!(wd_ops->setup(nmi_hz))) {
                printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
                        raw_smp_processor_id());
                return -1;
        }

        return 0;
}
void lapic_watchdog_stop(void)
{
        if (wd_ops)
                wd_ops->stop();
}
unsigned lapic_adjust_nmi_hz(unsigned hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
            wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
                hz = adjust_for_32bit_ctr(hz);
        return hz;
}
int __kprobes lapic_wd_event(unsigned nmi_hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        u64 ctr;

        rdmsrl(wd->perfctr_msr, ctr);
        if (ctr & wd_ops->checkbit)     /* perfctr still running? */
                return 0;

        wd_ops->rearm(wd, nmi_hz);
        return 1;
}
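
/*
 * lapic_wd_event() is called from the NMI handler in nmi.c on every
 * watchdog NMI: if the checkbit is still set the counter has not rolled
 * over, so the NMI was not ours and 0 is returned. Otherwise the
 * CPU-specific rearm hook clears any overflow state, re-unmasks LVTPC
 * where needed and reprograms the counter for the next period, and the
 * return value of 1 tells the caller this NMI is accounted for.
 */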
);