#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
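
/*
 * Note: the values above use the architectural PERFEVTSEL encoding: bits
 * 0-7 are the event select and bits 8-15 the unit mask. For example,
 * 0x412e is event 0x2e with unit mask 0x41 (LLC misses), and 0x4f2e is
 * the same event with unit mask 0x4f (LLC references).
 */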
static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
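
/*
 * Note: the second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of
 * the general-purpose counters the event is allowed to run on (0x1 =
 * counter 0 only, 0x2 = counter 1 only, 0x3 = either of counters 0-1),
 * while FIXED_EVENT_CONSTRAINT() ties an architectural event to the
 * fixed-function counter given by its second argument.
 */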
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
static __initconst u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
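
/*
 * Note on the hw_cache_event_ids tables: an entry of -1 marks an
 * op/result combination the hardware cannot express, and a 0 entry
 * provides no event id; in both cases the generic code refuses to
 * create such a cache event.
 */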
static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}
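
/*
 * Note: CORE_EVNTSEL_MASK keeps only the event-select, unit-mask, edge,
 * invert and counter-mask fields of a user-supplied raw config; the
 * enable, PMI and ring-level (OS/USR) bits are stripped here and filled
 * in by the generic event code.
 */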
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
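
/*
 * Note: in the DEBUGCTL MSR, TR enables branch trace messages, BTS
 * redirects them into the DS buffer and BTINT raises an interrupt when
 * the buffer threshold is reached; BTS_OFF_OS/BTS_OFF_USR suppress
 * recording in ring 0/ring 3, mirroring the event's OS/USR bits.
 */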
static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}
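
/*
 * Note: MSR_CORE_PERF_GLOBAL_CTRL holds one enable bit per general and
 * per fixed counter, so writing 0 gates all counters at once; BTS is
 * not covered by it and is stopped separately above.
 */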
static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}
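
/*
 * Note: MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs four control bits per
 * fixed counter (ring-0 enable, ring-3 enable, any-thread, PMI), hence
 * the 0xf << (idx * 4) mask used here and in intel_pmu_enable_fixed().
 */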
static void intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return;

	if (!ds)
		return;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);

	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event,
			      header.size * (top - at), 1, 1))
		return;

	for (; at < top; at++) {
		data.ip   = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
}
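
/*
 * Note: each BTS record is three u64s (branch-from, branch-to, flags),
 * so the drain above walks the DS area as an array of such records and
 * reserves output space for all of them with a single perf_output_begin().
 */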
static void
intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);
}
static void
intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	__x86_pmu_enable_event(hwc);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all();
	return 1;
}
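
/*
 * Note: the handler above acks the overflow bits via GLOBAL_OVF_CTRL and
 * then re-reads GLOBAL_STATUS, looping while new overflows keep arriving;
 * the iteration limit is a safety net that warns and resets the PMU if
 * the loop never drains.
 */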
static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
static struct event_constraint *
intel_special_constraints(struct perf_event *event)
{
	unsigned int hw_event;

	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

	if (unlikely((hw_event ==
		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
		     (event->hw.sample_period == 1))) {

		return &bts_constraint;
	}

	return NULL;
}
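
/*
 * Note: a branch-instructions event with a sample period of 1 is the
 * signature used above to steer an event onto the BTS pseudo-counter;
 * everything else falls through to the regular constraint tables.
 */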
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_special_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static __initconst struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};
static __initconst struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.enable_bts		= intel_pmu_enable_bts,
	.disable_bts		= intel_pmu_disable_bts,
	.get_event_constraints	= intel_get_event_constraints,

	.cpu_starting		= init_debug_store_on_cpu,
	.cpu_dying		= fini_debug_store_on_cpu,
};
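
/*
 * Note: core_pmu is the variant used for version 1 architectural perfmon
 * (no global control MSRs, no BTS/DS callbacks), while intel_pmu is used
 * for version 2 and later; intel_pmu_init() below picks between them
 * based on the version id reported by CPUID leaf 0xA.
 */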
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6)
			return p6_pmu_init();
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version		= version;
	x86_pmu.num_events	= eax.split.num_events;
	x86_pmu.event_bits	= eax.split.bit_width;
	x86_pmu.event_mask	= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */