#ifdef CONFIG_CPU_SUP_INTEL

#define MAX_EXTRA_REGS 2

struct er_account {
	int			ref;		/* reference count */
	unsigned int		extra_reg;	/* extra MSR number */
	u64			extra_config;	/* extra MSR config */
};

/*
 * Per-core state.
 * This used to coordinate shared registers for HT threads.
 */
struct intel_percore {
	raw_spinlock_t		lock;		/* protect structure */
	struct er_account	regs[MAX_EXTRA_REGS];
	int			refcnt;		/* number of threads */
	unsigned		core_id;
};
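
/*
 * The two HT siblings of a core end up sharing one intel_percore (see
 * intel_pmu_cpu_starting() below).  regs[] records which extra MSR each
 * thread has claimed and with which value, so conflicting OFFCORE_RSP
 * programming between siblings can be refused rather than clobbered.
 */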
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
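
/*
 * The map values are raw EVENTSEL encodings: bits 7:0 are the event select
 * and bits 15:8 the unit mask.  For example 0x4f2e is event 0x2e with
 * umask 0x4f (LLC references), and 0x412e is the same event with umask
 * 0x41 (LLC misses).
 */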
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
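
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * general purpose counters the event may be scheduled on: 0x1 = PMC0 only,
 * 0x2 = PMC1 only, 0x3 = PMC0 or PMC1, and so on.
 */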
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	EVENT_EXTRA_END
};
static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_percore_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	INTEL_EVENT_CONSTRAINT(0xbb, 0),
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_ALL_DRAM		(NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
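
/*
 * An MSR_OFFCORE_RSP value is the OR of one or more request-type bits
 * (bits 0-7) with one or more response/supplier bits (bits 8-15).  The
 * extra-regs table below combines them to build the generic LL cache
 * access/miss events out of the 0x01b7 offcore response event.
 */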
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
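
/*
 * MSR_CORE_PERF_GLOBAL_STATUS has one overflow bit per counter (general
 * purpose counters in the low bits, fixed counters from bit 32 up, and
 * bit 62 for the DS/PEBS buffer); writing the same bits back to
 * MSR_CORE_PERF_GLOBAL_OVF_CTRL acknowledges them.
 */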
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
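
/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * (hwc->config_base here): bit 0 enables ring-0 counting, bit 1 ring-3,
 * bit 2 is the AnyThread bit (v3 and up) and bit 3 enables PMI on overflow.
 * Disabling simply clears the whole nibble for this counter.
 */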
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static struct event_constraint *
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
	struct event_constraint *c;
	struct intel_percore *pc;
	struct er_account *era;
	int i;
	int free_slot;
	int found;

	if (!x86_pmu.percore_constraints || hwc->extra_alloc)
		return NULL;

	for (c = x86_pmu.percore_constraints; c->cmask; c++) {
		if (e != c->code)
			continue;

		/*
		 * Allocate resource per core.
		 */
		pc = cpuc->per_core;
		if (!pc)
			break;
		c = &emptyconstraint;
		raw_spin_lock(&pc->lock);
		free_slot = -1;
		found = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
				/* Allow sharing same config */
				if (hwc->extra_config == era->extra_config) {
					era->ref++;
					cpuc->percore_used = 1;
					hwc->extra_alloc = 1;
					c = NULL;
				}
				/* else conflict */
				found = 1;
				break;
			} else if (era->ref == 0 && free_slot == -1)
				free_slot = i;
		}
		if (!found && free_slot != -1) {
			era = &pc->regs[free_slot];
			era->ref = 1;
			era->extra_reg = hwc->extra_reg;
			era->extra_config = hwc->extra_config;
			cpuc->percore_used = 1;
			hwc->extra_alloc = 1;
			c = NULL;
		}
		raw_spin_unlock(&pc->lock);

		return c;
	}

	return NULL;
}
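
/*
 * Return convention above: NULL means no per-core restriction applies (or
 * the shared register was claimed successfully) and normal scheduling
 * continues; &emptyconstraint means the sibling thread already programmed
 * the extra MSR with a different value, so this event cannot be scheduled
 * right now.
 */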
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_percore_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct extra_reg *er;
	struct intel_percore *pc;
	struct er_account *era;
	struct hw_perf_event *hwc = &event->hw;
	int i, allref;

	if (!cpuc->percore_used)
		return;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (hwc->config & er->config_mask))
			continue;

		pc = cpuc->per_core;
		raw_spin_lock(&pc->lock);
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 &&
			    era->extra_config == hwc->extra_config &&
			    era->extra_reg == er->msr) {
				era->ref--;
				hwc->extra_alloc = 0;
				break;
			}
		}
		allref = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++)
			allref += pc->regs[i].ref;
		if (allref == 0)
			cpuc->percore_used = 0;
		raw_spin_unlock(&pc->lock);
		break;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
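		/*
		 * 0x108000c0 decodes as event 0xc0 (INST_RETIRED.ANY_P),
		 * umask 0x00, INV (bit 23) set and CMASK (bits 31:24) = 16.
		 */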

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!cpu_has_ht_siblings())
		return NOTIFY_OK;

	cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
				      GFP_KERNEL, cpu_to_node(cpu));
	if (!cpuc->per_core)
		return NOTIFY_BAD;

	raw_spin_lock_init(&cpuc->per_core->lock);
	cpuc->per_core->core_id = -1;
	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	if (!cpu_has_ht_siblings())
		return;

	for_each_cpu(i, topology_thread_cpumask(cpu)) {
		struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;

		if (pc && pc->core_id == core_id) {
			kfree(cpuc->per_core);
			cpuc->per_core = pc;
			break;
		}
	}

	cpuc->per_core->core_id = core_id;
	cpuc->per_core->refcnt++;
}
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_percore *pc = cpuc->per_core;

	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->per_core = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
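
/*
 * Lifecycle of the per-core state: cpu_prepare() allocates one intel_percore
 * per CPU, cpu_starting() makes HT siblings of the same core share a single
 * copy (freeing the duplicate) and takes a reference, and cpu_dying() drops
 * the reference and frees the structure with its last user.
 */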
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};
static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
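	/*
	 * CPUID leaf 0xa: EAX[7:0] is the perfmon version, EAX[15:8] the
	 * number of general purpose counters, EAX[23:16] their bit width and
	 * EAX[31:24] the length of the EBX bit vector of unavailable
	 * architectural events; EDX[4:0] is the number of fixed counters.
	 */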
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;

		if (ebx & 0x40) {
			/*
			 * Erratum AAJ80 detected, we work it around by using
			 * the BR_MISP_EXEC.ANY event. This will over-count
			 * branch-misses, but it's still much better than the
			 * architectural event which is often completely bogus:
			 */
			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;

			pr_cont("erratum AAJ80 worked around, ");
		}
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.percore_constraints = intel_westmere_percore_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_events;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;

		pr_cont("SandyBridge events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */