/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
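/*
 * In the raw encodings above, the low byte is the event select and the
 * second byte is the unit mask (e.g. 0x003c = UNHALTED_CORE_CYCLES,
 * 0x00c0 = INST_RETIRED.ANY_P).  0x0300 is not a real event select; it
 * is only recognised as the pseudo-encoding for the fixed REF_CYCLES
 * counter.
 */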
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
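/*
 * The second argument to INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * generic counters the event may be scheduled on (0x1 = counter 0 only,
 * 0x2 = counter 1 only, 0x3 = either of the first two), while
 * FIXED_EVENT_CONSTRAINT() ties an architectural event to one of the
 * fixed-purpose counters.
 */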
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	EVENT_EXTRA_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
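/*
 * These request/response selections are not programmed into the event
 * select itself: the cache events below use the OFFCORE_RESPONSE
 * encodings (0x01b7/0x01bb) and the composite masks above are written
 * to MSR_OFFCORE_RSP_0/1 through the extra_reg (config1) mechanism.
 */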
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* bit 11 is reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
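/*
 * Nehalem has no single "any response" bit the way Sandy Bridge has
 * SNB_RESP_ANY, so an L3 "access" is approximated as the union of all
 * hit and miss response types above.
 */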
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
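/*
 * Each fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bit 0: ring-0 counting, bit 1: ring-3
 * counting, bit 2: any-thread, bit 3: PMI on overflow), which is why the
 * fixed enable/disable paths mask and rewrite 0xf << (idx * 4).
 */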
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
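/*
 * 0x01b7 and 0x01bb are the event/umask encodings of OFFCORE_RESPONSE_0
 * and OFFCORE_RESPONSE_1, so moving an event to the alternate shared MSR
 * means rewriting both the event select and extra_reg.reg together.
 */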
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
					struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
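/*
 * Example: a "cycles:pp" request on these cores is thus rewritten to
 * event=0xc0, cmask=16, inv=1 with all other configuration bits
 * preserved, instead of the non-PEBS-capable 0x003c encoding.
 */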
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on a context switch.
	 * For now, we simply reset it.
	 */
	intel_pmu_lbr_reset();
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	NULL,
};
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that are reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		default:
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
		break;
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;