#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};
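/*
 * Illustrative note (not part of the original source): a struct
 * intel_shared_regs is first allocated per logical CPU in
 * intel_pmu_cpu_prepare(); when HT sharing is in effect,
 * intel_pmu_cpu_starting() below frees the duplicate and points sibling
 * threads of one core at a single instance, with refcnt tracking how many
 * threads still reference it.
 */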
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
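/*
 * Worked example (illustrative, not part of the original file): the generic
 * PERF_COUNT_HW_CACHE_MISSES entry 0x412e decodes as event select 0x2e
 * (longest-latency cache reference) with umask 0x41, i.e. last-level cache
 * misses, following the architectural PERFEVTSEL layout of bits 7:0 = event
 * select and bits 15:8 = umask.
 */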
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	EVENT_EXTRA_END
};
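/*
 * Illustrative note (not in the original source): each INTEL_EVENT_EXTRA_REG
 * entry ties an event select code (0xb7 or 0xbb here, the two OFFCORE_RESPONSE
 * events) to the MSR that holds its response-type filter; the third argument
 * is the mask of filter bits that are valid on this CPU, which is why Sandy
 * Bridge allows 38 bits (0x3fffffffff) while Nehalem/Westmere allow only 16.
 */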
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
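/*
 * Illustrative note (not part of the original file): the 0x01b7 entries above
 * are the raw encoding of OFFCORE_RESPONSE_0 (event 0xb7, umask 0x01). An
 * event programmed with this code also needs a response-type value written to
 * MSR_OFFCORE_RSP_0, which is what the *_hw_cache_extra_regs tables and the
 * extra_reg/shared_regs machinery further down provide.
 */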
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_ALL_DRAM		(NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
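/*
 * Worked example (illustrative, not part of the original file): the
 * nehalem_hw_cache_extra_regs entry for LL/OP_READ/RESULT_MISS below is
 * NHM_DMND_READ|NHM_L3_MISS, i.e. demand data reads (bit 0) combined with the
 * four L3-miss response types (bits 12-15), giving the value 0xf001 that gets
 * written to MSR_OFFCORE_RSP_0 when such an event is scheduled.
 */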
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
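/*
 * Worked example (illustrative, not part of the original file): for fixed
 * counter 1 (idx == 1) counting in ring 0 and ring 3 with PMI enabled, bits
 * becomes 0xb and is shifted into bits 7:4 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL,
 * since each fixed counter owns a 4-bit field in that control MSR.
 */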
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return false;

	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	}

	if (event->hw.extra_reg.idx == orig_idx)
		return false;

	return true;
}
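/*
 * Illustrative note (not part of the original file): 0x01b7 and 0x01bb are the
 * raw encodings of OFFCORE_RESPONSE_0 and OFFCORE_RESPONSE_1, so the swap
 * above retargets an offcore-response event at the other of the two
 * MSR_OFFCORE_RSP_* registers when its first choice is already claimed with a
 * different filter value.
 */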
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event)
{
	struct event_constraint *c = &emptyconstraint;
	struct hw_perf_event_extra *reg = &event->hw.extra_reg;
	struct er_account *era;
	unsigned long flags;
	int orig_idx = reg->idx;

	/* already allocated shared msr */
	if (reg->alloc)
		return &unconstrained;

again:
	era = &cpuc->shared_regs->regs[reg->idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/* no need to reallocate during incremental event scheduling */
		reg->alloc = 1;

		/*
		 * All events using extra_reg are unconstrained.
		 * Avoids calling x86_get_event_constraints()
		 *
		 * Must revisit if extra_reg controlling events
		 * ever have constraints. Worst case we go through
		 * the regular event constraint table.
		 */
		c = &unconstrained;
	} else if (intel_try_alt_er(event, orig_idx)) {
		raw_spin_unlock(&era->lock);
		goto again;
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * only put constraint if extra reg was actually
	 * allocated. Also takes care of events which do
	 * not use an extra shared reg
	 */
	if (!reg->alloc)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL;

	if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
		c = __intel_shared_reg_get_constraints(cpuc, event);

	return c;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
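/*
 * Worked decode (illustrative, not part of the original file): 0x108000c0 is
 * event select 0xc0 (INST_RETIRED.ANY_P) with umask 0x00, cmask 0x10 (16) in
 * bits 31:24 and the invert bit (bit 23) set, which is exactly the
 * "count cycles retiring fewer than 16 instructions" trick described in the
 * comment above.
 */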
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};
static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}

	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!x86_pmu.extra_regs)
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
		return;

	for_each_cpu(i, topology_thread_cpumask(cpu)) {
		struct intel_shared_regs *pc;

		pc = per_cpu(cpu_hw_events, i).shared_regs;
		if (pc && pc->core_id == core_id) {
			kfree(cpuc->shared_regs);
			cpuc->shared_regs = pc;
			break;
		}
	}

	cpuc->shared_regs->core_id = core_id;
	cpuc->shared_regs->refcnt++;
}
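/*
 * Illustrative note (not part of the original file): after this runs on both
 * hyperthreads of a core, cpuc->shared_regs for the two logical CPUs point at
 * the same structure with refcnt == 2, which is what lets the er_account
 * refcounting in __intel_shared_reg_get_constraints() arbitrate the shared
 * OFFCORE_RSP MSRs between the siblings.
 */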
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};
static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		default:
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;

		if (ebx & 0x40) {
			/*
			 * Erratum AAJ80 detected, we work it around by using
			 * the BR_MISP_EXEC.ANY event. This will over-count
			 * branch-misses, but it's still much better than the
			 * architectural event which is often completely bogus:
			 */
			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;

			pr_cont("erratum AAJ80 worked around, ");
		}
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_events;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;

		pr_cont("SandyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}
	return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */