/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/apic.h>

#include "../perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
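
/*
 * Each entry above is a raw PERFEVTSEL encoding: the low byte is the
 * event select, the next byte the unit mask. E.g. 0x412e for
 * PERF_COUNT_HW_CACHE_MISSES is event 0x2e with umask 0x41, i.e. the
 * architectural LONGEST_LAT_CACHE.MISS event.
 */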
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1),	/* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1),	/* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1),	/* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3),	/* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3),	/* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3),	/* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3),	/* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3),	/* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3),	/* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),	/* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1),	/* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),	/* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1),	/* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),	/* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf),	/* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4),	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),	/* MEM_TRANS_RETIRED.LOAD_LATENCY */

	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	/* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),	/* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf),	/* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf),	/* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),	/* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf),	/* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf),	/* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),	/* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	/* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),	/* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note: the low 8 bits of the eventsel code are not a contiguous
	 * field; they contain some bits that #GP when set. Those bits are
	 * masked out of the valid-bits mask here.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

static struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};
/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all measured in slots, where a slot is one issue
 * opportunity in a 4-wide pipeline. Some events already report slots
 * directly; for cycle-based events we multiply by the pipeline width (4).
 *
 * With Hyper-Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2: (count_t0 + count_t1) / 2 * 4.
 */
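
/*
 * Worked example of the scaling (illustrative numbers): with HT on,
 * cpu_clk_unhalted.thread_any counts core cycles on each hw thread, so
 * if each sibling reads C cycles the summed count is 2C. Applying the
 * .scale of "2" below yields 4C total slots, the same result as the
 * 4 * core-cycles one would compute with HT off (.scale "4").
 */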
EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");
static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),	/* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	/* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),	/* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
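
/*
 * For instance, intel_pmu_event_map(PERF_COUNT_HW_INSTRUCTIONS) returns
 * 0x00c0 from the table above, i.e. the INST_RETIRED encoding that ends
 * up in the PERFEVTSEL MSR for a generic "instructions" event.
 */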
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
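
/*
 * Informal sketch of how these bits compose in the OFFCORE_RESPONSE
 * layout: the low bits select the request type (demand read/RFO), the
 * bits around 16-30 describe the supplier/response, and bits 31-37 the
 * snoop outcome. The LL read-miss entry in the extra_regs table below
 * is thus SKL_DEMAND_READ | SKL_L3_MISS | SKL_ANY_SNOOP |
 * SKL_SUPPLIER_NONE: "demand data read, missed all local cache levels,
 * any snoop outcome".
 */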
static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
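
/*
 * These tables are indexed with the user-visible PERF_TYPE_HW_CACHE
 * encoding: config = (cache id) | (op id << 8) | (result id << 16).
 * As a rough sketch, a perf "L1-dcache-load-misses" request resolves to
 * skl_hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)], i.e. raw
 * event 0x151 (L1D.REPLACEMENT); -1 marks invalid combinations and 0
 * marks combinations with no counting event.
 */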
static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0,	/* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0,	/* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851,	/* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e,	/* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185,	/* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */
#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
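
/*
 * Note (informal): Broadwell reuses the Haswell request and snoop bits
 * but reports the local-DRAM supplier on bit 26 instead of bit 22, so
 * only the L3-miss composite differs between the two.
 */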
static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251,	/* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,	/* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e,	/* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c,	/* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185,	/* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8,	/* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
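
/*
 * Quick arithmetic check (illustrative): NHM_L3_MISS covers bits 12-15,
 * i.e. 0xf000, so the LL read-miss extra-reg value programmed below,
 * NHM_DMND_READ|NHM_L3_MISS, works out to 0xf001: demand data reads
 * whose response was not served by the local L3.
 */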
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251,	/* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,	/* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e,	/* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c,	/* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8,	/* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8,	/* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140,	/* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141,	/* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e,	/* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,	/* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129,	/* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,	/* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A,	/* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208,	/* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808,	/* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282,	/* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,	/* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,	/* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,	/* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129,	/* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,	/* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A,	/* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,	/* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508,	/* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,	/* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608,	/* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282,	/* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
/* no_alloc_cycles.not_delivered */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");
static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};
#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};
static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104,	/* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804,	/* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x40205,	/* PAGE_WALKS.I_SIDE_WALKS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
/* UOPS_NOT_DELIVERED.ANY */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
/* UOPS_RETIRED.ANY */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
/* UOPS_ISSUED.ANY */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
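
/*
 * As with Silvermont above, the scale tracks pipeline width: Goldmont
 * is 3-wide, so total slots = 3 * unhalted core cycles, and unlike
 * Silvermont it has a distinct UOPS_ISSUED event for the issued metric.
 */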
static struct attribute *glm_events_attrs[] = {
	EVENT_PTR(td_total_slots_glm),
	EVENT_PTR(td_total_slots_scale_glm),
	EVENT_PTR(td_fetch_bubbles_glm),
	EVENT_PTR(td_recovery_bubbles_glm),
	EVENT_PTR(td_slots_issued_glm),
	EVENT_PTR(td_slots_retired_glm),
	NULL
};
static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
	EVENT_EXTRA_END
};
#define GLM_DEMAND_DATA_RD		BIT_ULL(0)
#define GLM_DEMAND_RFO			BIT_ULL(1)
#define GLM_ANY_RESPONSE		BIT_ULL(16)
#define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
#define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
#define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
#define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
#define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
#define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
#define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 glm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
};
static __initconst const u64 glm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
						  GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH|
						  GLM_LLC_MISS,
		},
	},
};
static __initconst const u64 glp_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
};
static __initconst const u64 glp_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
						  GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
};
#define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL	BIT_ULL(21)
#define KNL_MCDRAM_FAR		BIT_ULL(22)
#define KNL_DDR_LOCAL		BIT_ULL(23)
#define KNL_DDR_FAR		BIT_ULL(24)
#define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
				 KNL_DDR_LOCAL | KNL_DDR_FAR)
#define KNL_L2_READ		SLM_DMND_READ
#define KNL_L2_WRITE		SLM_DMND_WRITE
#define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
#define KNL_L2_ACCESS		SLM_LLC_ACCESS
#define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
				 KNL_DRAM_ANY | SNB_SNP_ANY | \
				 SNB_NON_DRAM)
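
/*
 * Knights Landing has no shared L3; its "LL" slot below describes the
 * per-tile L2 instead, which is why the miss composite mixes the
 * SLM/SNB request and snoop bits with KNL-specific other-tile and
 * MCDRAM/DDR memory bits.
 */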
static __initconst const u64 knl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= KNL_L2_READ | KNL_L2_ACCESS,
			[C(RESULT_MISS)]	= 0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= KNL_L2_WRITE | KNL_L2_ACCESS,
			[C(RESULT_MISS)]	= KNL_L2_WRITE | KNL_L2_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= KNL_L2_PREFETCH | KNL_L2_ACCESS,
			[C(RESULT_MISS)]	= KNL_L2_PREFETCH | KNL_L2_MISS,
		},
	},
};
1862 * Used from PMIs where the LBRs are already disabled.
1864 * This function may be called consecutively. The PMU is required to
1865 * remain in the disabled state across such consecutive calls.
1867 * During consecutive calls, the same disable value will be written to related
1868 * registers, so the PMU state remains unchanged.
1870 * intel_bts events don't coexist with intel PMU's BTS events because of
1871 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1872 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1874 static void __intel_pmu_disable_all(void)
1876 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1878 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1880 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1881 intel_pmu_disable_bts();
1883 intel_pmu_pebs_disable_all();
1886 static void intel_pmu_disable_all(void)
1888 __intel_pmu_disable_all();
1889 intel_pmu_lbr_disable_all();
1892 static void __intel_pmu_enable_all(int added, bool pmi)
1894 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1896 intel_pmu_pebs_enable_all();
1897 intel_pmu_lbr_enable_all(pmi);
1898 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1899 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1901 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1902 struct perf_event *event =
1903 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1905 if (WARN_ON_ONCE(!event))
1906 return;
1908 intel_pmu_enable_bts(event->hw.config);
1912 static void intel_pmu_enable_all(int added)
1914 __intel_pmu_enable_all(added, false);
1918 * Workaround for:
1919 * Intel Errata AAK100 (model 26)
1920 * Intel Errata AAP53 (model 30)
1921 * Intel Errata BD53 (model 44)
1923 * The official story:
1924 * These chips need to be 'reset' when adding counters by programming the
1925 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1926 * in sequence on the same PMC or on different PMCs.
1928 * In practice it appears some of these events do in fact count, and
1929 * we need to program all 4 events.
1931 static void intel_pmu_nhm_workaround(void)
1933 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1934 static const unsigned long nhm_magic[4] = {
1935 0x4300B5,
1936 0x4300D2,
1937 0x4300B1,
1938 0x4300B1
1940 struct perf_event *event;
1941 int i;
1944 * The errata require the following steps:
1945 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1946 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1947 * the corresponding PMCx;
1948 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
1949 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1950 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
1954 * The actual steps we take differ slightly from the above:
1955 * A) To reduce MSR operations, we skip step 1) since the registers
1956 * are already cleared before this function is called;
1957 * B) Call x86_perf_event_update to save PMCx before configuring
1958 * PERFEVTSELx with the magic number;
1959 * C) In step 5), we clear a PERFEVTSELx only when it is not
1960 * currently in use;
1961 * D) Call x86_perf_event_set_period to restore PMCx;
1964 /* We always operate 4 pairs of PERF Counters */
1965 for (i = 0; i < 4; i++) {
1966 event = cpuc->events[i];
1967 if (event)
1968 x86_perf_event_update(event);
1971 for (i = 0; i < 4; i++) {
1972 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1973 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1976 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1977 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1979 for (i = 0; i < 4; i++) {
1980 event = cpuc->events[i];
1982 if (event) {
1983 x86_perf_event_set_period(event);
1984 __x86_pmu_enable_event(&event->hw,
1985 ARCH_PERFMON_EVENTSEL_ENABLE);
1986 } else
1987 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
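/*
 * Illustrative sketch of the MSR traffic the workaround above generates
 * when no events are live (each looped write runs for i = 0..3):
 *
 *   wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
 *   wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
 *   wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);   briefly enable PMC0-3
 *   wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);   stop them again
 *   wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
 */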
1991 static void intel_pmu_nhm_enable_all(int added)
1993 if (added)
1994 intel_pmu_nhm_workaround();
1995 intel_pmu_enable_all(added);
1998 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2000 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2002 if (cpuc->tfa_shadow != val) {
2003 cpuc->tfa_shadow = val;
2004 wrmsrl(MSR_TSX_FORCE_ABORT, val);
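/*
 * Note: tfa_shadow caches the last value written to MSR_TSX_FORCE_ABORT,
 * so repeated calls with the same state skip the redundant (and
 * relatively expensive) WRMSR; only real on/off transitions touch the
 * MSR.
 */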
2008 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2011 * We're going to use PMC3, make sure TFA is set before we touch it.
2013 if (cntr == 3 && !cpuc->is_fake)
2014 intel_set_tfa(cpuc, true);
2017 static void intel_tfa_pmu_enable_all(int added)
2019 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2022 * If we find PMC3 is no longer used when we enable the PMU, we can
2023 * clear TFA.
2025 if (!test_bit(3, cpuc->active_mask))
2026 intel_set_tfa(cpuc, false);
2028 intel_pmu_enable_all(added);
2031 static inline u64 intel_pmu_get_status(void)
2033 u64 status;
2035 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2037 return status;
2040 static inline void intel_pmu_ack_status(u64 ack)
2042 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2045 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2047 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2048 u64 ctrl_val, mask;
2050 mask = 0xfULL << (idx * 4);
2052 rdmsrl(hwc->config_base, ctrl_val);
2053 ctrl_val &= ~mask;
2054 wrmsrl(hwc->config_base, ctrl_val);
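/*
 * Worked example: each fixed counter owns a 4-bit field in the control
 * MSR, so disabling fixed counter 1 computes mask = 0xf << 4 = 0xf0 and
 * clears only bits 7:4, leaving the other fixed counters untouched.
 */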
2057 static inline bool event_is_checkpointed(struct perf_event *event)
2059 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2062 static void intel_pmu_disable_event(struct perf_event *event)
2064 struct hw_perf_event *hwc = &event->hw;
2065 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2067 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2068 intel_pmu_disable_bts();
2069 intel_pmu_drain_bts_buffer();
2070 return;
2073 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2074 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2075 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2077 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
2078 intel_pmu_disable_fixed(hwc);
2079 else
2080 x86_pmu_disable_event(event);
2083 * Needs to be called after x86_pmu_disable_event,
2084 * so we don't trigger the event without PEBS bit set.
2086 if (unlikely(event->attr.precise_ip))
2087 intel_pmu_pebs_disable(event);
2090 static void intel_pmu_del_event(struct perf_event *event)
2092 if (needs_branch_stack(event))
2093 intel_pmu_lbr_del(event);
2094 if (event->attr.precise_ip)
2095 intel_pmu_pebs_del(event);
2098 static void intel_pmu_read_event(struct perf_event *event)
2100 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2101 intel_pmu_auto_reload_read(event);
2102 else
2103 x86_perf_event_update(event);
2106 static void intel_pmu_enable_fixed(struct perf_event *event)
2108 struct hw_perf_event *hwc = &event->hw;
2109 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2110 u64 ctrl_val, mask, bits = 0;
2113 * Enable IRQ generation (0x8), if not PEBS,
2114 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2115 * if requested:
2117 if (!event->attr.precise_ip)
2118 bits |= 0x8;
2119 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2120 bits |= 0x2;
2121 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2122 bits |= 0x1;
2125 * ANY bit is supported in v3 and up
2127 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2128 bits |= 0x4;
2130 bits <<= (idx * 4);
2131 mask = 0xfULL << (idx * 4);
2133 rdmsrl(hwc->config_base, ctrl_val);
2134 ctrl_val &= ~mask;
2135 ctrl_val |= bits;
2136 wrmsrl(hwc->config_base, ctrl_val);
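/*
 * Worked example: a non-PEBS event counting in both ring 0 and ring 3
 * on fixed counter 0 assembles bits = 0x8 | 0x2 | 0x1 = 0xb and writes
 * it into bits 3:0 of the fixed-counter control MSR.
 */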
2139 static void intel_pmu_enable_event(struct perf_event *event)
2141 struct hw_perf_event *hwc = &event->hw;
2142 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2144 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2145 if (!__this_cpu_read(cpu_hw_events.enabled))
2146 return;
2148 intel_pmu_enable_bts(hwc->config);
2149 return;
2152 if (event->attr.exclude_host)
2153 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2154 if (event->attr.exclude_guest)
2155 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2157 if (unlikely(event_is_checkpointed(event)))
2158 cpuc->intel_cp_status |= (1ull << hwc->idx);
2160 if (unlikely(event->attr.precise_ip))
2161 intel_pmu_pebs_enable(event);
2163 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2164 intel_pmu_enable_fixed(event);
2165 return;
2168 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2171 static void intel_pmu_add_event(struct perf_event *event)
2173 if (event->attr.precise_ip)
2174 intel_pmu_pebs_add(event);
2175 if (needs_branch_stack(event))
2176 intel_pmu_lbr_add(event);
2180 * Save and restart an expired event. Called by NMI contexts,
2181 * so it has to be careful about preempting normal event ops:
2183 int intel_pmu_save_and_restart(struct perf_event *event)
2185 x86_perf_event_update(event);
2187 * For a checkpointed counter always reset back to 0. This
2188 * avoids a situation where the counter overflows, aborts the
2189 * transaction and is then set back to shortly before the
2190 * overflow, and overflows and aborts again.
2192 if (unlikely(event_is_checkpointed(event))) {
2193 /* No race with NMIs because the counter should not be armed */
2194 wrmsrl(event->hw.event_base, 0);
2195 local64_set(&event->hw.prev_count, 0);
2197 return x86_perf_event_set_period(event);
2200 static void intel_pmu_reset(void)
2202 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2203 unsigned long flags;
2204 int idx;
2206 if (!x86_pmu.num_counters)
2207 return;
2209 local_irq_save(flags);
2211 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2213 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2214 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2215 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2217 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2218 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2220 if (ds)
2221 ds->bts_index = ds->bts_buffer_base;
2223 /* Ack all overflows and disable fixed counters */
2224 if (x86_pmu.version >= 2) {
2225 intel_pmu_ack_status(intel_pmu_get_status());
2226 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2229 /* Reset LBRs and LBR freezing */
2230 if (x86_pmu.lbr_nr) {
2231 update_debugctlmsr(get_debugctlmsr() &
2232 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2235 local_irq_restore(flags);
2239 * This handler is triggered by the local APIC, so the APIC IRQ handling
2240 * rules apply:
2242 static int intel_pmu_handle_irq(struct pt_regs *regs)
2244 struct perf_sample_data data;
2245 struct cpu_hw_events *cpuc;
2246 int bit, loops;
2247 u64 status;
2248 int handled;
2249 int pmu_enabled;
2251 cpuc = this_cpu_ptr(&cpu_hw_events);
2254 * Save the PMU state.
2255 * It needs to be restored when leaving the handler.
2257 pmu_enabled = cpuc->enabled;
2259 * There is no known reason not to always do the late ACK,
2260 * but keep it opt-in just in case.
2262 if (!x86_pmu.late_ack)
2263 apic_write(APIC_LVTPC, APIC_DM_NMI);
2264 intel_bts_disable_local();
2265 cpuc->enabled = 0;
2266 __intel_pmu_disable_all();
2267 handled = intel_pmu_drain_bts_buffer();
2268 handled += intel_bts_interrupt();
2269 status = intel_pmu_get_status();
2270 if (!status)
2271 goto done;
2273 loops = 0;
2274 again:
2275 intel_pmu_lbr_read();
2276 intel_pmu_ack_status(status);
2277 if (++loops > 100) {
2278 static bool warned = false;
2279 if (!warned) {
2280 WARN(1, "perfevents: irq loop stuck!\n");
2281 perf_event_print_debug();
2282 warned = true;
2284 intel_pmu_reset();
2285 goto done;
2288 inc_irq_stat(apic_perf_irqs);
2292 * Ignore a range of extra bits in status that do not indicate
2293 * overflow by themselves.
2295 status &= ~(GLOBAL_STATUS_COND_CHG |
2296 GLOBAL_STATUS_ASIF |
2297 GLOBAL_STATUS_LBRS_FROZEN);
2298 if (!status)
2299 goto done;
2301 * In case multiple PEBS events are sampled at the same time,
2302 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2303 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2304 * having their bits set in the status register. This is a sign
2305 * that there was at least one PEBS record pending at the time
2306 * of the PMU interrupt. PEBS counters must only be processed
2307 * via the drain_pebs() calls and not via the regular sample
2308 * processing loop that follows, otherwise phony regular samples
2309 * may be generated in the sampling buffer without being marked
2310 * with the EXACT tag. Another possibility is to have one PEBS
2311 * event and at least one non-PEBS event which overflows while
2312 * PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2313 * not be set, yet on Skylake the overflow status bit for the
2314 * PEBS counter will be.
2316 * To avoid this problem, we systematically ignore the PEBS-enabled
2317 * counters from the GLOBAL_STATUS mask and we always process PEBS
2318 * events via drain_pebs().
2320 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2321 status &= ~cpuc->pebs_enabled;
2322 else
2323 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2326 * PEBS overflow sets bit 62 in the global status register
2328 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2329 handled++;
2330 x86_pmu.drain_pebs(regs);
2331 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2335 * Intel PT
2337 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2338 handled++;
2339 intel_pt_interrupt();
2343 * Checkpointed counters can lead to 'spurious' PMIs because the
2344 * rollback caused by the PMI will have cleared the overflow status
2345 * bit. Therefore always force probe these counters.
2347 status |= cpuc->intel_cp_status;
2349 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2350 struct perf_event *event = cpuc->events[bit];
2352 handled++;
2354 if (!test_bit(bit, cpuc->active_mask))
2355 continue;
2357 if (!intel_pmu_save_and_restart(event))
2358 continue;
2360 perf_sample_data_init(&data, 0, event->hw.last_period);
2362 if (has_branch_stack(event))
2363 data.br_stack = &cpuc->lbr_stack;
2365 if (perf_event_overflow(event, &data, regs))
2366 x86_pmu_stop(event, 0);
2370 * Repeat if there is more work to be done:
2372 status = intel_pmu_get_status();
2373 if (status)
2374 goto again;
2376 done:
2377 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2378 cpuc->enabled = pmu_enabled;
2379 if (pmu_enabled)
2380 __intel_pmu_enable_all(0, true);
2381 intel_bts_enable_local();
2384 * Only unmask the NMI after the overflow counters
2385 * have been reset. This avoids spurious NMIs on
2386 * Haswell CPUs.
2388 if (x86_pmu.late_ack)
2389 apic_write(APIC_LVTPC, APIC_DM_NMI);
2390 return handled;
2393 static struct event_constraint *
2394 intel_bts_constraints(struct perf_event *event)
2396 if (unlikely(intel_pmu_has_bts(event)))
2397 return &bts_constraint;
2399 return NULL;
2402 static int intel_alt_er(int idx, u64 config)
2404 int alt_idx = idx;
2406 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2407 return idx;
2409 if (idx == EXTRA_REG_RSP_0)
2410 alt_idx = EXTRA_REG_RSP_1;
2412 if (idx == EXTRA_REG_RSP_1)
2413 alt_idx = EXTRA_REG_RSP_0;
2415 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2416 return idx;
2418 return alt_idx;
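/*
 * Example: on parts with PMU_FL_HAS_RSP_1, two events that both want an
 * offcore response register first collide on EXTRA_REG_RSP_0; the
 * second event is retried with EXTRA_REG_RSP_1, provided its config
 * fits the alternate register's valid_mask.
 */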
2421 static void intel_fixup_er(struct perf_event *event, int idx)
2423 event->hw.extra_reg.idx = idx;
2425 if (idx == EXTRA_REG_RSP_0) {
2426 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2427 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2428 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2429 } else if (idx == EXTRA_REG_RSP_1) {
2430 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2431 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2432 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
2437 * manage allocation of shared extra msr for certain events
2439 * sharing can be:
2440 * per-cpu: to be shared between the various events on a single PMU
2441 * per-core: per-cpu + shared by HT threads
2443 static struct event_constraint *
2444 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2445 struct perf_event *event,
2446 struct hw_perf_event_extra *reg)
2448 struct event_constraint *c = &emptyconstraint;
2449 struct er_account *era;
2450 unsigned long flags;
2451 int idx = reg->idx;
2454 * reg->alloc can be set due to existing state, so for fake cpuc we
2455 * need to ignore this, otherwise we might fail to allocate proper fake
2456 * state for this extra reg constraint. Also see the comment below.
2458 if (reg->alloc && !cpuc->is_fake)
2459 return NULL; /* call x86_get_event_constraint() */
2461 again:
2462 era = &cpuc->shared_regs->regs[idx];
2464 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
2465 * passing a fake cpuc
2467 raw_spin_lock_irqsave(&era->lock, flags);
2469 if (!atomic_read(&era->ref) || era->config == reg->config) {
2472 * If it's a fake cpuc -- as per validate_{group,event}() we
2473 * shouldn't touch event state and we can avoid doing so
2474 * since both will only call get_event_constraints() once
2475 * on each event, this avoids the need for reg->alloc.
2477 * Not doing the ER fixup will only result in era->reg being
2478 * wrong, but since we won't actually try and program hardware
2479 * this isn't a problem either.
2481 if (!cpuc->is_fake) {
2482 if (idx != reg->idx)
2483 intel_fixup_er(event, idx);
2486 * x86_schedule_events() can call get_event_constraints()
2487 * multiple times on events in the case of incremental
2488 * scheduling. reg->alloc ensures we only do the ER
2489 * allocation once.
2491 reg->alloc = 1;
2494 /* lock in msr value */
2495 era->config = reg->config;
2496 era->reg = reg->reg;
2498 /* one more user */
2499 atomic_inc(&era->ref);
2502 * need to call x86_get_event_constraint()
2503 * to check if associated event has constraints
2505 c = NULL;
2506 } else {
2507 idx = intel_alt_er(idx, reg->config);
2508 if (idx != reg->idx) {
2509 raw_spin_unlock_irqrestore(&era->lock, flags);
2510 goto again;
2513 raw_spin_unlock_irqrestore(&era->lock, flags);
2515 return c;
2518 static void
2519 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2520 struct hw_perf_event_extra *reg)
2522 struct er_account *era;
2525 * Only put constraint if extra reg was actually allocated. Also takes
2526 * care of events which do not use an extra shared reg.
2528 * Also, if this is a fake cpuc we shouldn't touch any event state
2529 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2530 * either since it'll be thrown out.
2532 if (!reg->alloc || cpuc->is_fake)
2533 return;
2535 era = &cpuc->shared_regs->regs[reg->idx];
2537 /* one fewer user */
2538 atomic_dec(&era->ref);
2540 /* allocate again next time */
2541 reg->alloc = 0;
2544 static struct event_constraint *
2545 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2546 struct perf_event *event)
2548 struct event_constraint *c = NULL, *d;
2549 struct hw_perf_event_extra *xreg, *breg;
2551 xreg = &event->hw.extra_reg;
2552 if (xreg->idx != EXTRA_REG_NONE) {
2553 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2554 if (c == &emptyconstraint)
2555 return c;
2557 breg = &event->hw.branch_reg;
2558 if (breg->idx != EXTRA_REG_NONE) {
2559 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2560 if (d == &emptyconstraint) {
2561 __intel_shared_reg_put_constraints(cpuc, xreg);
2562 c = d;
2565 return c;
2568 struct event_constraint *
2569 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2570 struct perf_event *event)
2572 struct event_constraint *c;
2574 if (x86_pmu.event_constraints) {
2575 for_each_event_constraint(c, x86_pmu.event_constraints) {
2576 if ((event->hw.config & c->cmask) == c->code) {
2577 event->hw.flags |= c->flags;
2578 return c;
2583 return &unconstrained;
2586 static struct event_constraint *
2587 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2588 struct perf_event *event)
2590 struct event_constraint *c;
2592 c = intel_bts_constraints(event);
2593 if (c)
2594 return c;
2596 c = intel_shared_regs_constraints(cpuc, event);
2597 if (c)
2598 return c;
2600 c = intel_pebs_constraints(event);
2601 if (c)
2602 return c;
2604 return x86_get_event_constraints(cpuc, idx, event);
2607 static void
2608 intel_start_scheduling(struct cpu_hw_events *cpuc)
2610 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2611 struct intel_excl_states *xl;
2612 int tid = cpuc->excl_thread_id;
2615 * nothing needed if in group validation mode
2617 if (cpuc->is_fake || !is_ht_workaround_enabled())
2618 return;
2621 * no exclusion needed
2623 if (WARN_ON_ONCE(!excl_cntrs))
2624 return;
2626 xl = &excl_cntrs->states[tid];
2628 xl->sched_started = true;
2630 * lock shared state until we are done scheduling
2631 * in stop_event_scheduling()
2632 * makes scheduling appear as a transaction
2634 raw_spin_lock(&excl_cntrs->lock);
2637 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2639 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2640 struct event_constraint *c = cpuc->event_constraint[idx];
2641 struct intel_excl_states *xl;
2642 int tid = cpuc->excl_thread_id;
2644 if (cpuc->is_fake || !is_ht_workaround_enabled())
2645 return;
2647 if (WARN_ON_ONCE(!excl_cntrs))
2648 return;
2650 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2651 return;
2653 xl = &excl_cntrs->states[tid];
2655 lockdep_assert_held(&excl_cntrs->lock);
2657 if (c->flags & PERF_X86_EVENT_EXCL)
2658 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2659 else
2660 xl->state[cntr] = INTEL_EXCL_SHARED;
2663 static void
2664 intel_stop_scheduling(struct cpu_hw_events *cpuc)
2666 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2667 struct intel_excl_states *xl;
2668 int tid = cpuc->excl_thread_id;
2671 * nothing needed if in group validation mode
2673 if (cpuc->is_fake || !is_ht_workaround_enabled())
2674 return;
2676 * no exclusion needed
2678 if (WARN_ON_ONCE(!excl_cntrs))
2679 return;
2681 xl = &excl_cntrs->states[tid];
2683 xl->sched_started = false;
2685 * release shared state lock (acquired in intel_start_scheduling())
2687 raw_spin_unlock(&excl_cntrs->lock);
2690 static struct event_constraint *
2691 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2693 WARN_ON_ONCE(!cpuc->constraint_list);
2695 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2696 struct event_constraint *cx;
2699 * grab pre-allocated constraint entry
2701 cx = &cpuc->constraint_list[idx];
2704 * initialize dynamic constraint
2705 * with static constraint
2707 *cx = *c;
2710 * mark constraint as dynamic
2712 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2713 c = cx;
2716 return c;
2719 static struct event_constraint *
2720 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2721 int idx, struct event_constraint *c)
2723 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2724 struct intel_excl_states *xlo;
2725 int tid = cpuc->excl_thread_id;
2726 int is_excl, i;
2729 * validating a group does not require
2730 * enforcing cross-thread exclusion
2732 if (cpuc->is_fake || !is_ht_workaround_enabled())
2733 return c;
2736 * no exclusion needed
2738 if (WARN_ON_ONCE(!excl_cntrs))
2739 return c;
2742 * because we modify the constraint, we need
2743 * to make a copy. Static constraints come
2744 * from static const tables.
2746 * only needed when constraint has not yet
2747 * been cloned (marked dynamic)
2749 c = dyn_constraint(cpuc, c, idx);
2752 * From here on, the constraint is dynamic.
2753 * Either it was just allocated above, or it
2754 * was allocated during an earlier invocation
2755 * of this function
2759 * state of sibling HT
2761 xlo = &excl_cntrs->states[tid ^ 1];
2764 * event requires exclusive counter access
2765 * across HT threads
2767 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2768 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2769 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2770 if (!cpuc->n_excl++)
2771 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2775 * Modify static constraint with current dynamic
2776 * state of thread
2778 * EXCLUSIVE: sibling counter measuring exclusive event
2779 * SHARED : sibling counter measuring non-exclusive event
2780 * UNUSED : sibling counter unused
2782 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2784 * exclusive event in sibling counter
2785 * our corresponding counter cannot be used
2786 * regardless of our event
2788 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
2789 __clear_bit(i, c->idxmsk);
2791 * if we measure an exclusive event while the sibling
2792 * measures a non-exclusive one, then our counter cannot
2793 * be used
2795 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
2796 __clear_bit(i, c->idxmsk);
2800 * recompute actual bit weight for scheduling algorithm
2802 c->weight = hweight64(c->idxmsk64);
2805 * if we return an empty mask, then switch
2806 * back to static empty constraint to avoid
2807 * the cost of freeing later on
2809 if (c->weight == 0)
2810 c = &emptyconstraint;
2812 return c;
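/*
 * Example: if the sibling thread holds counter 0 in EXCLUSIVE state,
 * bit 0 is removed from our constraint mask no matter what we schedule;
 * a SHARED sibling counter only blocks us when our own event is
 * exclusive.
 */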
2815 static struct event_constraint *
2816 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2817 struct perf_event *event)
2819 struct event_constraint *c1 = NULL;
2820 struct event_constraint *c2;
2822 if (idx >= 0) /* fake does < 0 */
2823 c1 = cpuc->event_constraint[idx];
2826 * first time only
2827 * - static constraint: no change across incremental scheduling calls
2828 * - dynamic constraint: handled by intel_get_excl_constraints()
2830 c2 = __intel_get_event_constraints(cpuc, idx, event);
2831 if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2832 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2833 c1->weight = c2->weight;
2834 c2 = c1;
2837 if (cpuc->excl_cntrs)
2838 return intel_get_excl_constraints(cpuc, event, idx, c2);
2840 return c2;
2843 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2844 struct perf_event *event)
2846 struct hw_perf_event *hwc = &event->hw;
2847 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2848 int tid = cpuc->excl_thread_id;
2849 struct intel_excl_states *xl;
2852 * nothing needed if in group validation mode
2854 if (cpuc->is_fake)
2855 return;
2857 if (WARN_ON_ONCE(!excl_cntrs))
2858 return;
2860 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2861 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2862 if (!--cpuc->n_excl)
2863 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2867 * If event was actually assigned, then mark the counter state as
2868 * unused now.
2870 if (hwc->idx >= 0) {
2871 xl = &excl_cntrs->states[tid];
2874 * put_constraint may be called from x86_schedule_events(),
2875 * which already holds the lock, so make the locking
2876 * conditional here.
2878 if (!xl->sched_started)
2879 raw_spin_lock(&excl_cntrs->lock);
2881 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
2883 if (!xl->sched_started)
2884 raw_spin_unlock(&excl_cntrs->lock);
2888 static void
2889 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
2890 struct perf_event *event)
2892 struct hw_perf_event_extra *reg;
2894 reg = &event->hw.extra_reg;
2895 if (reg->idx != EXTRA_REG_NONE)
2896 __intel_shared_reg_put_constraints(cpuc, reg);
2898 reg = &event->hw.branch_reg;
2899 if (reg->idx != EXTRA_REG_NONE)
2900 __intel_shared_reg_put_constraints(cpuc, reg);
2903 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2904 struct perf_event *event)
2906 intel_put_shared_regs_event_constraints(cpuc, event);
2909 * if the PMU has exclusive counter restrictions, then
2910 * all events are subject to them and must call the
2911 * put_excl_constraints() routine
2913 if (cpuc->excl_cntrs)
2914 intel_put_excl_constraints(cpuc, event);
2917 static void intel_pebs_aliases_core2(struct perf_event *event)
2919 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2921 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2922 * (0x003c) so that we can use it with PEBS.
2924 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2925 * PEBS capable. However we can use INST_RETIRED.ANY_P
2926 * (0x00c0), which is a PEBS capable event, to get the same
2927 * count.
2929 * INST_RETIRED.ANY_P counts the number of cycles that retire
2930 * CNTMASK instructions. By setting CNTMASK to a value (16)
2931 * larger than the maximum number of instructions that can be
2932 * retired per cycle (4) and then inverting the condition, we
2933 * count all cycles that retire 16 or fewer instructions, which
2934 * is every cycle.
2936 * Thereby we gain a PEBS capable cycle counter.
2938 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
2940 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2941 event->hw.config = alt_config;
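/*
 * For reference, assuming the architectural PERFEVTSEL layout (event in
 * bits 7:0, umask in 15:8, inv in bit 23, cmask in 31:24), the alias
 * above encodes to raw config 0x108000c0, i.e. the same event a user
 * could request by hand with "perf stat -e r108000c0".
 */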
2945 static void intel_pebs_aliases_snb(struct perf_event *event)
2947 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2949 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2950 * (0x003c) so that we can use it with PEBS.
2952 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2953 * PEBS capable. However we can use UOPS_RETIRED.ALL
2954 * (0x01c2), which is a PEBS capable event, to get the same
2955 * count.
2957 * UOPS_RETIRED.ALL counts the number of cycles that retire
2958 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
2959 * larger than the maximum number of micro-ops that can be
2960 * retired per cycle (4) and then inverting the condition, we
2961 * count all cycles that retire 16 or fewer micro-ops, which
2962 * is every cycle.
2964 * Thereby we gain a PEBS capable cycle counter.
2966 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
2968 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2969 event->hw.config = alt_config;
2973 static void intel_pebs_aliases_precdist(struct perf_event *event)
2975 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2977 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2978 * (0x003c) so that we can use it with PEBS.
2980 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2981 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
2982 * (0x01c0), which is a PEBS capable event, to get the same
2983 * count.
2985 * The PREC_DIST event has special support to minimize sample
2986 * shadowing effects. One drawback is that it can only be
2987 * programmed on counter 1, but that seems like an
2988 * acceptable trade-off.
2990 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
2992 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2993 event->hw.config = alt_config;
2997 static void intel_pebs_aliases_ivb(struct perf_event *event)
2999 if (event->attr.precise_ip < 3)
3000 return intel_pebs_aliases_snb(event);
3001 return intel_pebs_aliases_precdist(event);
3004 static void intel_pebs_aliases_skl(struct perf_event *event)
3006 if (event->attr.precise_ip < 3)
3007 return intel_pebs_aliases_core2(event);
3008 return intel_pebs_aliases_precdist(event);
3011 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3013 unsigned long flags = x86_pmu.large_pebs_flags;
3015 if (event->attr.use_clockid)
3016 flags &= ~PERF_SAMPLE_TIME;
3017 if (!event->attr.exclude_kernel)
3018 flags &= ~PERF_SAMPLE_REGS_USER;
3019 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3020 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3021 return flags;
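/*
 * The mask computed above is the set of sample_type bits that can be
 * reconstructed from the PEBS record alone. intel_pmu_hw_config() below
 * only grants PERF_X86_EVENT_LARGE_PEBS when the event requests nothing
 * outside this mask, since with multiple records per interrupt there is
 * no per-sample PMI to fill in the rest.
 */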
3024 static int intel_pmu_bts_config(struct perf_event *event)
3026 struct perf_event_attr *attr = &event->attr;
3028 if (unlikely(intel_pmu_has_bts(event))) {
3029 /* BTS is not supported by this architecture. */
3030 if (!x86_pmu.bts_active)
3031 return -EOPNOTSUPP;
3033 /* BTS is currently only allowed for user-mode. */
3034 if (!attr->exclude_kernel)
3035 return -EOPNOTSUPP;
3037 /* BTS is not allowed for precise events. */
3038 if (attr->precise_ip)
3039 return -EOPNOTSUPP;
3041 /* disallow bts if conflicting events are present */
3042 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3043 return -EBUSY;
3045 event->destroy = hw_perf_lbr_event_destroy;
3048 return 0;
3051 static int core_pmu_hw_config(struct perf_event *event)
3053 int ret = x86_pmu_hw_config(event);
3055 if (ret)
3056 return ret;
3058 return intel_pmu_bts_config(event);
3061 static int intel_pmu_hw_config(struct perf_event *event)
3063 int ret = x86_pmu_hw_config(event);
3065 if (ret)
3066 return ret;
3068 ret = intel_pmu_bts_config(event);
3069 if (ret)
3070 return ret;
3072 if (event->attr.precise_ip) {
3073 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3074 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3075 if (!(event->attr.sample_type &
3076 ~intel_pmu_large_pebs_flags(event)))
3077 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3079 if (x86_pmu.pebs_aliases)
3080 x86_pmu.pebs_aliases(event);
3082 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3083 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3086 if (needs_branch_stack(event)) {
3087 ret = intel_pmu_setup_lbr_filter(event);
3088 if (ret)
3089 return ret;
3092 * BTS is set up earlier in this path, so don't account twice
3094 if (!unlikely(intel_pmu_has_bts(event))) {
3095 /* disallow lbr if conflicting events are present */
3096 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3097 return -EBUSY;
3099 event->destroy = hw_perf_lbr_event_destroy;
3103 if (event->attr.type != PERF_TYPE_RAW)
3104 return 0;
3106 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3107 return 0;
3109 if (x86_pmu.version < 3)
3110 return -EINVAL;
3112 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3113 return -EACCES;
3115 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3117 return 0;
3120 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3122 if (x86_pmu.guest_get_msrs)
3123 return x86_pmu.guest_get_msrs(nr);
3124 *nr = 0;
3125 return NULL;
3127 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3129 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3131 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3132 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3134 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3135 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3136 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3138 * If a PMU counter has PEBS enabled, it is not enough to disable the
3139 * counter on a guest entry, since the PEBS memory write can overshoot
3140 * the guest entry and corrupt guest memory. Disabling PEBS solves the problem.
3142 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3143 arr[1].host = cpuc->pebs_enabled;
3144 arr[1].guest = 0;
3146 *nr = 2;
3147 return arr;
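/*
 * Example: with one host-only and one guest-only event active, the
 * switch list above has KVM load a GLOBAL_CTRL value on VM entry that
 * enables only the guest-visible counters, and zeroes PEBS_ENABLE so no
 * in-flight PEBS write can land in guest memory.
 */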
3150 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3152 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3153 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3154 int idx;
3156 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3157 struct perf_event *event = cpuc->events[idx];
3159 arr[idx].msr = x86_pmu_config_addr(idx);
3160 arr[idx].host = arr[idx].guest = 0;
3162 if (!test_bit(idx, cpuc->active_mask))
3163 continue;
3165 arr[idx].host = arr[idx].guest =
3166 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3168 if (event->attr.exclude_host)
3169 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3170 else if (event->attr.exclude_guest)
3171 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3174 *nr = x86_pmu.num_counters;
3175 return arr;
3178 static void core_pmu_enable_event(struct perf_event *event)
3180 if (!event->attr.exclude_host)
3181 x86_pmu_enable_event(event);
3184 static void core_pmu_enable_all(int added)
3186 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3187 int idx;
3189 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3190 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3192 if (!test_bit(idx, cpuc->active_mask) ||
3193 cpuc->events[idx]->attr.exclude_host)
3194 continue;
3196 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3200 static int hsw_hw_config(struct perf_event *event)
3202 int ret = intel_pmu_hw_config(event);
3204 if (ret)
3205 return ret;
3206 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3207 return 0;
3208 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3211 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3212 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3213 * this combination.
3215 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3216 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3217 event->attr.precise_ip > 0))
3218 return -EOPNOTSUPP;
3220 if (event_is_checkpointed(event)) {
3222 * Sampling of checkpointed events can cause situations where
3223 * the CPU constantly aborts because of an overflow, which is
3224 * then checkpointed back and ignored. Forbid checkpointing
3225 * for sampling.
3227 * But still allow a long sampling period, so that perf stat
3228 * from KVM works.
3230 if (event->attr.sample_period > 0 &&
3231 event->attr.sample_period < 0x7fffffff)
3232 return -EOPNOTSUPP;
3234 return 0;
3237 static struct event_constraint counter0_constraint =
3238 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3240 static struct event_constraint counter2_constraint =
3241 EVENT_CONSTRAINT(0, 0x4, 0);
3243 static struct event_constraint *
3244 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3245 struct perf_event *event)
3247 struct event_constraint *c;
3249 c = intel_get_event_constraints(cpuc, idx, event);
3251 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3252 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3253 if (c->idxmsk64 & (1U << 2))
3254 return &counter2_constraint;
3255 return &emptyconstraint;
3258 return c;
3261 static struct event_constraint *
3262 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3263 struct perf_event *event)
3265 struct event_constraint *c;
3267 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3268 if (event->attr.precise_ip == 3)
3269 return &counter0_constraint;
3271 c = intel_get_event_constraints(cpuc, idx, event);
3273 return c;
3276 static bool allow_tsx_force_abort = true;
3278 static struct event_constraint *
3279 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3280 struct perf_event *event)
3282 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3285 * Without TFA we must not use PMC3.
3287 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
3288 c = dyn_constraint(cpuc, c, idx);
3289 c->idxmsk64 &= ~(1ULL << 3);
3290 c->weight--;
3293 return c;
3297 * Broadwell:
3299 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3300 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3301 * the two to enforce a minimum period of 128 (the smallest value that has bits
3302 * 0-5 cleared and >= 100).
3304 * Because of how the code in x86_perf_event_set_period() works, the truncation
3305 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3306 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3308 * Therefore the effective (average) period matches the requested period,
3309 * despite coarser hardware granularity.
3311 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3313 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3314 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3315 if (left < 128)
3316 left = 128;
3317 left &= ~0x3fULL;
3319 return left;
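/*
 * Worked example: a requested INST_RETIRED.ALL period of 1000003 first
 * passes the >= 128 check and then has its low 6 bits cleared, yielding
 * 1000000; a requested period of 100 is raised to 128, satisfying both
 * BDM11 and BDM55.
 */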
3322 PMU_FORMAT_ATTR(event, "config:0-7" );
3323 PMU_FORMAT_ATTR(umask, "config:8-15" );
3324 PMU_FORMAT_ATTR(edge, "config:18" );
3325 PMU_FORMAT_ATTR(pc, "config:19" );
3326 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3327 PMU_FORMAT_ATTR(inv, "config:23" );
3328 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3329 PMU_FORMAT_ATTR(in_tx, "config:32");
3330 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
3332 static struct attribute *intel_arch_formats_attr[] = {
3333 &format_attr_event.attr,
3334 &format_attr_umask.attr,
3335 &format_attr_edge.attr,
3336 &format_attr_pc.attr,
3337 &format_attr_inv.attr,
3338 &format_attr_cmask.attr,
3339 NULL,
3342 ssize_t intel_event_sysfs_show(char *page, u64 config)
3344 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3346 return x86_event_sysfs_show(page, config, event);
3349 static struct intel_shared_regs *allocate_shared_regs(int cpu)
3351 struct intel_shared_regs *regs;
3352 int i;
3354 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3355 GFP_KERNEL, cpu_to_node(cpu));
3356 if (regs) {
3358 * initialize the locks to keep lockdep happy
3360 for (i = 0; i < EXTRA_REG_MAX; i++)
3361 raw_spin_lock_init(&regs->regs[i].lock);
3363 regs->core_id = -1;
3365 return regs;
3368 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3370 struct intel_excl_cntrs *c;
3372 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3373 GFP_KERNEL, cpu_to_node(cpu));
3374 if (c) {
3375 raw_spin_lock_init(&c->lock);
3376 c->core_id = -1;
3378 return c;
3382 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3384 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3385 cpuc->shared_regs = allocate_shared_regs(cpu);
3386 if (!cpuc->shared_regs)
3387 goto err;
3390 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3391 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3393 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3394 if (!cpuc->constraint_list)
3395 goto err_shared_regs;
3398 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3399 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3400 if (!cpuc->excl_cntrs)
3401 goto err_constraint_list;
3403 cpuc->excl_thread_id = 0;
3406 return 0;
3408 err_constraint_list:
3409 kfree(cpuc->constraint_list);
3410 cpuc->constraint_list = NULL;
3412 err_shared_regs:
3413 kfree(cpuc->shared_regs);
3414 cpuc->shared_regs = NULL;
3416 err:
3417 return -ENOMEM;
3420 static int intel_pmu_cpu_prepare(int cpu)
3422 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3425 static void flip_smm_bit(void *data)
3427 unsigned long set = *(unsigned long *)data;
3429 if (set > 0) {
3430 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3431 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3432 } else {
3433 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3434 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3438 static void intel_pmu_cpu_starting(int cpu)
3440 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3441 int core_id = topology_core_id(cpu);
3442 int i;
3444 init_debug_store_on_cpu(cpu);
3446 * Deal with CPUs that don't clear their LBRs on power-up.
3448 intel_pmu_lbr_reset();
3450 cpuc->lbr_sel = NULL;
3452 if (x86_pmu.flags & PMU_FL_TFA) {
3453 WARN_ON_ONCE(cpuc->tfa_shadow);
3454 cpuc->tfa_shadow = ~0ULL;
3455 intel_set_tfa(cpuc, false);
3458 if (x86_pmu.version > 1)
3459 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3461 if (!cpuc->shared_regs)
3462 return;
3464 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3465 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3466 struct intel_shared_regs *pc;
3468 pc = per_cpu(cpu_hw_events, i).shared_regs;
3469 if (pc && pc->core_id == core_id) {
3470 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3471 cpuc->shared_regs = pc;
3472 break;
3475 cpuc->shared_regs->core_id = core_id;
3476 cpuc->shared_regs->refcnt++;
3479 if (x86_pmu.lbr_sel_map)
3480 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3482 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3483 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3484 struct cpu_hw_events *sibling;
3485 struct intel_excl_cntrs *c;
3487 sibling = &per_cpu(cpu_hw_events, i);
3488 c = sibling->excl_cntrs;
3489 if (c && c->core_id == core_id) {
3490 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3491 cpuc->excl_cntrs = c;
3492 if (!sibling->excl_thread_id)
3493 cpuc->excl_thread_id = 1;
3494 break;
3497 cpuc->excl_cntrs->core_id = core_id;
3498 cpuc->excl_cntrs->refcnt++;
3502 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3504 struct intel_excl_cntrs *c;
3506 c = cpuc->excl_cntrs;
3507 if (c) {
3508 if (c->core_id == -1 || --c->refcnt == 0)
3509 kfree(c);
3510 cpuc->excl_cntrs = NULL;
3513 kfree(cpuc->constraint_list);
3514 cpuc->constraint_list = NULL;
3517 static void intel_pmu_cpu_dying(int cpu)
3519 fini_debug_store_on_cpu(cpu);
3522 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3524 struct intel_shared_regs *pc;
3526 pc = cpuc->shared_regs;
3527 if (pc) {
3528 if (pc->core_id == -1 || --pc->refcnt == 0)
3529 kfree(pc);
3530 cpuc->shared_regs = NULL;
3533 free_excl_cntrs(cpuc);
3536 static void intel_pmu_cpu_dead(int cpu)
3538 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3541 static void intel_pmu_sched_task(struct perf_event_context *ctx,
3542 bool sched_in)
3544 intel_pmu_pebs_sched_task(ctx, sched_in);
3545 intel_pmu_lbr_sched_task(ctx, sched_in);
3548 static int intel_pmu_check_period(struct perf_event *event, u64 value)
3550 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3553 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3555 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3557 PMU_FORMAT_ATTR(frontend, "config1:0-23");
3559 static struct attribute *intel_arch3_formats_attr[] = {
3560 &format_attr_event.attr,
3561 &format_attr_umask.attr,
3562 &format_attr_edge.attr,
3563 &format_attr_pc.attr,
3564 &format_attr_any.attr,
3565 &format_attr_inv.attr,
3566 &format_attr_cmask.attr,
3567 NULL,
3570 static struct attribute *hsw_format_attr[] = {
3571 &format_attr_in_tx.attr,
3572 &format_attr_in_tx_cp.attr,
3573 &format_attr_offcore_rsp.attr,
3574 &format_attr_ldlat.attr,
3575 NULL
3578 static struct attribute *nhm_format_attr[] = {
3579 &format_attr_offcore_rsp.attr,
3580 &format_attr_ldlat.attr,
3581 NULL
3584 static struct attribute *slm_format_attr[] = {
3585 &format_attr_offcore_rsp.attr,
3586 NULL
3589 static struct attribute *skl_format_attr[] = {
3590 &format_attr_frontend.attr,
3591 NULL,
3594 static __initconst const struct x86_pmu core_pmu = {
3595 .name = "core",
3596 .handle_irq = x86_pmu_handle_irq,
3597 .disable_all = x86_pmu_disable_all,
3598 .enable_all = core_pmu_enable_all,
3599 .enable = core_pmu_enable_event,
3600 .disable = x86_pmu_disable_event,
3601 .hw_config = core_pmu_hw_config,
3602 .schedule_events = x86_schedule_events,
3603 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3604 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3605 .event_map = intel_pmu_event_map,
3606 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3607 .apic = 1,
3608 .large_pebs_flags = LARGE_PEBS_FLAGS,
3611 * Intel PMCs cannot be accessed sanely above 32-bit width,
3612 * so we install an artificial 1<<31 period regardless of
3613 * the generic event period:
3615 .max_period = (1ULL<<31) - 1,
3616 .get_event_constraints = intel_get_event_constraints,
3617 .put_event_constraints = intel_put_event_constraints,
3618 .event_constraints = intel_core_event_constraints,
3619 .guest_get_msrs = core_guest_get_msrs,
3620 .format_attrs = intel_arch_formats_attr,
3621 .events_sysfs_show = intel_event_sysfs_show,
3624 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
3625 * together with PMU version 1 and thus be using core_pmu with
3626 * shared_regs. We need the following callbacks here to allocate
3627 * it properly.
3629 .cpu_prepare = intel_pmu_cpu_prepare,
3630 .cpu_starting = intel_pmu_cpu_starting,
3631 .cpu_dying = intel_pmu_cpu_dying,
3632 .cpu_dead = intel_pmu_cpu_dead,
3634 .check_period = intel_pmu_check_period,
3637 static struct attribute *intel_pmu_attrs[];
3639 static __initconst const struct x86_pmu intel_pmu = {
3640 .name = "Intel",
3641 .handle_irq = intel_pmu_handle_irq,
3642 .disable_all = intel_pmu_disable_all,
3643 .enable_all = intel_pmu_enable_all,
3644 .enable = intel_pmu_enable_event,
3645 .disable = intel_pmu_disable_event,
3646 .add = intel_pmu_add_event,
3647 .del = intel_pmu_del_event,
3648 .read = intel_pmu_read_event,
3649 .hw_config = intel_pmu_hw_config,
3650 .schedule_events = x86_schedule_events,
3651 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3652 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3653 .event_map = intel_pmu_event_map,
3654 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3655 .apic = 1,
3656 .large_pebs_flags = LARGE_PEBS_FLAGS,
3658 * Intel PMCs cannot be accessed sanely above 32 bit width,
3659 * so we install an artificial 1<<31 period regardless of
3660 * the generic event period:
3662 .max_period = (1ULL << 31) - 1,
3663 .get_event_constraints = intel_get_event_constraints,
3664 .put_event_constraints = intel_put_event_constraints,
3665 .pebs_aliases = intel_pebs_aliases_core2,
3667 .format_attrs = intel_arch3_formats_attr,
3668 .events_sysfs_show = intel_event_sysfs_show,
3670 .attrs = intel_pmu_attrs,
3672 .cpu_prepare = intel_pmu_cpu_prepare,
3673 .cpu_starting = intel_pmu_cpu_starting,
3674 .cpu_dying = intel_pmu_cpu_dying,
3675 .cpu_dead = intel_pmu_cpu_dead,
3677 .guest_get_msrs = intel_guest_get_msrs,
3678 .sched_task = intel_pmu_sched_task,
3680 .check_period = intel_pmu_check_period,
3683 static __init void intel_clovertown_quirk(void)
3686 * PEBS is unreliable due to:
3688 * AJ67 - PEBS may experience CPL leaks
3689 * AJ68 - PEBS PMI may be delayed by one event
3690 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3691 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3693 * AJ67 could be worked around by restricting the OS/USR flags.
3694 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3696 * AJ106 could possibly be worked around by not allowing LBR
3697 * usage from PEBS, including the fixup.
3698 * AJ68 could possibly be worked around by always programming
3699 * a pebs_event_reset[0] value and coping with the lost events.
3701 * But taken together it might just make sense to not enable PEBS on
3702 * these chips.
3704 pr_warn("PEBS disabled due to CPU errata\n");
3705 x86_pmu.pebs = 0;
3706 x86_pmu.pebs_constraints = NULL;
3709 static int intel_snb_pebs_broken(int cpu)
3711 u32 rev = UINT_MAX; /* default to broken for unknown models */
3713 switch (cpu_data(cpu).x86_model) {
3714 case INTEL_FAM6_SANDYBRIDGE:
3715 rev = 0x28;
3716 break;
3718 case INTEL_FAM6_SANDYBRIDGE_X:
3719 switch (cpu_data(cpu).x86_stepping) {
3720 case 6: rev = 0x618; break;
3721 case 7: rev = 0x70c; break;
3725 return (cpu_data(cpu).microcode < rev);
3728 static void intel_snb_check_microcode(void)
3730 int pebs_broken = 0;
3731 int cpu;
3733 for_each_online_cpu(cpu) {
3734 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
3735 break;
3738 if (pebs_broken == x86_pmu.pebs_broken)
3739 return;
3742 * Serialized by the microcode lock.
3744 if (x86_pmu.pebs_broken) {
3745 pr_info("PEBS enabled due to microcode update\n");
3746 x86_pmu.pebs_broken = 0;
3747 } else {
3748 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
3749 x86_pmu.pebs_broken = 1;
3753 static bool is_lbr_from(unsigned long msr)
3755 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
3757 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
3761 * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
3762 * This function tests whether the input MSR can be safely accessed.
3764 static bool check_msr(unsigned long msr, u64 mask)
3766 u64 val_old, val_new, val_tmp;
3769 * Read the current value, change it and read it back to see if it
3770 * matches; this is needed to detect certain hardware emulators
3771 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3773 if (rdmsrl_safe(msr, &val_old))
3774 return false;
3777 * Only change the bits which can be updated by wrmsrl.
3779 val_tmp = val_old ^ mask;
3781 if (is_lbr_from(msr))
3782 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
3784 if (wrmsrl_safe(msr, val_tmp) ||
3785 rdmsrl_safe(msr, &val_new))
3786 return false;
3789 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
3790 * should equal rdmsrl()'s even with the quirk.
3792 if (val_new != val_tmp)
3793 return false;
3795 if (is_lbr_from(msr))
3796 val_old = lbr_from_signext_quirk_wr(val_old);
3798 /* At this point it is certain that the MSR can be safely accessed.
3799 * Restore the old value and return.
3801 wrmsrl(msr, val_old);
3803 return true;
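/*
 * Illustrative (hypothetical) use, in the style of the LBR setup code:
 *
 *   if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *           x86_pmu.lbr_nr = 0;
 *
 * i.e. fall back to no-LBR operation when a hypervisor exposes the MSR
 * but silently ignores writes to it.
 */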
3806 static __init void intel_sandybridge_quirk(void)
3808 x86_pmu.check_microcode = intel_snb_check_microcode;
3809 cpus_read_lock();
3810 intel_snb_check_microcode();
3811 cpus_read_unlock();
3814 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
3815 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
3816 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
3817 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
3818 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
3819 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
3820 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
3821 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
3824 static __init void intel_arch_events_quirk(void)
3826 int bit;
3828 /* disable events that are reported as not present by cpuid */
3829 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
3830 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
3831 pr_warn("CPUID marked event: \'%s\' unavailable\n",
3832 intel_arch_events_map[bit].name);
3836 static __init void intel_nehalem_quirk(void)
3838 union cpuid10_ebx ebx;
3840 ebx.full = x86_pmu.events_maskl;
3841 if (ebx.split.no_branch_misses_retired) {
3843 * Erratum AAJ80 detected; we work around it by using
3844 * the BR_MISP_EXEC.ANY event. This will over-count
3845 * branch-misses, but it's still much better than the
3846 * architectural event which is often completely bogus:
3848 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
3849 ebx.split.no_branch_misses_retired = 0;
3850 x86_pmu.events_maskl = ebx.full;
3851 pr_info("CPU erratum AAJ80 worked around\n");
3856 * enable software workaround for errata:
3857 * SNB: BJ122
3858 * IVB: BV98
3859 * HSW: HSD29
3861 * Only needed when HT is enabled. However, detecting
3862 * whether HT is enabled is difficult (model specific). So instead,
3863 * we enable the workaround during early boot, and verify whether
3864 * it is needed in a later initcall phase once we have valid
3865 * topology information to check if HT is actually enabled.
3867 static __init void intel_ht_bug(void)
3869 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
3871 x86_pmu.start_scheduling = intel_start_scheduling;
3872 x86_pmu.commit_scheduling = intel_commit_scheduling;
3873 x86_pmu.stop_scheduling = intel_stop_scheduling;
3876 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
3877 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
3879 /* Haswell special events */
3880 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
3881 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
3882 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
3883 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
3884 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
3885 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
3886 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
3887 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
3888 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
3889 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
3890 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
3891 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
3893 static struct attribute *hsw_events_attrs[] = {
3894 EVENT_PTR(mem_ld_hsw),
3895 EVENT_PTR(mem_st_hsw),
3896 EVENT_PTR(td_slots_issued),
3897 EVENT_PTR(td_slots_retired),
3898 EVENT_PTR(td_fetch_bubbles),
3899 EVENT_PTR(td_total_slots),
3900 EVENT_PTR(td_total_slots_scale),
3901 EVENT_PTR(td_recovery_bubbles),
3902 EVENT_PTR(td_recovery_bubbles_scale),
3903 NULL
3906 static struct attribute *hsw_tsx_events_attrs[] = {
3907 EVENT_PTR(tx_start),
3908 EVENT_PTR(tx_commit),
3909 EVENT_PTR(tx_abort),
3910 EVENT_PTR(tx_capacity),
3911 EVENT_PTR(tx_conflict),
3912 EVENT_PTR(el_start),
3913 EVENT_PTR(el_commit),
3914 EVENT_PTR(el_abort),
3915 EVENT_PTR(el_capacity),
3916 EVENT_PTR(el_conflict),
3917 EVENT_PTR(cycles_t),
3918 EVENT_PTR(cycles_ct),
3919 NULL
3922 static __init struct attribute **get_hsw_events_attrs(void)
3924 return boot_cpu_has(X86_FEATURE_RTM) ?
3925 merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
3926 hsw_events_attrs;
3929 static ssize_t freeze_on_smi_show(struct device *cdev,
3930 struct device_attribute *attr,
3931 char *buf)
3933 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
3936 static DEFINE_MUTEX(freeze_on_smi_mutex);
3938 static ssize_t freeze_on_smi_store(struct device *cdev,
3939 struct device_attribute *attr,
3940 const char *buf, size_t count)
3942 unsigned long val;
3943 ssize_t ret;
3945 ret = kstrtoul(buf, 0, &val);
3946 if (ret)
3947 return ret;
3949 if (val > 1)
3950 return -EINVAL;
3952 mutex_lock(&freeze_on_smi_mutex);
3954 if (x86_pmu.attr_freeze_on_smi == val)
3955 goto done;
3957 x86_pmu.attr_freeze_on_smi = val;
3959 get_online_cpus();
3960 on_each_cpu(flip_smm_bit, &val, 1);
3961 put_online_cpus();
3962 done:
3963 mutex_unlock(&freeze_on_smi_mutex);
3965 return count;
3966 }
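/*
 * Store path sketch: the input is validated to 0 or 1 and, only if the
 * value changed, flip_smm_bit() runs on every online CPU under the
 * hotplug read lock, keeping the SMM-freeze bit in IA32_DEBUGCTL
 * consistent system-wide.
 */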
3968 static DEVICE_ATTR_RW(freeze_on_smi);
3970 static ssize_t branches_show(struct device *cdev,
3971 struct device_attribute *attr,
3972 char *buf)
3973 {
3974 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
3975 }
3977 static DEVICE_ATTR_RO(branches);
3979 static struct attribute *lbr_attrs[] = {
3980 &dev_attr_branches.attr,
3981 NULL
3982 };
3984 static char pmu_name_str[30];
3986 static ssize_t pmu_name_show(struct device *cdev,
3987 struct device_attribute *attr,
3988 char *buf)
3989 {
3990 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
3991 }
3993 static DEVICE_ATTR_RO(pmu_name);
3995 static struct attribute *intel_pmu_caps_attrs[] = {
3996 &dev_attr_pmu_name.attr,
3997 NULL
3998 };
4000 static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
4002 static struct attribute *intel_pmu_attrs[] = {
4003 &dev_attr_freeze_on_smi.attr,
4004 NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
4005 NULL,
4006 };
4008 __init int intel_pmu_init(void)
4009 {
4010 struct attribute **extra_attr = NULL;
4011 struct attribute **to_free = NULL;
4012 union cpuid10_edx edx;
4013 union cpuid10_eax eax;
4014 union cpuid10_ebx ebx;
4015 struct event_constraint *c;
4016 unsigned int unused;
4017 struct extra_reg *er;
4018 int version, i;
4019 char *name;
4021 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4022 switch (boot_cpu_data.x86) {
4023 case 0x6:
4024 return p6_pmu_init();
4025 case 0xb:
4026 return knc_pmu_init();
4027 case 0xf:
4028 return p4_pmu_init();
4029 }
4030 return -ENODEV;
4031 }
4033 /*
4034 * Check whether the Architectural PerfMon supports
4035 * Branch Misses Retired hw_event or not.
4036 */
4037 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4038 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4039 return -ENODEV;
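/*
 * CPUID leaf 0xa EAX layout (union cpuid10_eax): bits 7:0 perfmon
 * version, 15:8 number of generic counters, 23:16 counter bit width,
 * 31:24 length of the architectural event mask reported in EBX.
 */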
4041 version = eax.split.version_id;
4042 if (version < 2)
4043 x86_pmu = core_pmu;
4044 else
4045 x86_pmu = intel_pmu;
4047 x86_pmu.version = version;
4048 x86_pmu.num_counters = eax.split.num_counters;
4049 x86_pmu.cntval_bits = eax.split.bit_width;
4050 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4052 x86_pmu.events_maskl = ebx.full;
4053 x86_pmu.events_mask_len = eax.split.mask_length;
4055 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4057 /*
4058 * Quirk: v2 perfmon does not report fixed-purpose events, so
4059 * assume at least 3 events, when not running in a hypervisor:
4060 */
4061 if (version > 1) {
4062 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4064 x86_pmu.num_counters_fixed =
4065 max((int)edx.split.num_counters_fixed, assume);
4066 }
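/*
 * Example: a v2 CPU reporting num_counters_fixed == 0 on bare metal is
 * still assumed to have the three architectural fixed counters; under a
 * hypervisor the CPUID-reported value is trusted as-is.
 */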
4068 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4069 u64 capabilities;
4071 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4072 x86_pmu.intel_cap.capabilities = capabilities;
4073 }
4075 intel_ds_init();
4077 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4079 /*
4080 * Install the hw-cache-events table:
4081 */
4082 switch (boot_cpu_data.x86_model) {
4083 case INTEL_FAM6_CORE_YONAH:
4084 pr_cont("Core events, ");
4085 name = "core";
4086 break;
4088 case INTEL_FAM6_CORE2_MEROM:
4089 x86_add_quirk(intel_clovertown_quirk);
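/* fall through: Merom/Clovertown shares the common Core2 setup below */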
4090 case INTEL_FAM6_CORE2_MEROM_L:
4091 case INTEL_FAM6_CORE2_PENRYN:
4092 case INTEL_FAM6_CORE2_DUNNINGTON:
4093 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4094 sizeof(hw_cache_event_ids));
4096 intel_pmu_lbr_init_core();
4098 x86_pmu.event_constraints = intel_core2_event_constraints;
4099 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4100 pr_cont("Core2 events, ");
4101 name = "core2";
4102 break;
4104 case INTEL_FAM6_NEHALEM:
4105 case INTEL_FAM6_NEHALEM_EP:
4106 case INTEL_FAM6_NEHALEM_EX:
4107 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4108 sizeof(hw_cache_event_ids));
4109 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4110 sizeof(hw_cache_extra_regs));
4112 intel_pmu_lbr_init_nhm();
4114 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4115 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4116 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4117 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4119 x86_pmu.cpu_events = nhm_events_attrs;
4121 /* UOPS_ISSUED.STALLED_CYCLES */
4122 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4123 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4124 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4125 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4126 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
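/*
 * Encoding note: with .cmask=1 and .inv=1 the counter increments on
 * cycles where fewer than one such uop was issued/executed, turning the
 * two events above into per-cycle front-end and back-end stall counts.
 */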
4128 intel_pmu_pebs_data_source_nhm();
4129 x86_add_quirk(intel_nehalem_quirk);
4130 x86_pmu.pebs_no_tlb = 1;
4131 extra_attr = nhm_format_attr;
4133 pr_cont("Nehalem events, ");
4134 name = "nehalem";
4135 break;
4137 case INTEL_FAM6_ATOM_BONNELL:
4138 case INTEL_FAM6_ATOM_BONNELL_MID:
4139 case INTEL_FAM6_ATOM_SALTWELL:
4140 case INTEL_FAM6_ATOM_SALTWELL_MID:
4141 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4142 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4143 sizeof(hw_cache_event_ids));
4145 intel_pmu_lbr_init_atom();
4147 x86_pmu.event_constraints = intel_gen_event_constraints;
4148 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4149 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4150 pr_cont("Atom events, ");
4151 name = "bonnell";
4152 break;
4154 case INTEL_FAM6_ATOM_SILVERMONT:
4155 case INTEL_FAM6_ATOM_SILVERMONT_X:
4156 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4157 case INTEL_FAM6_ATOM_AIRMONT:
4158 case INTEL_FAM6_ATOM_AIRMONT_MID:
4159 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4160 sizeof(hw_cache_event_ids));
4161 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4162 sizeof(hw_cache_extra_regs));
4164 intel_pmu_lbr_init_slm();
4166 x86_pmu.event_constraints = intel_slm_event_constraints;
4167 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4168 x86_pmu.extra_regs = intel_slm_extra_regs;
4169 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4170 x86_pmu.cpu_events = slm_events_attrs;
4171 extra_attr = slm_format_attr;
4172 pr_cont("Silvermont events, ");
4173 name = "silvermont";
4174 break;
4176 case INTEL_FAM6_ATOM_GOLDMONT:
4177 case INTEL_FAM6_ATOM_GOLDMONT_X:
4178 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4179 sizeof(hw_cache_event_ids));
4180 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4181 sizeof(hw_cache_extra_regs));
4183 intel_pmu_lbr_init_skl();
4185 x86_pmu.event_constraints = intel_slm_event_constraints;
4186 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4187 x86_pmu.extra_regs = intel_glm_extra_regs;
4188 /*
4189 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4190 * for precise cycles.
4191 * :pp is identical to :ppp
4192 */
4193 x86_pmu.pebs_aliases = NULL;
4194 x86_pmu.pebs_prec_dist = true;
4195 x86_pmu.lbr_pt_coexist = true;
4196 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4197 x86_pmu.cpu_events = glm_events_attrs;
4198 extra_attr = slm_format_attr;
4199 pr_cont("Goldmont events, ");
4200 name = "goldmont";
4201 break;
4203 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4204 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4205 sizeof(hw_cache_event_ids));
4206 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4207 sizeof(hw_cache_extra_regs));
4209 intel_pmu_lbr_init_skl();
4211 x86_pmu.event_constraints = intel_slm_event_constraints;
4212 x86_pmu.extra_regs = intel_glm_extra_regs;
4213 /*
4214 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4215 * for precise cycles.
4216 */
4217 x86_pmu.pebs_aliases = NULL;
4218 x86_pmu.pebs_prec_dist = true;
4219 x86_pmu.lbr_pt_coexist = true;
4220 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4221 x86_pmu.flags |= PMU_FL_PEBS_ALL;
4222 x86_pmu.get_event_constraints = glp_get_event_constraints;
4223 x86_pmu.cpu_events = glm_events_attrs;
4224 /* Goldmont Plus has 4-wide pipeline */
4225 event_attr_td_total_slots_scale_glm.event_str = "4";
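/*
 * td_total_slots is pipeline-width * unhalted core cycles; the "4"
 * override reflects the 4-wide Goldmont Plus front end (plain Goldmont
 * uses a smaller scale).
 */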
4226 extra_attr = slm_format_attr;
4227 pr_cont("Goldmont plus events, ");
4228 name = "goldmont_plus";
4229 break;
4231 case INTEL_FAM6_WESTMERE:
4232 case INTEL_FAM6_WESTMERE_EP:
4233 case INTEL_FAM6_WESTMERE_EX:
4234 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4235 sizeof(hw_cache_event_ids));
4236 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4237 sizeof(hw_cache_extra_regs));
4239 intel_pmu_lbr_init_nhm();
4241 x86_pmu.event_constraints = intel_westmere_event_constraints;
4242 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4243 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4244 x86_pmu.extra_regs = intel_westmere_extra_regs;
4245 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4247 x86_pmu.cpu_events = nhm_events_attrs;
4249 /* UOPS_ISSUED.STALLED_CYCLES */
4250 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4251 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4252 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4253 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4254 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4256 intel_pmu_pebs_data_source_nhm();
4257 extra_attr = nhm_format_attr;
4258 pr_cont("Westmere events, ");
4259 name = "westmere";
4260 break;
4262 case INTEL_FAM6_SANDYBRIDGE:
4263 case INTEL_FAM6_SANDYBRIDGE_X:
4264 x86_add_quirk(intel_sandybridge_quirk);
4265 x86_add_quirk(intel_ht_bug);
4266 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4267 sizeof(hw_cache_event_ids));
4268 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4269 sizeof(hw_cache_extra_regs));
4271 intel_pmu_lbr_init_snb();
4273 x86_pmu.event_constraints = intel_snb_event_constraints;
4274 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4275 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4276 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4277 x86_pmu.extra_regs = intel_snbep_extra_regs;
4278 else
4279 x86_pmu.extra_regs = intel_snb_extra_regs;
4282 /* all extra regs are per-cpu when HT is on */
4283 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4284 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4286 x86_pmu.cpu_events = snb_events_attrs;
4288 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4289 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4290 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4291 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
4292 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4293 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4295 extra_attr = nhm_format_attr;
4297 pr_cont("SandyBridge events, ");
4298 name = "sandybridge";
4299 break;
4301 case INTEL_FAM6_IVYBRIDGE:
4302 case INTEL_FAM6_IVYBRIDGE_X:
4303 x86_add_quirk(intel_ht_bug);
4304 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4305 sizeof(hw_cache_event_ids));
4306 /* dTLB-load-misses on IVB is different from SNB */
4307 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4309 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4310 sizeof(hw_cache_extra_regs));
4312 intel_pmu_lbr_init_snb();
4314 x86_pmu.event_constraints = intel_ivb_event_constraints;
4315 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4316 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4317 x86_pmu.pebs_prec_dist = true;
4318 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4319 x86_pmu.extra_regs = intel_snbep_extra_regs;
4320 else
4321 x86_pmu.extra_regs = intel_snb_extra_regs;
4322 /* all extra regs are per-cpu when HT is on */
4323 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4324 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4326 x86_pmu.cpu_events = snb_events_attrs;
4328 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4329 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4330 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4332 extra_attr = nhm_format_attr;
4334 pr_cont("IvyBridge events, ");
4335 name = "ivybridge";
4336 break;
4339 case INTEL_FAM6_HASWELL_CORE:
4340 case INTEL_FAM6_HASWELL_X:
4341 case INTEL_FAM6_HASWELL_ULT:
4342 case INTEL_FAM6_HASWELL_GT3E:
4343 x86_add_quirk(intel_ht_bug);
4344 x86_pmu.late_ack = true;
4345 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4346 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4348 intel_pmu_lbr_init_hsw();
4350 x86_pmu.event_constraints = intel_hsw_event_constraints;
4351 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4352 x86_pmu.extra_regs = intel_snbep_extra_regs;
4353 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4354 x86_pmu.pebs_prec_dist = true;
4355 /* all extra regs are per-cpu when HT is on */
4356 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4357 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4359 x86_pmu.hw_config = hsw_hw_config;
4360 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4361 x86_pmu.cpu_events = get_hsw_events_attrs();
4362 x86_pmu.lbr_double_abort = true;
4363 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4364 hsw_format_attr : nhm_format_attr;
4365 pr_cont("Haswell events, ");
4366 name = "haswell";
4367 break;
4369 case INTEL_FAM6_BROADWELL_CORE:
4370 case INTEL_FAM6_BROADWELL_XEON_D:
4371 case INTEL_FAM6_BROADWELL_GT3E:
4372 case INTEL_FAM6_BROADWELL_X:
4373 x86_pmu.late_ack = true;
4374 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4375 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4377 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
4378 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
4379 BDW_L3_MISS | HSW_SNOOP_DRAM;
4380 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE |
4381 BDW_L3_MISS | HSW_SNOOP_DRAM;
4382 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ |
4383 BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;
4384 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE |
4385 BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;
4387 intel_pmu_lbr_init_hsw();
4389 x86_pmu.event_constraints = intel_bdw_event_constraints;
4390 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
4391 x86_pmu.extra_regs = intel_snbep_extra_regs;
4392 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4393 x86_pmu.pebs_prec_dist = true;
4394 /* all extra regs are per-cpu when HT is on */
4395 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4396 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4398 x86_pmu.hw_config = hsw_hw_config;
4399 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4400 x86_pmu.cpu_events = get_hsw_events_attrs();
4401 x86_pmu.limit_period = bdw_limit_period;
4402 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4403 hsw_format_attr : nhm_format_attr;
4404 pr_cont("Broadwell events, ");
4405 name = "broadwell";
4406 break;
4408 case INTEL_FAM6_XEON_PHI_KNL:
4409 case INTEL_FAM6_XEON_PHI_KNM:
4410 memcpy(hw_cache_event_ids,
4411 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4412 memcpy(hw_cache_extra_regs,
4413 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4414 intel_pmu_lbr_init_knl();
4416 x86_pmu.event_constraints = intel_slm_event_constraints;
4417 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4418 x86_pmu.extra_regs = intel_knl_extra_regs;
4420 /* all extra regs are per-cpu when HT is on */
4421 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4422 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4423 extra_attr = slm_format_attr;
4424 pr_cont("Knights Landing/Mill events, ");
4425 name = "knights-landing";
4426 break;
4428 case INTEL_FAM6_SKYLAKE_MOBILE:
4429 case INTEL_FAM6_SKYLAKE_DESKTOP:
4430 case INTEL_FAM6_SKYLAKE_X:
4431 case INTEL_FAM6_KABYLAKE_MOBILE:
4432 case INTEL_FAM6_KABYLAKE_DESKTOP:
4433 x86_pmu.late_ack = true;
4434 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4435 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4436 intel_pmu_lbr_init_skl();
4438 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
4439 event_attr_td_recovery_bubbles.event_str_noht =
4440 "event=0xd,umask=0x1,cmask=1";
4441 event_attr_td_recovery_bubbles.event_str_ht =
4442 "event=0xd,umask=0x1,cmask=1,any=1";
4444 x86_pmu.event_constraints = intel_skl_event_constraints;
4445 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
4446 x86_pmu.extra_regs = intel_skl_extra_regs;
4447 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
4448 x86_pmu.pebs_prec_dist = true;
4449 /* all extra regs are per-cpu when HT is on */
4450 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4451 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4453 x86_pmu.hw_config = hsw_hw_config;
4454 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4455 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4456 hsw_format_attr : nhm_format_attr;
4457 extra_attr = merge_attr(extra_attr, skl_format_attr);
4458 to_free = extra_attr;
4459 x86_pmu.cpu_events = get_hsw_events_attrs();
4460 intel_pmu_pebs_data_source_skl(
4461 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4463 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4464 x86_pmu.flags |= PMU_FL_TFA;
4465 x86_pmu.get_event_constraints = tfa_get_event_constraints;
4466 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4467 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4468 intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
4469 }
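/*
 * TFA sketch: with allow_tsx_force_abort left at 0, the constraints
 * keep perf off counter 3 so RTM transactions can run; writing 1 to the
 * sysfs attribute lets perf use all four counters at the price of the
 * CPU forcing TSX transactions to abort.
 */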
4471 pr_cont("Skylake events, ");
4472 name = "skylake";
4473 break;
4475 default:
4476 switch (x86_pmu.version) {
4477 case 1:
4478 x86_pmu.event_constraints = intel_v1_event_constraints;
4479 pr_cont("generic architected perfmon v1, ");
4480 name = "generic_arch_v1";
4481 break;
4482 default:
4483 /*
4484 * default constraints for v2 and up
4485 */
4486 x86_pmu.event_constraints = intel_gen_event_constraints;
4487 pr_cont("generic architected perfmon, ");
4488 name = "generic_arch_v2+";
4489 break;
4490 }
4491 }
4493 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4495 if (version >= 2 && extra_attr) {
4496 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4497 extra_attr);
4498 WARN_ON(!x86_pmu.format_attrs);
4499 }
4501 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
4502 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4503 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
4504 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
4505 }
4506 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
4508 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
4509 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4510 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
4511 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
4512 }
4514 x86_pmu.intel_ctrl |=
4515 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
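/*
 * Worked example: with 4 generic and 3 fixed counters this yields
 * intel_ctrl = ((1ULL << 4) - 1) | (((1LL << 3) - 1) << 32)
 *            = 0x70000000f,
 * i.e. enable bits 0-3 (generic) and 32-34 (fixed, starting at
 * INTEL_PMC_IDX_FIXED) in MSR_CORE_PERF_GLOBAL_CTRL.
 */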
4517 if (x86_pmu.event_constraints) {
4518 /*
4519 * event on fixed counter2 (REF_CYCLES) only works on this
4520 * counter, so do not extend mask to generic counters
4521 */
4522 for_each_event_constraint(c, x86_pmu.event_constraints) {
4523 if (c->cmask == FIXED_EVENT_FLAGS
4524 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
4525 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
4526 }
4527 c->idxmsk64 &=
4528 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
4529 c->weight = hweight64(c->idxmsk64);
4530 }
4531 }
4533 /*
4534 * Accessing LBR MSRs may cause a #GP under certain circumstances,
4535 * e.g. KVM doesn't support LBR MSRs.
4536 * Check all LBR MSRs here.
4537 * Disable LBR access if any LBR MSR cannot be accessed.
4538 */
4539 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
4540 x86_pmu.lbr_nr = 0;
4541 for (i = 0; i < x86_pmu.lbr_nr; i++) {
4542 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
4543 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
4544 x86_pmu.lbr_nr = 0;
4545 }
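/*
 * check_msr(msr, mask) probes an MSR by XOR-flipping the mask bits,
 * reading the value back and restoring the original; a mismatch (or a
 * #GP) marks the register unusable, catching MSRs that hypervisors
 * ignore or fake.
 */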
4547 x86_pmu.caps_attrs = intel_pmu_caps_attrs;
4549 if (x86_pmu.lbr_nr) {
4550 x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
4551 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
4554 /*
4555 * Accessing an extra MSR may cause a #GP under certain circumstances,
4556 * e.g. KVM doesn't support offcore events.
4557 * Check all extra_regs here.
4558 */
4559 if (x86_pmu.extra_regs) {
4560 for (er = x86_pmu.extra_regs; er->msr; er++) {
4561 er->extra_msr_access = check_msr(er->msr, 0x11UL);
4562 /* Disable LBR select mapping */
4563 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
4564 x86_pmu.lbr_sel_map = NULL;
4565 }
4566 }
4568 /* Support full width counters using alternative MSR range */
4569 if (x86_pmu.intel_cap.full_width_write) {
4570 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
4571 x86_pmu.perfctr = MSR_IA32_PMC0;
4572 pr_cont("full-width counters, ");
4575 kfree(to_free);
4576 return 0;
4577 }
4579 /*
4580 * HT bug: phase 2 init
4581 * Called once we have valid topology information to check
4582 * whether or not HT is enabled.
4583 * If HT is off, then we disable the workaround.
4584 */
4585 static __init int fixup_ht_bug(void)
4586 {
4587 int c;
4588 /*
4589 * problem not present on this CPU model, nothing to do
4590 */
4591 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
4592 return 0;
4594 if (topology_max_smt_threads() > 1) {
4595 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
4596 return 0;
4597 }
4599 cpus_read_lock();
4601 hardlockup_detector_perf_stop();
4603 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
4605 x86_pmu.start_scheduling = NULL;
4606 x86_pmu.commit_scheduling = NULL;
4607 x86_pmu.stop_scheduling = NULL;
4609 hardlockup_detector_perf_restart();
4611 for_each_online_cpu(c)
4612 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
4614 cpus_read_unlock();
4615 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
4616 return 0;
4617 }
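/*
 * subsys_initcall runs after SMP bringup, so topology_max_smt_threads()
 * reflects real sibling counts by the time fixup_ht_bug() executes.
 */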
4618 subsys_initcall(fixup_ht_bug);