/*
 * Netburst Performance Events (P4, old Xeon)
 *
 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#ifdef CONFIG_CPU_SUP_INTEL

#include <asm/perf_event_p4.h>

#define P4_CNTR_LIMIT 3

/*
 * array indices: 0,1 - HT threads, used with HT enabled cpu
 */
struct p4_event_bind {
	unsigned int opcode;			/* Event code and ESCR selector */
	unsigned int escr_msr[2];		/* ESCR MSR for this event */
	unsigned int escr_emask;		/* valid ESCR EventMask bits */
	unsigned int shared;			/* event is shared across threads */
	char cntr[2][P4_CNTR_LIMIT];		/* counter index (offset), -1 on absence */
};

struct p4_pebs_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};

/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
#define P4_GEN_PEBS_BIND(name, pebs, vert)			\
	[P4_PEBS_METRIC__##name] = {				\
		.metric_pebs = pebs | P4_PEBS_ENABLE_UOP_TAG,	\
		.metric_vert = vert,				\
	}

/*
 * note we have P4_PEBS_ENABLE_UOP_TAG always set here
 *
 * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
 * event configuration to find out which values are to be
 * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
 */
static struct p4_pebs_bind p4_pebs_bind_map[] = {
	P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired,	0x0000001, 0x0000001),
	P4_GEN_PEBS_BIND(2ndl_cache_load_miss_retired,	0x0000002, 0x0000001),
	P4_GEN_PEBS_BIND(dtlb_load_miss_retired,	0x0000004, 0x0000001),
	P4_GEN_PEBS_BIND(dtlb_store_miss_retired,	0x0000004, 0x0000002),
	P4_GEN_PEBS_BIND(dtlb_all_miss_retired,		0x0000004, 0x0000003),
	P4_GEN_PEBS_BIND(tagged_mispred_branch,		0x0018000, 0x0000010),
	P4_GEN_PEBS_BIND(mob_load_replay_retired,	0x0000200, 0x0000001),
	P4_GEN_PEBS_BIND(split_load_retired,		0x0000400, 0x0000001),
	P4_GEN_PEBS_BIND(split_store_retired,		0x0000400, 0x0000002),
};

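/*
 * For illustration only (not part of the table): the first entry above
 * expands, per the P4_GEN_PEBS_BIND() macro, to roughly
 *
 *	[P4_PEBS_METRIC__1stl_cache_load_miss_retired] = {
 *		.metric_pebs = 0x0000001 | P4_PEBS_ENABLE_UOP_TAG,
 *		.metric_vert = 0x0000001,
 *	}
 *
 * ie the low bits select the replay metric written into MSR_IA32_PEBS_ENABLE
 * and MSR_P4_PEBS_MATRIX_VERT respectively, with the uop tagging bit forced on.
 */
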
/*
 * Note that we don't use CCCR1 here, there is an
 * exception for P4_BSQ_ALLOCATION but we just have
 * no workaround
 *
 * consider this binding as resources which a particular
 * event may borrow, it doesn't contain EventMask,
 * Tags and friends -- they are left to a caller
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode		= P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr	= { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr	= { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr	= { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr	= { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode		= P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {		/* shared ESCR, broken CCCR1 */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
		.cntr		= { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode		= P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr	= { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode		= P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode		= P4_OPCODE(P4_EVENT_BNR),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr	= { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode		= P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
};

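/*
 * Reading the table (illustration only): the P4_EVENT_TC_DELIVER_MODE
 * entry above means that this event may borrow counter 4 or 5 together
 * with MSR_P4_TC_ESCR0 when running on thread 0, or counter 6 or 7
 * together with MSR_P4_TC_ESCR1 on thread 1; -1 marks an unused slot.
 */
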
#define P4_GEN_CACHE_EVENT(event, bit, metric)				  \
	p4_config_pack_escr(P4_ESCR_EVENT(event)			| \
			    P4_ESCR_EMASK_BIT(event, bit))		| \
	p4_config_pack_cccr(metric					| \
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))

static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_PEBS_METRIC__none),
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_PEBS_METRIC__none),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

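/*
 * Illustration only: for the L1D read-miss slot above, P4_GEN_CACHE_EVENT()
 * expands to roughly
 *
 *	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_REPLAY_EVENT)	|
 *			    P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS)) |
 *	p4_config_pack_cccr(P4_PEBS_METRIC__1stl_cache_load_miss_retired |
 *			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(P4_EVENT_REPLAY_EVENT))))
 *
 * ie a ready-made config with the replay event selected in the ESCR half and
 * the PEBS metric index stashed in the CCCR half for p4_pmu_enable_pebs().
 */
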
/*
 * Because of Netburst being quite restricted in how many
 * identical events may run simultaneously, we introduce event aliases,
 * ie different events which have the same functionality but
 * utilize non-intersecting resources (ESCR/CCCR/counter registers).
 *
 * This allows us to relax restrictions a bit and run two or more
 * identical events together.
 *
 * Never set any custom internal bits such as P4_CONFIG_HT,
 * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
 * either up to date automatically or not applicable at all.
 */
struct p4_event_alias {
	u64 original;
	u64 alternative;
} p4_event_aliases[] = {
	{
		/*
		 * Non-halted cycles can be substituted with non-sleeping cycles (see
		 * Intel SDM Vol3b for details). We need this alias to be able
		 * to run the nmi-watchdog and 'perf top' (or any other user space tool
		 * which is interested in running PERF_COUNT_HW_CPU_CYCLES)
		 */
		.original	=
			p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
					    P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
		.alternative	=
			p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT)	 |
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)|
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)|
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)|
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)|
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
					    P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))|
			p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT	 |
					    P4_CCCR_COMPARE),
	},
};

static u64 p4_get_alias_event(u64 config)
{
	u64 config_match;
	int i;

	/*
	 * Only an event with the special mark is allowed,
	 * this way we're sure it didn't come in malformed
	 */
	if (!(config & P4_CONFIG_ALIASABLE))
		return 0;

	config_match = config & P4_CONFIG_EVENT_ALIAS_MASK;

	for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) {
		if (config_match == p4_event_aliases[i].original) {
			config_match = p4_event_aliases[i].alternative;
			break;
		} else if (config_match == p4_event_aliases[i].alternative) {
			config_match = p4_event_aliases[i].original;
			break;
		}
	}

	if (i >= ARRAY_SIZE(p4_event_aliases))
		return 0;

	return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS);
}

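/*
 * Illustration only: if the cycles counter is already taken (eg by the
 * nmi-watchdog), a second PERF_COUNT_HW_CPU_CYCLES event whose config
 * matches p4_event_aliases[0].original and carries P4_CONFIG_ALIASABLE
 * gets rewritten here to the P4_EVENT_EXECUTION_EVENT based alternative;
 * everything outside P4_CONFIG_EVENT_ALIAS_MASK is dropped except the
 * bits covered by P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS, which are copied
 * over from the caller's config.
 */
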
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
	/* non-halted CPU clocks */
	[PERF_COUNT_HW_CPU_CYCLES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING))	|
		P4_CONFIG_ALIASABLE,

	/*
	 * retired instructions
	 * for the sake of simplicity we don't use the FSB tagging
	 */
	[PERF_COUNT_HW_INSTRUCTIONS] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

	[PERF_COUNT_HW_CACHE_REFERENCES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

	[PERF_COUNT_HW_CACHE_MISSES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

	/* branch instructions retired */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

	/* mispredicted branches retired */
	[PERF_COUNT_HW_BRANCH_MISSES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

	/* bus ready clocks (cpu is driving #DRDY_DRV\#DRDY_OWN): */
	[PERF_COUNT_HW_BUS_CYCLES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))	|
	p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};

static struct p4_event_bind *p4_config_get_bind(u64 config)
{
	unsigned int evnt = p4_config_unpack_event(config);
	struct p4_event_bind *bind = NULL;

	if (evnt < ARRAY_SIZE(p4_event_bind_map))
		bind = &p4_event_bind_map[evnt];

	return bind;
}

static u64 p4_pmu_event_map(int hw_event)
{
	struct p4_event_bind *bind;
	unsigned int esel;
	u64 config;

	config = p4_general_events[hw_event];
	bind = p4_config_get_bind(config);
	esel = P4_OPCODE_ESEL(bind->opcode);
	config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));

	return config;
}

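/*
 * Illustration only: for PERF_COUNT_HW_CPU_CYCLES the template above
 * already selects P4_EVENT_GLOBAL_POWER_EVENTS in the ESCR half, and
 * this helper merely adds P4_CCCR_ESEL() derived from that event's
 * opcode so the CCCR routes the right ESCR to the counter.
 */
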
/* check cpu model specifics */
static bool p4_event_match_cpu_model(unsigned int event_idx)
{
	/* INSTR_COMPLETED event only exists on models 3, 4, 6 (Prescott) */
	if (event_idx == P4_EVENT_INSTR_COMPLETED) {
		if (boot_cpu_data.x86_model != 3 &&
		    boot_cpu_data.x86_model != 4 &&
		    boot_cpu_data.x86_model != 6)
			return false;
	}

	/*
	 * For info
	 * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
	 */

	return true;
}

static int p4_validate_raw_event(struct perf_event *event)
{
	unsigned int v, emask;

	/* User data may have an out-of-bound event index */
	v = p4_config_unpack_event(event->attr.config);
	if (v >= ARRAY_SIZE(p4_event_bind_map))
		return -EINVAL;

	/* It may be unsupported: */
	if (!p4_event_match_cpu_model(v))
		return -EINVAL;

	/*
	 * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as
	 * in Architectural Performance Monitoring, it means not
	 * on _which_ logical cpu to count but rather _when_, ie it
	 * depends on logical cpu state -- count the event if one cpu is active,
	 * none, both or any, so we just allow the user to pass any value
	 * desired.
	 *
	 * In turn we always set Tx_OS/Tx_USR bits bound to the logical
	 * cpu without their propagation to another cpu
	 */

	/*
	 * if an event is shared across the logical threads
	 * the user needs special permissions to be able to use it
	 */
	if (p4_ht_active() && p4_event_bind_map[v].shared) {
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	/* ESCR EventMask bits may be invalid */
	emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
	if (emask & ~p4_event_bind_map[v].escr_emask)
		return -EINVAL;

	/*
	 * it may have some invalid PEBS bits
	 */
	if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
		return -EINVAL;

	v = p4_config_unpack_metric(event->attr.config);
	if (v >= ARRAY_SIZE(p4_pebs_bind_map))
		return -EINVAL;

	return 0;
}

static int p4_hw_config(struct perf_event *event)
{
	int cpu = get_cpu();
	int rc = 0;
	u32 escr, cccr;

	/*
	 * the reason we use cpu that early is that: if we get scheduled
	 * the first time on the same cpu -- we will not need to swap thread
	 * specific flags in config (and will save some cpu cycles)
	 */

	cccr = p4_default_cccr_conf(cpu);
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
					event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
			   p4_config_pack_cccr(cccr);

	if (p4_ht_active() && p4_ht_thread(cpu))
		event->hw.config = p4_set_ht_bit(event->hw.config);

	if (event->attr.type == PERF_TYPE_RAW) {
		struct p4_event_bind *bind;
		unsigned int esel;

		/*
		 * Clear bits we reserve to be managed by the kernel itself
		 * and never allowed from user space
		 */
		event->attr.config &= P4_CONFIG_MASK;

		rc = p4_validate_raw_event(event);
		if (rc)
			goto out;

		/*
		 * Note that for RAW events we allow the user to use P4_CCCR_RESERVED
		 * bits since we keep additional info here (for cache events and etc)
		 */
		event->hw.config |= event->attr.config;
		bind = p4_config_get_bind(event->attr.config);
		if (!bind) {
			rc = -EINVAL;
			goto out;
		}
		esel = P4_OPCODE_ESEL(bind->opcode);
		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
	}

	rc = x86_setup_perfctr(event);
out:
	put_cpu();
	return rc;
}

static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
	u64 v;

	/* an official way for overflow indication */
	rdmsrl(hwc->config_base, v);
	if (v & P4_CCCR_OVF) {
		wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
		return 1;
	}

	/*
	 * In some circumstances the overflow might issue an NMI but did
	 * not set the P4_CCCR_OVF bit. Because a counter holds a negative value
	 * we simply check for the high bit being set; if it's cleared it means
	 * the counter has reached zero and continued counting before the
	 * real NMI signal was received:
	 */
	rdmsrl(hwc->event_base, v);
	if (!(v & ARCH_P4_UNFLAGGED_BIT))
		return 1;

	return 0;
}

static void p4_pmu_disable_pebs(void)
{
	/*
	 * It's still allowed that two threads setup same cache
	 * events so we can't simply clear metrics until we know
	 * no one is depending on us, so we need kind of counter
	 * for "ReplayEvent" users.
	 *
	 * What is more complex -- RAW events, if the user (for some
	 * reason) passes some cache event metric with an improper
	 * event opcode -- it's fine from the hardware point of view
	 * but complete nonsense from the "meaning" of such an action.
	 *
	 * So for the moment leave metrics turned on forever -- it's
	 * ok for now but needs to be revisited!
	 *
	 * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
	 * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
	 */
}

static inline void p4_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If the event gets disabled while the counter is in overflowed
	 * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
	 * asserted again and again
	 */
	(void)checking_wrmsrl(hwc->config_base,
		(u64)(p4_config_unpack_cccr(hwc->config)) &
			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}

static void p4_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_disable_event(event);
	}

	p4_pmu_disable_pebs();
}

/* configuration must be valid */
static void p4_pmu_enable_pebs(u64 config)
{
	struct p4_pebs_bind *bind;
	unsigned int idx;

	BUILD_BUG_ON(P4_PEBS_METRIC__max > P4_PEBS_CONFIG_METRIC_MASK);

	idx = p4_config_unpack_metric(config);
	if (idx == P4_PEBS_METRIC__none)
		return;

	bind = &p4_pebs_bind_map[idx];

	(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
	(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}

static void p4_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	struct p4_event_bind *bind;
	u64 escr_addr, cccr;

	bind = &p4_event_bind_map[idx];
	escr_addr = (u64)bind->escr_msr[thread];

	/*
	 * - we dont support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	 */
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);

	/* we need a real Event value */
	escr_conf &= ~P4_ESCR_EVENT_MASK;
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

	cccr = p4_config_unpack_cccr(hwc->config);

	/*
	 * it could be a Cache event so we need to write metrics
	 * into additional MSRs
	 */
	p4_pmu_enable_pebs(hwc->config);

	(void)checking_wrmsrl(escr_addr, escr_conf);
	(void)checking_wrmsrl(hwc->config_base,
				(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}

static void p4_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_enable_event(event);
	}
}

static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		int overflow;

		if (!test_bit(idx, cpuc->active_mask)) {
			/* catch in-flight IRQs */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		/* it might be an unflagged overflow */
		overflow = p4_pmu_clear_cccr_ovf(hwc);

		val = x86_perf_event_update(event);
		if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
			continue;

		handled += overflow;

		/* event overflow for sure */
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	/*
	 * When dealing with the unmasking of the LVTPC on P4 perf hw, it has
	 * been observed that the OVF bit flag has to be cleared first _before_
	 * the LVTPC can be unmasked.
	 *
	 * The reason is that the NMI line will continue to be asserted while the OVF
	 * bit is set. This causes a second NMI to be generated if the LVTPC is
	 * unmasked before the OVF bit is cleared, leading to unknown NMI
	 * messages.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	return handled;
}

/*
 * swap thread specific fields according to the thread
 * we are going to run on
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
	u32 escr, cccr;

	/*
	 * we're either lucky and continue on the same cpu or there is no HT support
	 */
	if (!p4_should_swap_ts(hwc->config, cpu))
		return;

	/*
	 * the event is migrated from another logical
	 * cpu, so we need to swap thread specific flags
	 */

	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		hwc->config = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config |= P4_CONFIG_HT;
	} else {
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
		hwc->config = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config &= ~P4_CONFIG_HT;
	}
}

/*
 * ESCR address hashing is tricky, ESCRs are not sequential
 * in memory but all start from MSR_P4_BSU_ESCR0 (0x03a0) and
 * every ESCR address falls into the range [0x3a0, 0x3e1],
 * so we make a ~70% filled hashtable
 */

#define P4_ESCR_MSR_BASE		0x000003a0
#define P4_ESCR_MSR_MAX			0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE		(P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr)		(msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr)	[P4_ESCR_MSR_IDX(msr)] = msr

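/*
 * Illustration only: P4_ESCR_MSR_TABLE_SIZE works out to
 * 0x3e1 - 0x3a0 + 1 = 0x42 (66) slots, and each of the 46 ESCR MSRs listed
 * below lands at P4_ESCR_MSR_IDX(msr) = msr - 0x3a0 (so MSR_P4_BSU_ESCR0
 * sits at slot 0), which is where the "~70% filled" figure comes from.
 */
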
static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};

static int p4_get_escr_idx(unsigned int addr)
{
	unsigned int idx = P4_ESCR_MSR_IDX(addr);

	if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE	||
			!p4_escr_table[idx]		||
			p4_escr_table[idx] != addr)) {
		WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
		return -1;
	}

	return idx;
}

static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{
	int i, j;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		j = bind->cntr[thread][i];
		if (j != -1 && !test_bit(j, used_mask))
			return j;
	}

	return -1;
}

static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
	int cpu = smp_processor_id();
	struct hw_perf_event *hwc;
	struct p4_event_bind *bind;
	unsigned int i, thread, num;
	int cntr_idx, escr_idx;
	u64 config_alias;
	int pass;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);

	for (i = 0, num = n; i < n; i++, num--) {

		hwc = &cpuc->event_list[i]->hw;
		thread = p4_ht_thread(cpu);
		pass = 0;

again:
		/*
		 * It's possible to hit a circular lock
		 * between the original and alternative events
		 * if both are scheduled already.
		 */
		if (pass > 2)
			goto done;

		bind = p4_config_get_bind(hwc->config);
		escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
		if (unlikely(escr_idx == -1))
			goto done;

		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
			cntr_idx = hwc->idx;
			if (assign)
				assign[i] = hwc->idx;
			goto reserve;
		}

		cntr_idx = p4_next_cntr(thread, used_mask, bind);
		if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
			/*
			 * Check whether an event alias is still available.
			 */
			config_alias = p4_get_alias_event(hwc->config);
			if (!config_alias)
				goto done;
			hwc->config = config_alias;
			pass++;
			goto again;
		}

		p4_pmu_swap_config_ts(hwc, cpu);
		if (assign)
			assign[i] = cntr_idx;
reserve:
		set_bit(cntr_idx, used_mask);
		set_bit(escr_idx, escr_mask);
	}

done:
	return num ? -ENOSPC : 0;
}

static __initconst const struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.handle_irq		= p4_pmu_handle_irq,
	.disable_all		= p4_pmu_disable_all,
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,
	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
	.max_events		= ARRAY_SIZE(p4_general_events),
	.get_event_constraints	= x86_get_event_constraints,
	/*
	 * If HT is disabled we may need to use all the
	 * ARCH_P4_MAX_CCCR counters simultaneously, though for the
	 * moment we leave it restricted, assuming HT is on
	 */
	.num_counters		= ARCH_P4_MAX_CCCR,
	.cntval_bits		= ARCH_P4_CNTRVAL_BITS,
	.cntval_mask		= ARCH_P4_CNTRVAL_MASK,
	.max_period		= (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
	/*
	 * This handles erratum N15 in intel doc 249199-029,
	 * the counter may not be updated correctly on write
	 * so we need a second write operation to do the trick
	 * (the official workaround didn't work)
	 *
	 * the former idea is taken from OProfile code
	 */
	.perfctr_second_write	= 1,
};

static __init int p4_pmu_init(void)
{
	unsigned int low, high;

	/* If we get stripped -- indexing fails */
	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!(low & (1 << 7))) {
		pr_cont("unsupported Netburst CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
		sizeof(hw_cache_event_ids));

	pr_cont("Netburst events, ");

	x86_pmu = p4_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */