/* Nehalem-EX/Westmere-EX uncore support */
#include "uncore.h"
/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_TRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_TRESH_MASK)
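/*
 * For illustration (not part of the original source): boxes that use this
 * common control layout pack a raw event into one config value. For
 * example, ev_sel 0x28, umask 0x04 and threshold 0x01:
 *
 *	u64 config = 0x28		// ev_sel, bits 0-7
 *		   | (0x04 << 8)	// umask, bits 8-15
 *		   | (0x01ULL << 24);	// thresh, bits 24-31
 *
 * config == 0x01000428; every set bit falls inside
 * NHMEX_PMON_RAW_EVENT_MASK, so the value survives the raw-event filter.
 */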
/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)
/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL	0xd00
#define NHMEX_C0_MSR_PMON_CTR0		0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0	0xd10
#define NHMEX_C_MSR_OFFSET		0x20
/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL	0xc20
#define NHMEX_B0_MSR_PMON_CTR0		0xc31
#define NHMEX_B0_MSR_PMON_CTL0		0xc30
#define NHMEX_B_MSR_OFFSET		0x40
#define NHMEX_B0_MSR_MATCH		0xe45
#define NHMEX_B0_MSR_MASK		0xe46
#define NHMEX_B1_MSR_MATCH		0xe4d
#define NHMEX_B1_MSR_MASK		0xe4e

#define NHMEX_B_PMON_CTL_EN		(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT	1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK	\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT		6
#define NHMEX_B_PMON_CTR_MASK		\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK		\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)
/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL	0xc40
#define NHMEX_S0_MSR_PMON_CTR0		0xc51
#define NHMEX_S0_MSR_PMON_CTL0		0xc50
#define NHMEX_S_MSR_OFFSET		0x80
#define NHMEX_S0_MSR_MM_CFG		0xe48
#define NHMEX_S0_MSR_MATCH		0xe49
#define NHMEX_S0_MSR_MASK		0xe4a
#define NHMEX_S1_MSR_MM_CFG		0xe58
#define NHMEX_S1_MSR_MATCH		0xe59
#define NHMEX_S1_MSR_MASK		0xe5a

#define NHMEX_S_PMON_MM_CFG_EN		(0x1ULL << 63)

#define NHMEX_S_EVENT_TO_R_PROG_EV	0
/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL		0xca0
#define NHMEX_M0_MSR_PMU_DSP		0xca5
#define NHMEX_M0_MSR_PMU_ISS		0xca6
#define NHMEX_M0_MSR_PMU_MAP		0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR	0xca8
#define NHMEX_M0_MSR_PMU_PGT		0xca9
#define NHMEX_M0_MSR_PMU_PLD		0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC	0xcab
#define NHMEX_M0_MSR_PMU_CTL0		0xcb0
#define NHMEX_M0_MSR_PMU_CNT0		0xcb1
#define NHMEX_M_MSR_OFFSET		0x40
#define NHMEX_M0_MSR_PMU_MM_CFG		0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG		0xe5c

#define NHMEX_M_PMON_MM_CFG_EN		(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK	0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK	0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT	34

#define NHMEX_M_PMON_CTL_EN		(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN		(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))
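/*
 * Layout sketch (illustrative, derived from the masks above): in the
 * ZDP_CTL_FVC register the four per-event select fields are 3 bits wide
 * each and sit directly above the shared low bits, which moved up by one
 * bit on Westmere-EX:
 *
 *	NHM-EX: shared bits 0~10 (plus bit 23), event n at bits 11+3n ~ 13+3n
 *	WSM-EX: shared bits 0~11 (plus bit 24), event n at bits 12+3n ~ 14+3n
 *
 * e.g. NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(2) == 0x7ULL << 17.
 */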
/*
 * If bit 7 (FLAG_MODE) is not set, bits 9~13 select the event;
 * otherwise bits 19~21 select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)
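/*
 * Worked example (not from the original source): the two encodings the
 * extra-register table below must distinguish. With FLAG_MODE (bit 7)
 * clear the event comes from inc_sel; with it set, from set_flag_sel:
 *
 *	u64 inc_ev  = MBOX_INC_SEL(0xd);	// == 0xd << 9 == 0x1a00
 *	u64 flag_ev = MBOX_SET_FLAG_SEL(0x5);	// == (0x5 << 19) | (1 << 7)
 *
 * Both MBOX_INC_SEL_MASK and MBOX_SET_FLAG_SEL_MASK include
 * NHMEX_M_PMON_CTL_FLAG_MODE so that matching an entry checks the mode
 * bit as well as the select field.
 */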
/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL		0xe00
#define NHMEX_R_MSR_PMON_CTL0		0xe10
#define NHMEX_R_MSR_PMON_CNT0		0xe11
#define NHMEX_R_MSR_OFFSET		0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n)	\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)	(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)	(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)	\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
#define NHMEX_R_PMON_CTL_EN		(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT	1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK	\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN		(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK	NHMEX_R_PMON_CTL_EV_SEL_MASK
/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL		0xc80
#define NHMEX_W_MSR_PMON_CNT0		0xc90
#define NHMEX_W_MSR_PMON_EVT_SEL0	0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR	0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL	0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN	(1ULL << 31)
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
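/*
 * Example (illustrative only): __BITS_VALUE(x, i, n) extracts the i-th
 * n-bit field of x. The code below uses it to treat one integer as an
 * array of small fields, e.g. reg1->idx as two 8-bit extra-register
 * indices and reg1->reg as two 16-bit MSR addresses:
 *
 *	__BITS_VALUE(0x12345678, 0, 16) == 0x5678
 *	__BITS_VALUE(0x12345678, 1, 16) == 0x1234
 *	__BITS_VALUE(0xa1b2c3d4, 2, 8)  == 0xb2
 */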
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}
static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}
static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}
static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.exit_box	= nhmex_uncore_msr_exit_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter
static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};
static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};
static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};
static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};
/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};
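/*
 * For illustration (not part of the original source): a C-box register
 * address is the common base plus the per-instance offset above, e.g.
 * the first event select of cbox 2:
 *
 *	NHMEX_C0_MSR_PMON_EV_SEL0 + nhmex_cbox_msr_offsets[2]
 *	== 0xd10 + 0x40 == 0xd50
 *
 * The offsets are not monotonic; boxes 8 and 9 sit in a distant MSR
 * range (0x240/0x2c0 above the base).
 */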
static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}
static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}
/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select the counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0 , 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
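/*
 * Worked example (illustrative): a raw config of 0x47 encodes
 * ctr = (0x47 & NHMEX_B_PMON_CTR_MASK) >> 6 == 1 and
 * ev_sel = (0x47 & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> 1 == 0x3.
 * The table above matches only bits 6-7 (cmask 0xc0), so code 0x40
 * applies and the event may run only on counter 1 (idxmsk 2).
 */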
static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};
static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};
static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}
static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}
static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
	.name			= "format",
	.attrs			= nhmex_uncore_sbox_formats_attr,
};
static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};
enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};
static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTRA_REG(0x0, DSP),
	MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTRA_REG(0xa, ISS),
	MBOX_INC_SEL_EXTRA_REG(0xa, PLD),
	MBOX_INC_SEL_EXTRA_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};
/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}
static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}
static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * rest of the fields.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}
static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}
static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}
static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require 2 extra MSRs at the most. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass two MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
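/*
 * Packing sketch (illustrative summary of the function above): an event
 * needing both an ISS and a PLD sub-register carries two 32-bit values
 * in one u64 attr.config1, and the bookkeeping is packed the same way:
 *
 *	reg1->idx : bits 0-7 first extra-reg index, bits 8-15 second
 *		    (0xff means unused);
 *	reg1->reg : bits 0-15 first MSR address, bits 16-31 second;
 *	config1   : bits 0-31 first MSR value, bits 32-63 the PLD value
 *		    (PLD always travels in the upper half, see reg_idx).
 */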
static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}
static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
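/*
 * For illustration: attr.config2 carries both the address match
 * (bits 0-33) and the address mask (bits 34-61), matching the
 * filter_match/filter_mask format attributes below. The code above
 * splits it accordingly:
 *
 *	u64 match = config2 & NHMEX_M_PMON_ADDR_MATCH_MASK;
 *	u64 mask  = (config2 >> NHMEX_M_PMON_ADDR_MASK_SHIFT) &
 *		    NHMEX_M_PMON_ADDR_MASK_MASK;
 */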
DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};
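/*
 * Usage sketch (not from the original source; assumes the
 * "uncore_<box>_<n>" PMU naming applied by the common uncore code):
 * the event descriptions above become named events, and the format
 * attributes allow the same event to be spelled out by hand:
 *
 *	perf stat -a -e uncore_mbox_0/bbox_cmds_read/ sleep 1
 *	perf stat -a -e 'uncore_mbox_0/inc_sel=0xd,fvc=0x2800/' sleep 1
 */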
static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};
static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}
/*
 * Each rbox has 4 event sets which monitor PQI port 0~3 or 4~7.
 * An event set consists of 6 events, and the 3rd and 4th events in
 * an event set use the same extra register, so an event set uses
 * 5 extra registers.
 */
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra register,
		 * the 0~7 bits and the 8~15 bits respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
		    !((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
		    (er->config == (hwc->config >> 32) &&
		     er->config1 == reg1->config &&
		     er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}
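/*
 * Worked example (illustrative): reg1->idx enumerates 6 events per port
 * group and events 2 and 3 share one extra register, so each group of 6
 * maps onto a block of 5 shared registers. For reg1->idx == 9:
 *
 *	idx    = 9 % 6;		// == 3, the 4th event of the 2nd group
 *	er_idx = idx - 1;	// == 2, shares a register with event 2
 *	er_idx += (9 / 6) * 5;	// == 7, inside the 2nd group's block
 */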
static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}
static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}
static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_flit_send, "event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_data_response, "event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_data_response, "event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};
static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};
void nhmex_uncore_cpu_init(void)
{
	if (boot_cpu_data.x86_model == 46)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = nhmex_msr_uncores;
}
/* end of Nehalem-EX uncore support */