// SPDX-License-Identifier: GPL-2.0
/* Nehalem-EX/Westmere-EX uncore support */
#include "uncore.h"

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_TRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_TRESH_MASK)
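/*
 * Worked example (editor's illustration, not part of the original file):
 * event=0x20, umask=0x01, inv=1, thresh=8 encodes as
 *   0x20 | (0x01 << 8) | (1 << 23) | (8 << 24) = 0x08800120,
 * which lies entirely within NHMEX_PMON_RAW_EVENT_MASK, so the generic
 * uncore code would accept it as a raw config.
 */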
/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)
/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL	0xd00
#define NHMEX_C0_MSR_PMON_CTR0		0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0	0xd10
#define NHMEX_C_MSR_OFFSET		0x20
/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL	0xc20
#define NHMEX_B0_MSR_PMON_CTR0		0xc31
#define NHMEX_B0_MSR_PMON_CTL0		0xc30
#define NHMEX_B_MSR_OFFSET		0x40
#define NHMEX_B0_MSR_MATCH		0xe45
#define NHMEX_B0_MSR_MASK		0xe46
#define NHMEX_B1_MSR_MATCH		0xe4d
#define NHMEX_B1_MSR_MASK		0xe4e
#define NHMEX_B_PMON_CTL_EN		(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT	1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK	\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT		6
#define NHMEX_B_PMON_CTR_MASK		\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK	\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)
/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL	0xc40
#define NHMEX_S0_MSR_PMON_CTR0		0xc51
#define NHMEX_S0_MSR_PMON_CTL0		0xc50
#define NHMEX_S_MSR_OFFSET		0x80
#define NHMEX_S0_MSR_MM_CFG		0xe48
#define NHMEX_S0_MSR_MATCH		0xe49
#define NHMEX_S0_MSR_MASK		0xe4a
#define NHMEX_S1_MSR_MM_CFG		0xe58
#define NHMEX_S1_MSR_MATCH		0xe59
#define NHMEX_S1_MSR_MASK		0xe5a
#define NHMEX_S_PMON_MM_CFG_EN		(0x1ULL << 63)
#define NHMEX_S_EVENT_TO_R_PROG_EV	0
/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL		0xca0
#define NHMEX_M0_MSR_PMU_DSP		0xca5
#define NHMEX_M0_MSR_PMU_ISS		0xca6
#define NHMEX_M0_MSR_PMU_MAP		0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR	0xca8
#define NHMEX_M0_MSR_PMU_PGT		0xca9
#define NHMEX_M0_MSR_PMU_PLD		0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC	0xcab
#define NHMEX_M0_MSR_PMU_CTL0		0xcb0
#define NHMEX_M0_MSR_PMU_CNT0		0xcb1
#define NHMEX_M_MSR_OFFSET		0x40
#define NHMEX_M0_MSR_PMU_MM_CFG		0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG		0xe5c
#define NHMEX_M_PMON_MM_CFG_EN		(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK	0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK	0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT	34
#define NHMEX_M_PMON_CTL_EN			(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))
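/*
 * Worked example (editor's illustration): on NHM-EX the per-event FVC
 * selector fields are 3 bits wide starting at bit 11, so
 * NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(0) = 0x7 << 11 = 0x3800 and
 * ..._EVENT_MASK(3) = 0x7 << 20 = 0x700000. On WSM-EX every field sits
 * one bit higher, hence the parallel WSMEX_* variants.
 */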
/*
 * If the 7th bit is not set, use bits 9~13 to select the event;
 * otherwise use bits 19~21 to select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)
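/*
 * Worked example (editor's illustration): MBOX_INC_SEL(0xd) = 0xd << 9 =
 * 0x1a00, i.e. inc_sel event 0xd with flag_mode clear, while
 * MBOX_SET_FLAG_SEL(0x1) = (0x1 << 19) | (1 << 7) = 0x80080, i.e.
 * set_flag_sel event 0x1 with flag_mode set.
 */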
/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL		0xe00
#define NHMEX_R_MSR_PMON_CTL0		0xe10
#define NHMEX_R_MSR_PMON_CNT0		0xe11
#define NHMEX_R_MSR_OFFSET		0x20
#define NHMEX_R_MSR_PORTN_QLX_CFG(n)	\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)	(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)	(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)	\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
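/*
 * Worked example (editor's illustration): for port 5,
 * NHMEX_R_MSR_PORTN_QLX_CFG(5) = 0xe2c + 5 - 4 = 0xe2d, and
 * NHMEX_R_MSR_PORTN_XBR_OFFSET(5) = 0x10 + 5 * 4 = 0x24, so
 * XBR_SET1_MM_CFG(5) = 0xe60 + 0x24 = 0xe84, with MATCH at 0xe85 and
 * MASK at 0xe86.
 */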
#define NHMEX_R_PMON_CTL_EN		(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT	1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK	\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN		(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK	NHMEX_R_PMON_CTL_EV_SEL_MASK
/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL		0xc80
#define NHMEX_W_MSR_PMON_CNT0		0xc90
#define NHMEX_W_MSR_PMON_EVT_SEL0	0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR	0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL	0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN	(1ULL << 31)
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
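/*
 * Worked example (editor's illustration): __BITS_VALUE() extracts the
 * i-th n-bit field of x, e.g. __BITS_VALUE(0x12345678, 1, 16) =
 * (0x12345678 >> 16) & 0xffff = 0x1234. It is used below to pack
 * several small indices, MSR addresses, or reference counts into one
 * integer.
 */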
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}
static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}
static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}
static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.exit_box	= nhmex_uncore_msr_exit_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter
static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};
static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};
static const struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};
static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};
static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
static const struct attribute_group nhmex_uncore_cbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_cbox_formats_attr,
};
/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};
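/*
 * Worked example (editor's illustration): cbox 2 uses offset 0x40, so
 * its first event-select MSR is NHMEX_C0_MSR_PMON_EV_SEL0 + 0x40 =
 * 0xd50 and its first counter is 0xd11 + 0x40 = 0xd51.
 */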
static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}
static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}
	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}
/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0 , 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
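/*
 * Worked example (editor's illustration): a config with counter=2
 * (bits 6-7 = 0b10, i.e. config & 0xc0 == 0x80) matches the third
 * constraint above, whose index mask 4 = 1 << 2 pins the event to
 * counter 2.
 */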
static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};
static const struct attribute_group nhmex_uncore_bbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_bbox_formats_attr,
};
static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};
static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}
static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}
	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}
static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};
static const struct attribute_group nhmex_uncore_sbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_sbox_formats_attr,
};
static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};
enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};
static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTRA_REG(0x0, DSP),
	MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTRA_REG(0xa, ISS),
	MBOX_INC_SEL_EXTRA_REG(0xa, PLD),
	MBOX_INC_SEL_EXTRA_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};
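/*
 * Worked example (editor's illustration): MBOX_INC_SEL_EXTRA_REG(0xd,
 * ZDP_CTL_FVC) matches events whose inc_sel field equals 0xd (config &
 * MBOX_INC_SEL_MASK == 0xd << 9 == 0x1a00) and routes their config1 to
 * the NHMEX_M0_MSR_PMU_ZDP_CTL_FVC MSR (0xcab), tagged with index
 * EXTRA_REG_NHMEX_M_ZDP_CTL_FVC.
 */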
/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}
static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}
static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * rest of the fields.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}
static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}
static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}
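/*
 * Worked example (editor's illustration): for the extra_reg entry
 * created by MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC), er->event >>
 * NHMEX_M_PMON_CTL_INC_SEL_SHIFT is 0xe, so the function returns
 * EXTRA_REG_NHMEX_M_ZDP_CTL_FVC + 1; events 0xd ~ 0x10 thus get the
 * pseudo indices FVC+0 ~ FVC+3 used by the shared-reg helpers above.
 */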
static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require at most 2 extra MSRs. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass two MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
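/*
 * Editor's note on the packing used above (illustrative): reg1->reg
 * holds up to two 16-bit MSR addresses and reg1->idx up to two 8-bit
 * pseudo indices, extracted later with __BITS_VALUE(). For event 0xa,
 * which needs both ISS and PLD, the ISS config travels in config1[31:0]
 * and the PLD config in config1[63:32] (slot 1 is reserved for PLD).
 */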
static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}
static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};
static const struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};
static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};
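/*
 * Illustrative usage (editor's sketch; the exact PMU name depends on
 * how the uncore core registers the boxes on a given kernel):
 *   perf stat -a -e 'uncore_mbox_0/bbox_cmds_read/' sleep 1
 * would count mbox-to-bbox read commands using the event descriptors
 * above.
 */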
static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};
static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}
/*
 * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in
 * an event set use the same extra register, so an event set uses
 * 5 extra registers.
 */
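/*
 * Worked example (editor's illustration): for reg1->idx = 9, idx =
 * 9 % 6 = 3; since 3 > 2 the shared-register index drops to 2, and
 * the second event set adds (9 / 6) * 5 = 5, giving er_idx = 7.
 */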
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ok = false;
	int idx, er_idx;
	u64 config1;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra register,
		 * the 0~7 bits and the 8~15 bits respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
		    !((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
		    (er->config == (hwc->config >> 32) &&
		     er->config1 == reg1->config &&
		     er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}
static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}
static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}
static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
		       uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
		       hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
		       hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
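/*
 * Worked example (editor's illustration): on the second rbox PMU
 * (pmu_idx = 1), an event with reg1->idx = 14 maps to port = 14 / 6 +
 * 1 * 4 = 6, and since idx % 6 == 2 the QLX config lands in
 * NHMEX_R_MSR_PORTN_QLX_CFG(6) = 0xe2c + 6 - 4 = 0xe2e.
 */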
DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};
static const struct attribute_group nhmex_uncore_rbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_rbox_formats_attr,
};
static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};
static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};
static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};
void nhmex_uncore_cpu_init(void)
{
	if (boot_cpu_data.x86_model == 46)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = nhmex_msr_uncores;
}
/* end of Nehalem-EX uncore support */