// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54
/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
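/*
 * SNBEP_PMON_BOX_CTL_INT is the value written at box init: it resets
 * the box control logic and its counters and sets the freeze-enable
 * bit, so the box can subsequently be frozen via BOX_CTL_FRZ.
 */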
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)
/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0
/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c
/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09
/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)
#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
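/*
 * __BITS_VALUE(x, i, n) extracts the i'th n-bit wide field of x.  The
 * constraint code below uses it to unpack the per-field reference
 * counts that are packed into a single atomic_t per shared register.
 */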
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)
/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
				 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
				 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1
/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44
/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44
/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4
/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20
#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)
/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20
/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff
/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
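/*
 * The format attributes below are exported through sysfs (the "format"
 * directory of each uncore PMU) and describe how bits of the perf
 * config/config1/config2 words map onto the hardware control and filter
 * registers, so a raw event can be specified from userspace as e.g.
 * "uncore_imc_0/event=0x04,umask=0x03/".
 */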
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
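/*
 * PCI-based uncore boxes are frozen and unfrozen by toggling the FRZ
 * bit in the per-box control register; disable_box() sets it and
 * enable_box() clears it, leaving the rest of the box state untouched.
 */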
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}
static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
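/*
 * PCI config space is accessed one dword at a time, so the 48-bit
 * counter value is assembled from two 32-bit reads of the low and
 * high halves.
 */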
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}
static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
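/*
 * Each entry below maps an event/umask encoding to the filter fields it
 * requires: the last argument is a bit mask (0x1 TID, 0x2 NID, 0x4
 * STATE, 0x8 OPC) that hw_config() ORs together and hands to
 * snbep_cbox_filter_mask() when programming the shared filter register.
 */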
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
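/*
 * The C-Box filter register is shared by all counters in a box.  Up to
 * five filter fields are managed independently, each with a 6-bit
 * reference count packed into er->ref (unpacked via __BITS_VALUE); a
 * field may be claimed when it is unused or when it already holds the
 * exact value the new event wants to program.
 */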
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
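/*
 * PCU occupancy sub-events 0xb-0xe share one filter register that holds
 * four 8-bit band thresholds.  snbep_pcu_alter_er() shifts an event's
 * filter byte (and adjusts its event select) from one band slot to
 * another so the constraint code can retry with a free slot.
 */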
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
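/*
 * The QPI match/mask registers live in a separate PCI function (the
 * "filter" device claimed via UNCORE_EXTRA_PCI_DEV), so the event is
 * enabled in two steps: program that device first, then set the enable
 * bit in the QPI box's own control register.
 */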
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = box->pkgid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
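/*
 * UNCORE_PCI_DEV_DATA() packs an uncore type index plus a box instance
 * index into pci_device_id::driver_data; the generic probe code uses it
 * to tie each discovered PCI device back to the tables above.
 */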
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */
/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
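/*
 * IvyTown widened the C-Box filter to 64 bits split across two MSRs:
 * the low dword is written at reg1->reg and the high dword at the MSR
 * six numbers above it, matching the hardware's register numbering.
 */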
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]		= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]		= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]		= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]		= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */
/* KNL uncore support */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
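/*
 * OR together the filter fields requested by every extra_reg entry that
 * matches the event, derive the filter value from config1, and always set
 * the remote/local node and NNC bits so no traffic class is filtered out
 * by default.
 */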
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
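/*
 * KNL IMC boxes are unfrozen by writing 0 to the whole box control
 * register rather than by clearing an individual freeze bit.
 */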
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
							== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
/*
 * KNL uses a common PCI device ID for multiple instances of an uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
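/*
 * Instances sharing a device ID are therefore distinguished by their PCI
 * device/function numbers, which UNCORE_PCI_DEV_FULL_DATA() encodes into
 * driver_data alongside the PMU type and index.
 */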
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}

/* end of KNL uncore support */
/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
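/*
 * The 64-bit filter value lives in a pair of adjacent 32-bit filter MSRs,
 * so hswep_cbox_enable_event() below writes it as two halves before
 * enabling the event itself.
 */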
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
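/*
 * Note that the loop above rewrites the box control register once per set
 * bit, accumulating the final SNBEP_PMON_BOX_CTL_INT value instead of
 * writing it in a single wrmsrl().
 */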
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
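/*
 * PCU event selects 0xb-0xe correspond to the four occupancy filter bands;
 * each band occupies one byte of the filter register, hence the
 * (0xff << (reg1->idx * 8)) mask applied to config1 below.
 */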
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
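/*
 * Each CAS command moves one 64-byte cache line; the 6.103515625e-5 scale
 * above is 64/2^20, converting the raw count to MiB.
 */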
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]		= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]		= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]		= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]		= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};

int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;
	return 0;
}
/* end of Haswell-EP uncore support */
/* BDX uncore support */

static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
= {
3022 .perf_ctr_bits
= 48,
3023 .event_ctl
= HSWEP_C0_MSR_PMON_CTL0
,
3024 .perf_ctr
= HSWEP_C0_MSR_PMON_CTR0
,
3025 .event_mask
= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK
,
3026 .box_ctl
= HSWEP_C0_MSR_PMON_BOX_CTL
,
3027 .msr_offset
= HSWEP_CBO_MSR_OFFSET
,
3028 .num_shared_regs
= 1,
3029 .constraints
= bdx_uncore_cbox_constraints
,
3030 .ops
= &hswep_uncore_cbox_ops
,
3031 .format_group
= &hswep_uncore_cbox_format_group
,
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
#define BDX_MSR_UNCORE_SBOX	3

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
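/*
 * EVENT_CONSTRAINT(0x80, 0xe, 0x80) keys on the 'Use Occupancy' bit (0x80)
 * and restricts events that set it to counters 1-3 (counter mask 0xe).
 */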
void bdx_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* BDX-DE doesn't have SBOX */
	if (boot_cpu_data.x86_model == 86) {
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	/* Detect systems with no SBOXes */
	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		struct pci_dev *pdev;
		u32 capid4;

		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
		pci_read_config_dword(pdev, 0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	}
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};

int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}

/* end of BDX uncore support */
/* SKX uncore support */

static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
static u64 skx_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8) {
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
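/*
 * The free-running bandwidth counters count 4-byte units; the
 * 3.814697266e-6 scale above is 4/2^20, converting the raw count to MiB.
 */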
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
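/*
 * Editor's note: once registered, these counters surface to userspace as
 * one PMU per IIO stack (presumably named along the lines of
 * "uncore_iio_free_running_<N>"), so something like
 *	perf stat -e uncore_iio_free_running_0/bw_in_port0/
 * should report inbound bandwidth already scaled to MiB via the
 * .scale/.unit descriptors above.  Since the counters free-run, there are
 * no enable/disable callbacks in skx_uncore_iio_freerunning_ops; perf
 * simply takes deltas of the raw reads.
 */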
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
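/*
 * Editor's note: .num_shared_regs = 1 allocates the shared-register
 * state that snbep_pcu_get_constraint()/snbep_pcu_put_constraint()
 * arbitrate.  The PCU filter (e.g. the frequency-band fields exposed as
 * filter_band0-3 above) lives in a single register shared by all of the
 * box's counters, so concurrent events must agree on its contents.
 */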
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
 * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
 */
#define SKX_CAPID6		0x9c
#define SKX_CHA_BIT_MASK	GENMASK(27, 0)
static int skx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u32 val = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, SKX_CAPID6, &val);
	val &= SKX_CHA_BIT_MASK;
out:
	pci_dev_put(dev);
	return hweight32(val);
}
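/*
 * Editor's note: CAPID6 is effectively a 28-bit presence mask, one bit
 * per possible CHA, so hweight32() of the masked value yields the number
 * of CHAs actually enabled on this package; a fully populated die would
 * report hweight32(GENMASK(27, 0)) = 28.  If the 0x2083 device is absent,
 * val stays 0 and the function reports zero CHA boxes.
 */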
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
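/*
 * Editor's note: UNCORE_BOX_FLAG_CTL_OFFS8 tells the generic PCI uncore
 * helpers that this box's event-control registers are laid out 8 bytes
 * apart rather than the default 4, i.e. counter idx is programmed at
 * event_ctl + 8 * idx instead of event_ctl + 4 * idx.
 */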
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
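/*
 * Editor's note: the second argument of UNCORE_EVENT_CONSTRAINT() is a
 * counter bitmask, so the entry above restricts event 0x23 to counters
 * 0-1 (mask 0x3); the m3upi table below likewise pins its events to
 * counter 0 only (0x1) or counters 0-2 (0x7).
 */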
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
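/*
 * Editor's note: UNCORE_PCI_DEV_FULL_DATA() packs the expected PCI
 * device/function numbers together with the uncore type index and the
 * box (instance) number.  That is how entries sharing one device ID are
 * told apart: the three 0x2058 UPI functions above sit at devices
 * 14/15/16 and map to boxes 0-2 of the "upi" type.
 */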
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
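/*
 * Editor's note: the 0x2014 device ID passed to snbep_pci2phy_map_init()
 * is assumed to be the SKX Ubox PCI function that exposes the
 * SKX_CPUNODEID and SKX_GIDNIDMAP registers; the helper reads those to
 * build the PCI-bus-to-physical-socket mapping that every PCI uncore box
 * registered above relies on when attributing counts to a package.
 */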
/* end of SKX uncore support */