// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
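/* extract the i-th n-bit wide field of x, e.g. one slot of a packed refcount */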
#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
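
/*
 * Box-level control: setting SNBEP_PMON_BOX_CTL_FRZ in a box control
 * register freezes every counter in the box, and clearing it resumes
 * counting.  Writing the RST bits (SNBEP_PMON_BOX_CTL_INT) at init time
 * resets the box's counters and control registers.
 */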
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
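
/*
 * The attribute arrays below populate each uncore PMU's sysfs "format"
 * directory; every entry documents which config/config1/config2 bits a
 * user-visible event field occupies.
 */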
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
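
/*
 * Each IMC CAS operation transfers one 64-byte cache line, so the
 * cas_count scale 6.103515625e-5 (= 64 / 2^20) converts raw CAS counts
 * to MiB for the perf tool.
 */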
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};
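
/*
 * Event constraint tables: UNCORE_EVENT_CONSTRAINT(event, mask) restricts
 * an event code to the counters in the given bitmask, e.g. 0x3 means the
 * event may only be scheduled on counters 0 and 1 of the box.
 */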
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
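
/*
 * The Cbox filter register is shared by all counters in a box.  Each of
 * up to five filter fields carries a 6-bit reference count packed into
 * er->ref; get_constraint claims the fields an event needs under er->lock,
 * and put_constraint releases them when the event is scheduled out.
 */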
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
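
/*
 * The PCU filter MSR packs four 8-bit occupancy band thresholds; events
 * 0xb-0xe each consume one byte.  When an event's preferred byte is
 * already claimed with a different value, snbep_pcu_alter_er() shifts its
 * filter value to another byte and adjusts the event select to match.
 */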
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
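
/*
 * Occupancy band events 0xb-0xe take an 8-bit threshold from config1;
 * hw_config records which byte of the shared filter MSR the event uses
 * (idx = ev_sel - 0xb).
 */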
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
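
/*
 * The Ubox, Cbox and PCU PMONs above are programmed via MSRs; the
 * remaining SNB-EP boxes (HA, IMC, QPI, R2PCIE, R3QPI) live in PCI config
 * space and are set up below.  There is one Cbox per core, which is why
 * cpu_init caps num_boxes at the core count.
 */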
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
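
/*
 * The QPI packet match/mask registers live in a separate "filter" PCI
 * device, not in the PMON device itself, so enable_event looks up the
 * companion device for this port and writes the 64-bit match (config1)
 * and mask (config2) values there before enabling the counter.
 */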
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
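
/*
 * The Ubox GIDNIDMAP register packs eight 3-bit node IDs, one per group.
 * snbep_pci2phy_map_init() reads the local node ID from CPUNODEID, then
 * scans the mapping register to find which group index it occupies; that
 * index becomes the physical package ID for the uncore devices on the bus.
 */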
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}

static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
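
/*
 * On IvyTown the 64-bit Cbox filter value is split across two MSRs, with
 * the high half living six MSR addresses above the low half, so enabling
 * an event writes the filter in two 32-bit pieces.
 */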
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};

void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1668 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1670 struct pci_dev *pdev = box->pci_dev;
1671 struct hw_perf_event *hwc = &event->hw;
1673 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1674 hwc->config | SNBEP_PMON_CTL_EN);
1677 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1679 struct pci_dev *pdev = box->pci_dev;
1680 struct hw_perf_event *hwc = &event->hw;
1682 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
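
/*
 * PCI config space is read 32 bits at a time: fetch the low and high
 * halves of the counter separately and splice them into a single u64.
 */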
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};

static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]		= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]		= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]		= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]		= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */

/* KNL uncore support */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};

static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};

static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}

static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}

static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
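
		/*
		 * The node-scope filter bits below are always set when a
		 * filter register is in use; only the TID/state/opcode
		 * fields are taken from config1.
		 */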
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}

static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
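
	/*
	 * Writing zero to the box control register clears the freeze bit,
	 * so the box's counters start (or resume) counting.
	 */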
	pci_write_config_dword(pdev, box_ctl, 0);
}

static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
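
	/*
	 * Fixed-function events are enabled with KNL's dedicated
	 * fixed-counter enable bit; all other events use the generic one.
	 */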
	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
			== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};

enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
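
/*
 * The first two arguments of UNCORE_PCI_DEV_FULL_DATA() below are the
 * expected PCI device and function numbers, which is how instances
 * sharing a device ID are told apart.
 */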
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}

/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};

static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};

static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;
	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;
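
		/*
		 * Accumulate the control bits in 'flags' so that each
		 * wrmsrl() sets exactly one additional bit.
		 */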
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}

static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};

void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
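		/* Bits 7:6 of the capability word are zero on parts with only two SBOXes */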
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}

static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
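
/*
 * Each CAS event transfers one 64-byte cache line, so the scale
 * 6.103515625e-5 (= 64 / 2^20) converts raw counts to MiB.
 */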
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]		= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]		= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]		= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]		= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};

int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;
	return 0;
}
/* end of Haswell-EP uncore support */

/* BDX uncore support */

static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};

/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
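/* Events that use it (bit 0x80 set) are therefore constrained to counters 1-3 (0xe) */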
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};

void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}

static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};

int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}

/* end of BDX uncore support */

/* SKX uncore support */

static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	EVENT_EXTRA_END
};

static u64 skx_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8) {
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}

static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
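	/* hence the IVB-EP style init_box, which does not set FRZ_EN */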
3404 .init_box = ivbep_uncore_msr_init_box,
3405 .disable_box = snbep_uncore_msr_disable_box,
3406 .enable_box = snbep_uncore_msr_enable_box,
3407 .disable_event = snbep_uncore_msr_disable_event,
3408 .enable_event = hswep_cbox_enable_event,
3409 .read_counter = uncore_msr_read_counter,
3410 .hw_config = skx_cha_hw_config,
3411 .get_constraint = skx_cha_get_constraint,
3412 .put_constraint = snbep_cbox_put_constraint,
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};

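/*
 * Usage sketch (illustrative): each CHA is exposed to user space as an
 * "uncore_cha_<N>" PMU, so a raw event can be requested with something like:
 *
 *   perf stat -a -e uncore_cha_0/event=0x36/ sleep 1
 *
 * Events 0x11 and 0x36 are pinned to counter 0 by
 * skx_uncore_chabox_constraints above (counter mask 0x1).
 */
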
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};

static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

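/*
 * Note (explanatory): unlike hswep_cbox_enable_event(), used by the CHA
 * above, the IIO enable path has no box-filter register to program: it
 * simply sets SNBEP_PMON_CTL_EN on top of the already-configured event bits.
 */
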
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};

static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};

static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};

static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};

static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};

/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
 * which is located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
 */
#define SKX_CAPID6		0x9c
#define SKX_CHA_BIT_MASK	GENMASK(27, 0)

static int skx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u32 val = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, SKX_CAPID6, &val);
	val &= SKX_CHA_BIT_MASK;
out:
	pci_dev_put(dev);
	return hweight32(val);
}

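/*
 * Worked example (illustrative): each set bit in CAPID6[27:0] represents one
 * enabled CHA, so a part with val == 0x3ffffff (26 bits set) reports
 * hweight32(val) == 26 boxes.  If the 0x2083 device is absent, val stays 0
 * and no CHA boxes are registered.
 */
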
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}

static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};

static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

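/*
 * Note (explanatory): UNCORE_BOX_FLAG_CTL_OFFS8 tells the generic uncore
 * code that this box's event control registers are laid out 8 bytes apart
 * rather than the default 4, i.e. counter N's control register sits at
 * roughly event_ctl + 8 * N when the register offset is computed.
 */
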
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};

static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

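/*
 * Note on the encoding above: UNCORE_PCI_DEV_FULL_DATA() (see uncore.h)
 * packs the expected PCI device/function plus the uncore type and box index
 * into driver_data.  For example, the MC0 Channel 0 entry says: device id
 * 0x2042, found at device 10, function 2, is IMC box 0 of its socket.
 */
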
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};

int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}

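/*
 * Note (explanatory): snbep_pci2phy_map_init() locates the ubox PCI device
 * (id 0x2014 on SKX) and reads the SKX_CPUNODEID/SKX_GIDNIDMAP registers to
 * build the PCI bus number -> physical socket mapping that the PCI uncore
 * boxes registered above rely on.
 */
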
/* end of SKX uncore support */