// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.valid = 1,				\
	.idx = (i)				\
}
/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
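/*
 * Example: __BITS_VALUE(0x00badcfe, 1, 8) == 0xdc, i.e. the second 8-bit
 * field of x. The shared-register constraint code below uses this to unpack
 * one per-filter reference count from a single packed atomic_t.
 */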
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF
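/*
 * Unlike the MSR and PCI-config boxes above, the SNR IMC counters live in
 * MMIO space: BASE_OFFSET/MEM0_OFFSET name PCI config registers whose masked
 * contents locate the MMIO region, each channel's registers sit
 * SNR_IMC_MMIO_OFFSET apart, and SNR_IMC_MMIO_SIZE bytes are mapped per box.
 * The exact shifts applied to the masked base bits are in the SNR init code,
 * which is outside this excerpt.
 */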
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
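/*
 * Each DEFINE_UNCORE_FORMAT_ATTR() above becomes a sysfs "format" file,
 * e.g. /sys/bus/event_source/devices/uncore_cbox_0/format/event, telling
 * userspace which config/config1/config2 bits a field occupies. A raw
 * uncore event can then be requested as, for instance (illustrative
 * values only):
 *	perf stat -e 'uncore_cbox_0/event=0x34,umask=0x03/'
 */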
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	/* counters are 64 bit wide but only 32 bits can be read per access */
	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
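/*
 * PMON box control protocol used by the ops above: writing
 * SNBEP_PMON_BOX_CTL_INT (reset counters and control, freeze-enable)
 * brings a box to a known state at init, and toggling
 * SNBEP_PMON_BOX_CTL_FRZ in the same register freezes/unfreezes every
 * counter in the box around enable/disable. The MSR-based boxes below
 * follow the same scheme through rdmsrl/wrmsrl.
 */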
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
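/*
 * In the table above, each entry's first argument is the event+umask
 * pattern to match against the event control value, the second is the
 * match mask, and the third is a bitmask of which C-Box filter fields
 * (TID/NID/STATE/OPC) that event needs; snbep_cbox_hw_config() ORs these
 * idx bits together and uses snbep_cbox_filter_mask() to derive the
 * filter-register bits the event is allowed to program.
 */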
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
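/*
 * The C-Box filter register is shared by all counters in a box, so
 * __snbep_cbox_get_constraint() treats er->ref as five packed 6-bit
 * reference counts, one per filter field. A field is granted either when
 * its refcount is zero or when the requested bits match what is already
 * programmed; otherwise the partial allocation is rolled back and the
 * empty constraint is returned to fail event scheduling.
 */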
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
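/*
 * The PCU filter register holds four independent 8-bit occupancy bands.
 * snbep_pcu_alter_er() moves an event's band value between those byte
 * lanes; because the band events are themselves consecutive event selects,
 * bumping hwc->config by (new_idx - reg1->idx) retargets the event to the
 * band matching the new lane. With modify=false it only computes the
 * shifted config so a free lane can be probed first.
 */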
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
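/*
 * Only event selects 0xb-0xe take a PCU filter: event N uses byte lane
 * (N - 0xb) of SNBEP_PCU_MSR_PMON_BOX_FILTER, programmed from the matching
 * byte of attr.config1 (the filter_band0-3 format fields above).
 */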
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
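/*
 * Event 0x38 is the QPI packet match/mask event: attr.config1 supplies the
 * 64-bit MATCH0/1 value and attr.config2 the MASK0/1 value. These registers
 * live in a separate "filter" PCI function per port, which is why
 * enable_event below writes them through uncore_extra_pci_dev rather than
 * through the counter device itself.
 */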
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
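/*
 * Worked example of the decode above: the mapping register packs eight
 * 3-bit node IDs, one per index. If a UBOX's CPUNODEID register reads 3
 * and bits [5:3] of GIDNIDMAP also hold 3, the loop stops at i == 1 and
 * every bus behind that UBOX is tagged with physical id 1; buses with no
 * UBOX of their own then inherit the id of a neighbouring mapped bus in
 * the second pass.
 */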
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1761 static struct intel_uncore_ops ivbep_uncore_irp_ops
= {
1762 .init_box
= ivbep_uncore_pci_init_box
,
1763 .disable_box
= snbep_uncore_pci_disable_box
,
1764 .enable_box
= snbep_uncore_pci_enable_box
,
1765 .disable_event
= ivbep_uncore_irp_disable_event
,
1766 .enable_event
= ivbep_uncore_irp_enable_event
,
1767 .read_counter
= ivbep_uncore_irp_read_counter
,
1770 static struct intel_uncore_type ivbep_uncore_irp
= {
1774 .perf_ctr_bits
= 48,
1775 .event_mask
= IVBEP_PMON_RAW_EVENT_MASK
,
1776 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
1777 .ops
= &ivbep_uncore_irp_ops
,
1778 .format_group
= &ivbep_uncore_format_group
,
1781 static struct intel_uncore_ops ivbep_uncore_qpi_ops
= {
1782 .init_box
= ivbep_uncore_pci_init_box
,
1783 .disable_box
= snbep_uncore_pci_disable_box
,
1784 .enable_box
= snbep_uncore_pci_enable_box
,
1785 .disable_event
= snbep_uncore_pci_disable_event
,
1786 .enable_event
= snbep_qpi_enable_event
,
1787 .read_counter
= snbep_uncore_pci_read_counter
,
1788 .hw_config
= snbep_qpi_hw_config
,
1789 .get_constraint
= uncore_get_constraint
,
1790 .put_constraint
= uncore_put_constraint
,
1793 static struct intel_uncore_type ivbep_uncore_qpi
= {
1797 .perf_ctr_bits
= 48,
1798 .perf_ctr
= SNBEP_PCI_PMON_CTR0
,
1799 .event_ctl
= SNBEP_PCI_PMON_CTL0
,
1800 .event_mask
= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK
,
1801 .box_ctl
= SNBEP_PCI_PMON_BOX_CTL
,
1802 .num_shared_regs
= 1,
1803 .ops
= &ivbep_uncore_qpi_ops
,
1804 .format_group
= &ivbep_uncore_qpi_format_group
,
1807 static struct intel_uncore_type ivbep_uncore_r2pcie
= {
1811 .perf_ctr_bits
= 44,
1812 .constraints
= snbep_uncore_r2pcie_constraints
,
1813 IVBEP_UNCORE_PCI_COMMON_INIT(),
1816 static struct intel_uncore_type ivbep_uncore_r3qpi
= {
1820 .perf_ctr_bits
= 44,
1821 .constraints
= snbep_uncore_r3qpi_constraints
,
1822 IVBEP_UNCORE_PCI_COMMON_INIT(),
1826 IVBEP_PCI_UNCORE_HA
,
1827 IVBEP_PCI_UNCORE_IMC
,
1828 IVBEP_PCI_UNCORE_IRP
,
1829 IVBEP_PCI_UNCORE_QPI
,
1830 IVBEP_PCI_UNCORE_R2PCIE
,
1831 IVBEP_PCI_UNCORE_R3QPI
,
1834 static struct intel_uncore_type
*ivbep_pci_uncores
[] = {
1835 [IVBEP_PCI_UNCORE_HA
] = &ivbep_uncore_ha
,
1836 [IVBEP_PCI_UNCORE_IMC
] = &ivbep_uncore_imc
,
1837 [IVBEP_PCI_UNCORE_IRP
] = &ivbep_uncore_irp
,
1838 [IVBEP_PCI_UNCORE_QPI
] = &ivbep_uncore_qpi
,
1839 [IVBEP_PCI_UNCORE_R2PCIE
] = &ivbep_uncore_r2pcie
,
1840 [IVBEP_PCI_UNCORE_R3QPI
] = &ivbep_uncore_r3qpi
,
1844 static const struct pci_device_id ivbep_uncore_pci_ids
[] = {
1845 { /* Home Agent 0 */
1846 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe30),
1847 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA
, 0),
1849 { /* Home Agent 1 */
1850 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe38),
1851 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA
, 1),
1853 { /* MC0 Channel 0 */
1854 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb4),
1855 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 0),
1857 { /* MC0 Channel 1 */
1858 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb5),
1859 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 1),
1861 { /* MC0 Channel 3 */
1862 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb0),
1863 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 2),
1865 { /* MC0 Channel 4 */
1866 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xeb1),
1867 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 3),
1869 { /* MC1 Channel 0 */
1870 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef4),
1871 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 4),
1873 { /* MC1 Channel 1 */
1874 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef5),
1875 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 5),
1877 { /* MC1 Channel 3 */
1878 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef0),
1879 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 6),
1881 { /* MC1 Channel 4 */
1882 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xef1),
1883 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC
, 7),
1886 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe39),
1887 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP
, 0),
1890 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe32),
1891 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI
, 0),
1894 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe33),
1895 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI
, 1),
1898 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe3a),
1899 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI
, 2),
1902 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe34),
1903 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE
, 0),
1905 { /* R3QPI0 Link 0 */
1906 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe36),
1907 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI
, 0),
1909 { /* R3QPI0 Link 1 */
1910 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe37),
1911 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI
, 1),
1913 { /* R3QPI1 Link 2 */
1914 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe3e),
1915 .driver_data
= UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI
, 2),
1917 { /* QPI Port 0 filter */
1918 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe86),
1919 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
1920 SNBEP_PCI_QPI_PORT0_FILTER
),
1922 { /* QPI Port 0 filter */
1923 PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0xe96),
1924 .driver_data
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV
,
1925 SNBEP_PCI_QPI_PORT1_FILTER
),
1927 { /* end: all zeroes */ }
1930 static struct pci_driver ivbep_uncore_pci_driver
= {
1931 .name
= "ivbep_uncore",
1932 .id_table
= ivbep_uncore_pci_ids
,
1935 int ivbep_uncore_pci_init(void)
1937 int ret
= snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID
, SNBEP_GIDNIDMAP
, true);
1940 uncore_pci_uncores
= ivbep_pci_uncores
;
1941 uncore_pci_driver
= &ivbep_uncore_pci_driver
;
1944 /* end of IvyTown uncore support */
1946 /* KNL uncore support */
1947 static struct attribute
*knl_uncore_ubox_formats_attr
[] = {
1948 &format_attr_event
.attr
,
1949 &format_attr_umask
.attr
,
1950 &format_attr_edge
.attr
,
1951 &format_attr_tid_en
.attr
,
1952 &format_attr_inv
.attr
,
1953 &format_attr_thresh5
.attr
,
1957 static const struct attribute_group knl_uncore_ubox_format_group
= {
1959 .attrs
= knl_uncore_ubox_formats_attr
,
1962 static struct intel_uncore_type knl_uncore_ubox
= {
1966 .perf_ctr_bits
= 48,
1967 .fixed_ctr_bits
= 48,
1968 .perf_ctr
= HSWEP_U_MSR_PMON_CTR0
,
1969 .event_ctl
= HSWEP_U_MSR_PMON_CTL0
,
1970 .event_mask
= KNL_U_MSR_PMON_RAW_EVENT_MASK
,
1971 .fixed_ctr
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR
,
1972 .fixed_ctl
= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL
,
1973 .ops
= &snbep_uncore_msr_ops
,
1974 .format_group
= &knl_uncore_ubox_format_group
,
1977 static struct attribute
*knl_uncore_cha_formats_attr
[] = {
1978 &format_attr_event
.attr
,
1979 &format_attr_umask
.attr
,
1980 &format_attr_qor
.attr
,
1981 &format_attr_edge
.attr
,
1982 &format_attr_tid_en
.attr
,
1983 &format_attr_inv
.attr
,
1984 &format_attr_thresh8
.attr
,
1985 &format_attr_filter_tid4
.attr
,
1986 &format_attr_filter_link3
.attr
,
1987 &format_attr_filter_state4
.attr
,
1988 &format_attr_filter_local
.attr
,
1989 &format_attr_filter_all_op
.attr
,
1990 &format_attr_filter_nnm
.attr
,
1991 &format_attr_filter_opc3
.attr
,
1992 &format_attr_filter_nc
.attr
,
1993 &format_attr_filter_isoc
.attr
,
1997 static const struct attribute_group knl_uncore_cha_format_group
= {
1999 .attrs
= knl_uncore_cha_formats_attr
,
2002 static struct event_constraint knl_uncore_cha_constraints
[] = {
2003 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2004 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2005 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2006 EVENT_CONSTRAINT_END
2009 static struct extra_reg knl_uncore_cha_extra_regs
[] = {
2010 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN
,
2011 SNBEP_CBO_PMON_CTL_TID_EN
, 0x1),
2012 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2013 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2014 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
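
/*
 * Map the field-select bits accumulated in er->idx to the matching CHA
 * filter-register bits: 0x1 selects TID filtering, 0x2 the cache-state
 * filter, 0x4 the opcode filter.
 */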
static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}

static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
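
/*
 * Accumulate the extra-register index bits matching this event, then
 * program the per-box filter register; KNL additionally forces the
 * remote-node, local-node and NNC filter bits on.
 */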
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box	= snbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= hswep_cbox_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= knl_cha_hw_config,
	.get_constraint	= knl_cha_get_constraint,
	.put_constraint	= snbep_cbox_put_constraint,
};

static struct intel_uncore_type knl_uncore_cha = {
	.perf_ctr_bits	= 48,
	.event_ctl	= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr	= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask	= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset	= KNL_CHA_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints	= knl_uncore_cha_constraints,
	.ops		= &knl_uncore_cha_ops,
	.format_group	= &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name	= "format",
	.attrs	= knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.perf_ctr_bits	= 48,
	.perf_ctr	= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl	= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask	= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
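
/*
 * Unlike the common SNB-EP helper, enabling a KNL IMC box simply clears
 * the whole box-control register, including any freeze bits.
 */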
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
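
/*
 * The fixed (clockticks) counter has its own enable bit; all other
 * events use the common SNB-EP enable bit.
 */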
static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
			== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

static struct intel_uncore_type knl_uncore_imc_uclk = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl	= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl	= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl	= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops		= &knl_uncore_imc_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_imc_dclk = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl	= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl	= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl	= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops		= &knl_uncore_imc_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_uclk = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl	= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl	= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl	= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops		= &knl_uncore_imc_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_eclk = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl	= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl	= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl	= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops		= &knl_uncore_imc_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name	= "format",
	.attrs	= knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl	= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops		= &snbep_uncore_pci_ops,
	.format_group	= &knl_uncore_irp_format_group,
};

enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an uncore PMU
 * device type.  Prior to KNL, each instance of a PMU device type had a
 * unique device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814);	/* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817);	/* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}

/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name	= "format",
	.attrs	= hswep_uncore_ubox_formats_attr,
};
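
/*
 * The Ubox has a single filter register shared by all of its counters;
 * stash the user's config1 bits so they are written out when the event
 * is enabled.
 */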
static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config	= hswep_ubox_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct intel_uncore_type hswep_uncore_ubox = {
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl	= HSWEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs = 1,
	.ops		= &hswep_uncore_ubox_ops,
	.format_group	= &hswep_uncore_ubox_format_group,
};

static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name	= "format",
	.attrs	= hswep_uncore_cbox_formats_attr,
};

static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
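
/*
 * Field-select bits for the Cbox filter: 0x1 = TID, 0x2 = link,
 * 0x4 = cache state, 0x8 = node ID; 0x10 selects the whole
 * opcode/NC/C6/ISOC group.
 */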
static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box	= snbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= hswep_cbox_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= hswep_cbox_hw_config,
	.get_constraint	= hswep_cbox_get_constraint,
	.put_constraint	= snbep_cbox_put_constraint,
};

static struct intel_uncore_type hswep_uncore_cbox = {
	.perf_ctr_bits	= 48,
	.event_ctl	= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr	= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask	= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset	= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints	= hswep_uncore_cbox_constraints,
	.ops		= &hswep_uncore_cbox_ops,
	.format_group	= &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box	= hswep_uncore_sbox_msr_init_box,
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name	= "format",
	.attrs	= hswep_uncore_sbox_formats_attr,
};

static struct intel_uncore_type hswep_uncore_sbox = {
	.perf_ctr_bits	= 44,
	.event_ctl	= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr	= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask	= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset	= HSWEP_SBOX_MSR_OFFSET,
	.ops		= &hswep_uncore_sbox_msr_ops,
	.format_group	= &hswep_uncore_sbox_format_group,
};
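
/*
 * Only PCU events 0xb-0xe use the band filter register; remember which
 * filter slot this event selects.
 */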
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}

static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config	= hswep_pcu_hw_config,
	.get_constraint	= snbep_pcu_get_constraint,
	.put_constraint	= snbep_pcu_put_constraint,
};

static struct intel_uncore_type hswep_uncore_pcu = {
	.perf_ctr_bits	= 48,
	.perf_ctr	= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl	= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask	= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops		= &hswep_uncore_pcu_ops,
	.format_group	= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};

void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}

static struct intel_uncore_type hswep_uncore_ha = {
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type hswep_uncore_imc = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
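
/* As on IvyTown, the IRP counters are read as two 32-bit halves. */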
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

static struct intel_uncore_type hswep_uncore_irp = {
	.perf_ctr_bits	= 48,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &hswep_uncore_irp_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct intel_uncore_type hswep_uncore_qpi = {
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops		= &snbep_uncore_qpi_ops,
	.format_group	= &snbep_uncore_qpi_format_group,
};

static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};

int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;
	return 0;
}
/* end of Haswell-EP uncore support */

/* BDX uncore support */

static struct intel_uncore_type bdx_uncore_ubox = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl	= HSWEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs = 1,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_cbox = {
	.perf_ctr_bits	= 48,
	.event_ctl	= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr	= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask	= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset	= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints	= bdx_uncore_cbox_constraints,
	.ops		= &hswep_uncore_cbox_ops,
	.format_group	= &hswep_uncore_cbox_format_group,
};

static struct intel_uncore_type bdx_uncore_sbox = {
	.perf_ctr_bits	= 48,
	.event_ctl	= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr	= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask	= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset	= HSWEP_SBOX_MSR_OFFSET,
	.ops		= &hswep_uncore_sbox_msr_ops,
	.format_group	= &hswep_uncore_sbox_format_group,
};

#define BDX_MSR_UNCORE_SBOX	3

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};

/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
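
/*
 * BDX-DE (model 86) has no SBOX at all; on other BDX parts, bits 7:6 of
 * the PCU.3 CAPID4 register are read below to detect systems without
 * SBOXes.
 */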
void bdx_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* BDX-DE doesn't have SBOX */
	if (boot_cpu_data.x86_model == 86) {
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	/* Detect systems with no SBOXes */
	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		struct pci_dev *pdev;
		u32 capid4;

		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
		pci_read_config_dword(pdev, 0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	}
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}

static struct intel_uncore_type bdx_uncore_ha = {
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_imc = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_irp = {
	.perf_ctr_bits	= 48,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &hswep_uncore_irp_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct intel_uncore_type bdx_uncore_qpi = {
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops		= &snbep_uncore_qpi_ops,
	.format_group	= &snbep_uncore_qpi_format_group,
};

static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r2pcie = {
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r3qpi = {
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};

int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}

/* end of BDX uncore support */

/* SKX uncore support */

static struct intel_uncore_type skx_uncore_ubox = {
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl	= HSWEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name	= "format",
	.attrs	= skx_uncore_cha_formats_attr,
};

static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
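
/*
 * On SKX, field-select bits 0x1, 0x2 and 0x4 choose TID, link and
 * cache-state filtering; a single bit, 0x8, covers the whole group of
 * locality/opcode filter bits.
 */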
static u64 skx_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8) {
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}

static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box	= ivbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= hswep_cbox_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= skx_cha_hw_config,
	.get_constraint	= skx_cha_get_constraint,
	.put_constraint	= snbep_cbox_put_constraint,
};

static struct intel_uncore_type skx_uncore_chabox = {
	.perf_ctr_bits	= 48,
	.event_ctl	= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr	= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask	= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset	= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints	= skx_uncore_chabox_constraints,
	.ops		= &skx_uncore_chabox_ops,
	.format_group	= &skx_uncore_chabox_format_group,
};

static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name	= "format",
	.attrs	= skx_uncore_iio_formats_attr,
};

static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
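
/*
 * IIO events carry no extra filter state, so enabling one is a single
 * write of the event-control MSR.
 */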
static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box	= ivbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= skx_iio_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skx_uncore_iio = {
	.perf_ctr_bits	= 48,
	.event_ctl	= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr	= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset	= SKX_IIO_MSR_OFFSET,
	.constraints	= skx_uncore_iio_constraints,
	.ops		= &skx_uncore_iio_ops,
	.format_group	= &skx_uncore_iio_format_group,
};

enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
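
/*
 * Each entry lists { first counter MSR, counter stride, per-box stride,
 * number of counters, counter width in bits } for one free-running
 * counter type.
 */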
3603 static struct freerunning_counters skx_iio_freerunning
[] = {
3604 [SKX_IIO_MSR_IOCLK
] = { 0xa45, 0x1, 0x20, 1, 36 },
3605 [SKX_IIO_MSR_BW
] = { 0xb00, 0x1, 0x10, 8, 36 },
3606 [SKX_IIO_MSR_UTIL
] = { 0xb08, 0x1, 0x10, 8, 36 },
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
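/*
 * Note on the bandwidth scale above: the free-running bandwidth counters
 * count in 4-byte units, and 4 / 2^20 = 3.814697266e-6, so the scale
 * converts raw counts directly to MiB. Assuming the usual uncore PMU
 * naming, a port's inbound bandwidth can be read with e.g.:
 *
 *	perf stat -a -e uncore_iio_free_running_0/bw_in_port0/ -- sleep 1
 */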
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI ID 0x2083).
 */
#define SKX_CAPID6		0x9c
#define SKX_CHA_BIT_MASK	GENMASK(27, 0)
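/*
 * Each set bit in CAPID6[27:0] corresponds to one enabled CHA, so a
 * population count (hweight32) of the masked register value yields the
 * number of CHA boxes on the package.
 */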
static int skx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u32 val = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, SKX_CAPID6, &val);
	val &= SKX_CHA_BIT_MASK;
out:
	pci_dev_put(dev);
	return hweight32(val);
}
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
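/*
 * UPI PMON control registers are laid out 8 bytes apart, hence the
 * UNCORE_BOX_FLAG_CTL_OFFS8 flag; init_box also resets the box by
 * writing the INT (reset + freeze-enable) value to the box control
 * register in PCI config space.
 */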
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
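/*
 * Each entry below matches a PCI device ID and encodes, via
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx), the expected PCI
 * device/function numbers together with the uncore type and the
 * per-socket box index in driver_data.
 */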
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 PMon */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 PMon */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
/* end of SKX uncore support */

/* SNR uncore support */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};

static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
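/*
 * Each SNR CHA box has its own FILTER0 register; its address is derived
 * from the type's MSR stride (msr_offset) and the box index (pmu_idx).
 * config1 carries the TID filter value that snr_cha_enable_event()
 * writes before enabling the event.
 */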
static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	reg1->idx = 0;

	return 0;
}
static void snr_cha_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box	= ivbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= snr_cha_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= snr_cha_hw_config,
};
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
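/*
 * PCU event selects 0xb-0xe share the single PCU filter register; the
 * matching filter bits for the selected band are taken from the event's
 * config1 value.
 */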
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}

	return 0;
}
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config	= snr_pcu_hw_config,
	.get_constraint	= snbep_pcu_get_constraint,
	.put_constraint	= snbep_pcu_put_constraint,
};
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
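/*
 * Unlike the SKX M2M init above, the box control offset is obtained
 * from the uncore type via uncore_pci_box_ctl() instead of being
 * hard-coded.
 */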
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
enum {
	SNR_PCI_UNCORE_M2M,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]	= &snr_uncore_m2m,
	NULL,
};
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
int snr_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	return 0;
}
static struct pci_dev *snr_uncore_get_mc_dev(int id)
{
	struct pci_dev *mc_dev = NULL;
	int phys_id, pkg;

	while (1) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
		if (!mc_dev)
			break;
		phys_id = uncore_pcibus_to_physid(mc_dev->bus);
		if (phys_id < 0)
			continue;
		pkg = topology_phys_to_logical_pkg(phys_id);
		if (pkg == id)
			break;
	}
	return mc_dev;
}
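/*
 * SNR IMC PMON registers are memory-mapped rather than MSR- or
 * PCI-config-based: the MMIO base is reconstructed from two config
 * space dwords of the MC device (BASE bits shifted left by 23, MEM0
 * bits shifted left by 12), then ioremap()ed so the counters can be
 * accessed with readl()/writel().
 */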
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
	unsigned int box_ctl = uncore_mmio_box_ctl(box);
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
	if (!box->io_addr)
		return;

	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}
static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}
static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}
static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}
static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,		"event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,		"event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write,	"event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
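/*
 * The CAS count scale above is 64 / 2^20 = 6.103515625e-5: each CAS
 * command transfers one 64-byte cache line, so the scale converts raw
 * counts to MiB.
 */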
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */