// SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB
/*
 * Mellanox BlueField Performance Monitoring Counters driver
 *
 * This driver provides a sysfs interface for monitoring
 * performance statistics in BlueField SoC.
 *
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <uapi/linux/psci.h>

#define MLXBF_PMC_WRITE_REG_32 0x82000009
#define MLXBF_PMC_READ_REG_32 0x8200000A
#define MLXBF_PMC_WRITE_REG_64 0x8200000B
#define MLXBF_PMC_READ_REG_64 0x8200000C
#define MLXBF_PMC_SIP_SVC_UID 0x8200ff01
#define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03
#define MLXBF_PMC_SVC_REQ_MAJOR 0
#define MLXBF_PMC_SVC_MIN_MINOR 3

#define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4
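
/*
 * Note: the SMC function IDs above fall in the SiP service call range of the
 * Arm SMCCC and are implemented by the BlueField firmware (ATF).  They are
 * only used when the performance registers must be accessed through the
 * secure world, i.e. when svc_sreg_support is set during probe.
 */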

#define MLXBF_PMC_EVENT_SET_BF1 0
#define MLXBF_PMC_EVENT_SET_BF2 1
#define MLXBF_PMC_EVENT_INFO_LEN 100

#define MLXBF_PMC_MAX_BLOCKS 30
#define MLXBF_PMC_MAX_ATTRS 30
#define MLXBF_PMC_INFO_SZ 4
#define MLXBF_PMC_REG_SIZE 8
#define MLXBF_PMC_L3C_REG_SIZE 4

#define MLXBF_PMC_TYPE_COUNTER 1
#define MLXBF_PMC_TYPE_REGISTER 0

#define MLXBF_PMC_PERFCTL 0
#define MLXBF_PMC_PERFEVT 1
#define MLXBF_PMC_PERFACC0 4

#define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0)
#define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1)
#define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2)
#define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5)
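
/*
 * Counters in blocks of type MLXBF_PMC_TYPE_COUNTER are programmed
 * indirectly: a 64-bit word is written to the counter's config register with
 * ADDR selecting an internal register (PERFCTL, PERFEVT or PERFACC0), WDATA
 * carrying the value, STROBE set, and WR_R_B selecting a write (1) or a
 * read-back (0).  See mlxbf_pmc_program_counter() and
 * mlxbf_pmc_read_counter() below.
 */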

#define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16)
#define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20)
#define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24)
#define MLXBF_PMC_PERFCTL_AD0 BIT(27)
#define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28)
#define MLXBF_PMC_PERFCTL_EB0 BIT(30)
#define MLXBF_PMC_PERFCTL_EN0 BIT(31)

#define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24)

#define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0
#define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14
#define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40
#define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60

#define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0)
#define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24)

#define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0)

#define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0)
#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
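
/*
 * Each L3C counter value is split across two 32-bit registers: bits [31:0]
 * live in PERF_CNT_LOW and the remaining 25 valid bits in PERF_CNT_HIGH.
 * mlxbf_pmc_read_l3_counter() combines the two halves into a 64-bit value.
 */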

/**
 * Structure to hold attribute and block info for each sysfs entry
 * @dev_attr: Device attribute struct
 * @index: index to identify counter number within a block
 * @nr: block number to which the sysfs belongs
 */
struct mlxbf_pmc_attribute {
        struct device_attribute dev_attr;
        int index;
        int nr;
};

/**
 * Structure to hold info for each HW block
 *
 * @mmio_base: The VA at which the PMC block is mapped
 * @blk_size: Size of each mapped region
 * @counters: Number of counters in the block
 * @type: Type of counters in the block
 * @attr_counter: Attributes for "counter" sysfs files
 * @attr_event: Attributes for "event" sysfs files
 * @attr_event_list: Attributes for "event_list" sysfs files
 * @attr_enable: Attributes for "enable" sysfs files
 * @block_attr: All attributes needed for the block
 * @block_attr_grp: Attribute group for the block
 */
struct mlxbf_pmc_block_info {
        void __iomem *mmio_base;
        size_t blk_size;
        size_t counters;
        int type;
        struct mlxbf_pmc_attribute *attr_counter;
        struct mlxbf_pmc_attribute *attr_event;
        struct mlxbf_pmc_attribute attr_event_list;
        struct mlxbf_pmc_attribute attr_enable;
        struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
        struct attribute_group block_attr_grp;
};

/**
 * Structure to hold PMC context info
 *
 * @pdev: The kernel structure representing the device
 * @total_blocks: Total number of blocks
 * @tile_count: Number of tiles in the system
 * @hwmon_dev: Hwmon device for bfperf
 * @block_name: Block name
 * @block: Block info
 * @groups: Attribute groups from each block
 * @svc_sreg_support: Whether SMCs are used to access performance registers
 * @sreg_tbl_perf: Secure register access table number
 * @event_set: Event set to use
 */
struct mlxbf_pmc_context {
        struct platform_device *pdev;
        uint32_t total_blocks;
        uint32_t tile_count;
        struct device *hwmon_dev;
        const char *block_name[MLXBF_PMC_MAX_BLOCKS];
        struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
        const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
        bool svc_sreg_support;
        uint32_t sreg_tbl_perf;
        unsigned int event_set;
};

/**
 * Structure to hold supported events for each block
 * @evt_num: Event number used to program counters
 * @evt_name: Name of the event
 */
struct mlxbf_pmc_events {
        int evt_num;
        char *evt_name;
};

static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events[] = {
        { 0x0, "IN_P_PKT_CNT" },
        { 0x10, "IN_NP_PKT_CNT" },
        { 0x18, "IN_C_PKT_CNT" },
        { 0x20, "OUT_P_PKT_CNT" },
        { 0x28, "OUT_NP_PKT_CNT" },
        { 0x30, "OUT_C_PKT_CNT" },
        { 0x38, "IN_P_BYTE_CNT" },
        { 0x40, "IN_NP_BYTE_CNT" },
        { 0x48, "IN_C_BYTE_CNT" },
        { 0x50, "OUT_P_BYTE_CNT" },
        { 0x58, "OUT_NP_BYTE_CNT" },
        { 0x60, "OUT_C_BYTE_CNT" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
        { 0x0, "AW_REQ" },
        { 0x1, "AW_BEATS" },
        { 0x2, "AW_TRANS" },
        { 0x3, "AW_RESP" },
        { 0x4, "AW_STL" },
        { 0x5, "AW_LAT" },
        { 0x6, "AW_REQ_TBU" },
        { 0x8, "AR_REQ" },
        { 0x9, "AR_BEATS" },
        { 0xa, "AR_TRANS" },
        { 0xb, "AR_STL" },
        { 0xc, "AR_LAT" },
        { 0xd, "AR_REQ_TBU" },
        { 0xe, "TBU_MISS" },
        { 0xf, "TX_DAT_AF" },
        { 0x10, "RX_DAT_AF" },
        { 0x11, "RETRYQ_CRED" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
        { 0xa0, "TPIO_DATA_BEAT" },
        { 0xa1, "TDMA_DATA_BEAT" },
        { 0xa2, "MAP_DATA_BEAT" },
        { 0xa3, "TXMSG_DATA_BEAT" },
        { 0xa4, "TPIO_DATA_PACKET" },
        { 0xa5, "TDMA_DATA_PACKET" },
        { 0xa6, "MAP_DATA_PACKET" },
        { 0xa7, "TXMSG_DATA_PACKET" },
        { 0xa8, "TDMA_RT_AF" },
        { 0xa9, "TDMA_PBUF_MAC_AF" },
        { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
        { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
        { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
        { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
        { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
        { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
        { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
        { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
        { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
        { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
        { 0xa0, "TPIO_DATA_BEAT" },
        { 0xa1, "TDMA_DATA_BEAT" },
        { 0xa2, "MAP_DATA_BEAT" },
        { 0xa3, "TXMSG_DATA_BEAT" },
        { 0xa4, "TPIO_DATA_PACKET" },
        { 0xa5, "TDMA_DATA_PACKET" },
        { 0xa6, "MAP_DATA_PACKET" },
        { 0xa7, "TXMSG_DATA_PACKET" },
        { 0xa8, "TDMA_RT_AF" },
        { 0xa9, "TDMA_PBUF_MAC_AF" },
        { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
        { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
        { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
        { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
        { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
        { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
        { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
        { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
        { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
        { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
        { 0xb4, "TRIO_RING_TX_FLIT_CH0" },
        { 0xb5, "TRIO_RING_TX_FLIT_CH1" },
        { 0xb6, "TRIO_RING_TX_FLIT_CH2" },
        { 0xb7, "TRIO_RING_TX_FLIT_CH3" },
        { 0xb8, "TRIO_RING_TX_FLIT_CH4" },
        { 0xb9, "TRIO_RING_RX_FLIT_CH0" },
        { 0xba, "TRIO_RING_RX_FLIT_CH1" },
        { 0xbb, "TRIO_RING_RX_FLIT_CH2" },
        { 0xbc, "TRIO_RING_RX_FLIT_CH3" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
        { 0x100, "ECC_SINGLE_ERROR_CNT" },
        { 0x104, "ECC_DOUBLE_ERROR_CNT" },
        { 0x114, "SERR_INJ" },
        { 0x118, "DERR_INJ" },
        { 0x124, "ECC_SINGLE_ERROR_0" },
        { 0x164, "ECC_DOUBLE_ERROR_0" },
        { 0x340, "DRAM_ECC_COUNT" },
        { 0x344, "DRAM_ECC_INJECT" },
        { 0x348, "DRAM_ECC_ERROR" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
        { 0xc0, "RXREQ_MSS" },
        { 0xc1, "RXDAT_MSS" },
        { 0xc2, "TXRSP_MSS" },
        { 0xc3, "TXDAT_MSS" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
        { 0x45, "HNF_REQUESTS" },
        { 0x46, "HNF_REJECTS" },
        { 0x47, "ALL_BUSY" },
        { 0x48, "MAF_BUSY" },
        { 0x49, "MAF_REQUESTS" },
        { 0x4a, "RNF_REQUESTS" },
        { 0x4b, "REQUEST_TYPE" },
        { 0x4c, "MEMORY_READS" },
        { 0x4d, "MEMORY_WRITES" },
        { 0x4e, "VICTIM_WRITE" },
        { 0x4f, "POC_FULL" },
        { 0x50, "POC_FAIL" },
        { 0x51, "POC_SUCCESS" },
        { 0x52, "POC_WRITES" },
        { 0x53, "POC_READS" },
        { 0x54, "FORWARD" },
        { 0x55, "RXREQ_HNF" },
        { 0x56, "RXRSP_HNF" },
        { 0x57, "RXDAT_HNF" },
        { 0x58, "TXREQ_HNF" },
        { 0x59, "TXRSP_HNF" },
        { 0x5a, "TXDAT_HNF" },
        { 0x5b, "TXSNP_HNF" },
        { 0x5c, "INDEX_MATCH" },
        { 0x5d, "A72_ACCESS" },
        { 0x5e, "IO_ACCESS" },
        { 0x5f, "TSO_WRITE" },
        { 0x60, "TSO_CONFLICT" },
        { 0x61, "DIR_HIT" },
        { 0x62, "HNF_ACCEPTS" },
        { 0x63, "REQ_BUF_EMPTY" },
        { 0x64, "REQ_BUF_IDLE_MAF" },
        { 0x65, "TSO_NOARB" },
        { 0x66, "TSO_NOARB_CYCLES" },
        { 0x67, "MSS_NO_CREDIT" },
        { 0x68, "TXDAT_NO_LCRD" },
        { 0x69, "TXSNP_NO_LCRD" },
        { 0x6a, "TXRSP_NO_LCRD" },
        { 0x6b, "TXREQ_NO_LCRD" },
        { 0x6c, "TSO_CL_MATCH" },
        { 0x6d, "MEMORY_READS_BYPASS" },
        { 0x6e, "TSO_NOARB_TIMEOUT" },
        { 0x6f, "ALLOCATE" },
        { 0x70, "VICTIM" },
        { 0x71, "A72_WRITE" },
        { 0x72, "A72_READ" },
        { 0x73, "IO_WRITE" },
        { 0x74, "IO_READ" },
        { 0x75, "TSO_REJECT" },
        { 0x80, "TXREQ_RN" },
        { 0x81, "TXRSP_RN" },
        { 0x82, "TXDAT_RN" },
        { 0x83, "RXSNP_RN" },
        { 0x84, "RXRSP_RN" },
        { 0x85, "RXDAT_RN" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
        { 0x12, "CDN_REQ" },
        { 0x13, "DDN_REQ" },
        { 0x14, "NDN_REQ" },
        { 0x15, "CDN_DIAG_N_OUT_OF_CRED" },
        { 0x16, "CDN_DIAG_S_OUT_OF_CRED" },
        { 0x17, "CDN_DIAG_E_OUT_OF_CRED" },
        { 0x18, "CDN_DIAG_W_OUT_OF_CRED" },
        { 0x19, "CDN_DIAG_C_OUT_OF_CRED" },
        { 0x1a, "CDN_DIAG_N_EGRESS" },
        { 0x1b, "CDN_DIAG_S_EGRESS" },
        { 0x1c, "CDN_DIAG_E_EGRESS" },
        { 0x1d, "CDN_DIAG_W_EGRESS" },
        { 0x1e, "CDN_DIAG_C_EGRESS" },
        { 0x1f, "CDN_DIAG_N_INGRESS" },
        { 0x20, "CDN_DIAG_S_INGRESS" },
        { 0x21, "CDN_DIAG_E_INGRESS" },
        { 0x22, "CDN_DIAG_W_INGRESS" },
        { 0x23, "CDN_DIAG_C_INGRESS" },
        { 0x24, "CDN_DIAG_CORE_SENT" },
        { 0x25, "DDN_DIAG_N_OUT_OF_CRED" },
        { 0x26, "DDN_DIAG_S_OUT_OF_CRED" },
        { 0x27, "DDN_DIAG_E_OUT_OF_CRED" },
        { 0x28, "DDN_DIAG_W_OUT_OF_CRED" },
        { 0x29, "DDN_DIAG_C_OUT_OF_CRED" },
        { 0x2a, "DDN_DIAG_N_EGRESS" },
        { 0x2b, "DDN_DIAG_S_EGRESS" },
        { 0x2c, "DDN_DIAG_E_EGRESS" },
        { 0x2d, "DDN_DIAG_W_EGRESS" },
        { 0x2e, "DDN_DIAG_C_EGRESS" },
        { 0x2f, "DDN_DIAG_N_INGRESS" },
        { 0x30, "DDN_DIAG_S_INGRESS" },
        { 0x31, "DDN_DIAG_E_INGRESS" },
        { 0x32, "DDN_DIAG_W_INGRESS" },
        { 0x33, "DDN_DIAG_C_INGRESS" },
        { 0x34, "DDN_DIAG_CORE_SENT" },
        { 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
        { 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
        { 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
        { 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
        { 0x39, "NDN_DIAG_C_OUT_OF_CRED" },
        { 0x3a, "NDN_DIAG_N_EGRESS" },
        { 0x3b, "NDN_DIAG_S_EGRESS" },
        { 0x3c, "NDN_DIAG_E_EGRESS" },
        { 0x3d, "NDN_DIAG_W_EGRESS" },
        { 0x3e, "NDN_DIAG_C_EGRESS" },
        { 0x3f, "NDN_DIAG_N_INGRESS" },
        { 0x40, "NDN_DIAG_S_INGRESS" },
        { 0x41, "NDN_DIAG_E_INGRESS" },
        { 0x42, "NDN_DIAG_W_INGRESS" },
        { 0x43, "NDN_DIAG_C_INGRESS" },
        { 0x44, "NDN_DIAG_CORE_SENT" },
};

static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events[] = {
        { 0x00, "DISABLE" },
        { 0x01, "CYCLES" },
        { 0x02, "TOTAL_RD_REQ_IN" },
        { 0x03, "TOTAL_WR_REQ_IN" },
        { 0x04, "TOTAL_WR_DBID_ACK" },
        { 0x05, "TOTAL_WR_DATA_IN" },
        { 0x06, "TOTAL_WR_COMP" },
        { 0x07, "TOTAL_RD_DATA_OUT" },
        { 0x08, "TOTAL_CDN_REQ_IN_BANK0" },
        { 0x09, "TOTAL_CDN_REQ_IN_BANK1" },
        { 0x0a, "TOTAL_DDN_REQ_IN_BANK0" },
        { 0x0b, "TOTAL_DDN_REQ_IN_BANK1" },
        { 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" },
        { 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" },
        { 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" },
        { 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" },
        { 0x10, "TOTAL_EMEM_RD_REQ_BANK0" },
        { 0x11, "TOTAL_EMEM_RD_REQ_BANK1" },
        { 0x12, "TOTAL_EMEM_WR_REQ_BANK0" },
        { 0x13, "TOTAL_EMEM_WR_REQ_BANK1" },
        { 0x14, "TOTAL_RD_REQ_OUT" },
        { 0x15, "TOTAL_WR_REQ_OUT" },
        { 0x16, "TOTAL_RD_RES_IN" },
        { 0x17, "HITS_BANK0" },
        { 0x18, "HITS_BANK1" },
        { 0x19, "MISSES_BANK0" },
        { 0x1a, "MISSES_BANK1" },
        { 0x1b, "ALLOCATIONS_BANK0" },
        { 0x1c, "ALLOCATIONS_BANK1" },
        { 0x1d, "EVICTIONS_BANK0" },
        { 0x1e, "EVICTIONS_BANK1" },
        { 0x1f, "DBID_REJECT" },
        { 0x20, "WRDB_REJECT_BANK0" },
        { 0x21, "WRDB_REJECT_BANK1" },
        { 0x22, "CMDQ_REJECT_BANK0" },
        { 0x23, "CMDQ_REJECT_BANK1" },
        { 0x24, "COB_REJECT_BANK0" },
        { 0x25, "COB_REJECT_BANK1" },
        { 0x26, "TRB_REJECT_BANK0" },
        { 0x27, "TRB_REJECT_BANK1" },
        { 0x28, "TAG_REJECT_BANK0" },
        { 0x29, "TAG_REJECT_BANK1" },
        { 0x2a, "ANY_REJECT_BANK0" },
        { 0x2b, "ANY_REJECT_BANK1" },
};

static struct mlxbf_pmc_context *pmc;

/* UUID used to probe ATF service. */
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
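
/*
 * The SIP_SVC_UID call returns the service UUID packed into the low 32 bits
 * of res.a0..a3; mlxbf_pmc_guid_match() below rebuilds a guid_t from those
 * words before comparing it against mlxbf_pmc_svc_uuid_str.
 */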

/* Calls an SMC to access a performance register */
static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
                                 uint64_t *result)
{
        struct arm_smccc_res res;
        int status, err = 0;

        arm_smccc_smc(command, pmc->sreg_tbl_perf, (uintptr_t)addr, 0, 0, 0, 0,
                      0, &res);

        status = res.a0;

        switch (status) {
        case PSCI_RET_NOT_SUPPORTED:
                err = -EINVAL;
                break;
        case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
                err = -EACCES;
                break;
        default:
                *result = res.a1;
                break;
        }

        return err;
}

/* Read from a performance counter */
static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
                          uint64_t *result)
{
        if (pmc->svc_sreg_support)
                return mlxbf_pmc_secure_read(addr, command, result);

        if (command == MLXBF_PMC_READ_REG_32)
                *result = readl(addr);
        else
                *result = readq(addr);

        return 0;
}

/* Convenience function for 32-bit reads */
static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
{
        uint64_t read_out;
        int status;

        status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
        if (status)
                return status;
        *result = (uint32_t)read_out;

        return 0;
}

/* Calls an SMC to access a performance register */
static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
                                  uint64_t value)
{
        struct arm_smccc_res res;
        int status, err = 0;

        arm_smccc_smc(command, pmc->sreg_tbl_perf, value, (uintptr_t)addr, 0, 0,
                      0, 0, &res);

        status = res.a0;

        switch (status) {
        case PSCI_RET_NOT_SUPPORTED:
                err = -EINVAL;
                break;
        case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
                err = -EACCES;
                break;
        }

        return err;
}

/* Write to a performance counter */
static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
{
        if (pmc->svc_sreg_support)
                return mlxbf_pmc_secure_write(addr, command, value);

        if (command == MLXBF_PMC_WRITE_REG_32)
                writel(value, addr);
        else
                writeq(value, addr);

        return 0;
}

/* Check if the register offset is within the mapped region for the block */
static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
{
        if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) &&
            (offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
                return true; /* inside the mapped PMC space */

        return false;
}

/* Get the event list corresponding to a certain block */
static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
                                                           int *size)
{
        const struct mlxbf_pmc_events *events;

        if (strstr(blk, "tilenet")) {
                events = mlxbf_pmc_hnfnet_events;
                *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
        } else if (strstr(blk, "tile")) {
                events = mlxbf_pmc_hnf_events;
                *size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
        } else if (strstr(blk, "triogen")) {
                events = mlxbf_pmc_smgen_events;
                *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
        } else if (strstr(blk, "trio")) {
                switch (pmc->event_set) {
                case MLXBF_PMC_EVENT_SET_BF1:
                        events = mlxbf_pmc_trio_events_1;
                        *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
                        break;
                case MLXBF_PMC_EVENT_SET_BF2:
                        events = mlxbf_pmc_trio_events_2;
                        *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
                        break;
                default:
                        events = NULL;
                        *size = 0;
                        break;
                }
        } else if (strstr(blk, "mss")) {
                events = mlxbf_pmc_mss_events;
                *size = ARRAY_SIZE(mlxbf_pmc_mss_events);
        } else if (strstr(blk, "ecc")) {
                events = mlxbf_pmc_ecc_events;
                *size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
        } else if (strstr(blk, "pcie")) {
                events = mlxbf_pmc_pcie_events;
                *size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
        } else if (strstr(blk, "l3cache")) {
                events = mlxbf_pmc_l3c_events;
                *size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
        } else if (strstr(blk, "gic")) {
                events = mlxbf_pmc_smgen_events;
                *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
        } else if (strstr(blk, "smmu")) {
                events = mlxbf_pmc_smgen_events;
                *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
        } else {
                events = NULL;
                *size = 0;
        }

        return events;
}

/* Get the event number given the name */
static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
{
        const struct mlxbf_pmc_events *events;
        int i, size;

        events = mlxbf_pmc_event_list(blk, &size);
        if (!events)
                return -EINVAL;

        for (i = 0; i < size; ++i) {
                if (!strcmp(evt, events[i].evt_name))
                        return events[i].evt_num;
        }

        return -ENODEV;
}

/* Get the event name given the number */
static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
{
        const struct mlxbf_pmc_events *events;
        int i, size;

        events = mlxbf_pmc_event_list(blk, &size);
        if (!events)
                return NULL;

        for (i = 0; i < size; ++i) {
                if (evt == events[i].evt_num)
                        return events[i].evt_name;
        }

        return NULL;
}

/* Method to enable/disable/reset l3cache counters */
static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
{
        uint32_t perfcnt_cfg = 0;

        if (enable)
                perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
        if (reset)
                perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_RST;

        return mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                       MLXBF_PMC_L3C_PERF_CNT_CFG,
                               MLXBF_PMC_WRITE_REG_32, perfcnt_cfg);
}
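
/*
 * Note: when enabling, the "enable" sysfs store below calls this helper
 * twice, first with reset set to clear the counters and then with enable
 * set to start them (see mlxbf_pmc_enable_store()).
 */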

/* Method to handle l3cache counter programming */
static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
                                        uint32_t evt)
{
        uint32_t perfcnt_sel_1 = 0;
        uint32_t perfcnt_sel = 0;
        uint32_t *wordaddr;
        void __iomem *pmcaddr;
        int ret;

        /* Disable all counters before programming them */
        if (mlxbf_pmc_config_l3_counters(blk_num, false, false))
                return -EINVAL;

        /* Select appropriate register information */
        switch (cnt_num) {
        case 0 ... 3:
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL;
                wordaddr = &perfcnt_sel;
                break;
        case 4:
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL_1;
                wordaddr = &perfcnt_sel_1;
                break;
        default:
                return -EINVAL;
        }

        ret = mlxbf_pmc_readl(pmcaddr, wordaddr);
        if (ret)
                return ret;

        switch (cnt_num) {
        case 0:
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0,
                                          evt);
                break;
        case 1:
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1,
                                          evt);
                break;
        case 2:
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2,
                                          evt);
                break;
        case 3:
                perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3;
                perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3,
                                          evt);
                break;
        case 4:
                perfcnt_sel_1 &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4;
                perfcnt_sel_1 |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
                                            evt);
                break;
        default:
                return -EINVAL;
        }

        return mlxbf_pmc_write(pmcaddr, MLXBF_PMC_WRITE_REG_32, *wordaddr);
}

/* Method to program a counter to monitor an event */
static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
                                     uint32_t evt, bool is_l3)
{
        uint64_t perfctl, perfevt, perfmon_cfg;

        if (cnt_num >= pmc->block[blk_num].counters)
                return -ENODEV;

        if (is_l3)
                return mlxbf_pmc_program_l3_counter(blk_num, cnt_num, evt);

        /* Configure the counter */
        perfctl = FIELD_PREP(MLXBF_PMC_PERFCTL_EN0, 1);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0, 1);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0, 0);
        perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0, 0);

        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfctl);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
                                  MLXBF_PMC_PERFCTL);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                    cnt_num * MLXBF_PMC_REG_SIZE,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
                return -EFAULT;

        /* Select the event */
        perfevt = FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL, evt);

        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfevt);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
                                  MLXBF_PMC_PERFEVT);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                    cnt_num * MLXBF_PMC_REG_SIZE,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
                return -EFAULT;

        /* Clear the accumulator */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
                                 MLXBF_PMC_PERFACC0);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
                                    cnt_num * MLXBF_PMC_REG_SIZE,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
                return -EFAULT;

        return 0;
}

/* Method to handle l3 counter reads */
static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
                                     uint64_t *result)
{
        uint32_t perfcnt_low = 0, perfcnt_high = 0;
        uint64_t value;
        int status = 0;

        status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
                                         MLXBF_PMC_L3C_PERF_CNT_LOW +
                                         cnt_num * MLXBF_PMC_L3C_REG_SIZE,
                                 &perfcnt_low);
        if (status)
                return status;

        status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
                                         MLXBF_PMC_L3C_PERF_CNT_HIGH +
                                         cnt_num * MLXBF_PMC_L3C_REG_SIZE,
                                 &perfcnt_high);
        if (status)
                return status;

        value = perfcnt_high;
        value = value << 32;
        value |= perfcnt_low;
        *result = value;

        return 0;
}

/* Method to read the counter value */
static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
                                  uint64_t *result)
{
        uint32_t perfcfg_offset, perfval_offset;
        uint64_t perfmon_cfg;
        int status;

        if (cnt_num >= pmc->block[blk_num].counters)
                return -EINVAL;

        if (is_l3)
                return mlxbf_pmc_read_l3_counter(blk_num, cnt_num, result);

        perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
        perfval_offset = perfcfg_offset +
                         pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;

        /* Set counter in "read" mode */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
                                 MLXBF_PMC_PERFACC0);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);

        status = mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
                                 MLXBF_PMC_WRITE_REG_64, perfmon_cfg);
        if (status)
                return status;

        /* Get the counter value */
        return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
                              MLXBF_PMC_READ_REG_64, result);
}

/* Method to read L3 block event */
static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
                                   uint64_t *result)
{
        uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
        uint32_t *wordaddr;
        void __iomem *pmcaddr;
        uint64_t evt;

        /* Select appropriate register information */
        switch (cnt_num) {
        case 0 ... 3:
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL;
                wordaddr = &perfcnt_sel;
                break;
        case 4:
                pmcaddr = pmc->block[blk_num].mmio_base +
                          MLXBF_PMC_L3C_PERF_CNT_SEL_1;
                wordaddr = &perfcnt_sel_1;
                break;
        default:
                return -EINVAL;
        }

        if (mlxbf_pmc_readl(pmcaddr, wordaddr))
                return -EINVAL;

        /* Read from appropriate register field for the counter */
        switch (cnt_num) {
        case 0:
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, perfcnt_sel);
                break;
        case 1:
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, perfcnt_sel);
                break;
        case 2:
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, perfcnt_sel);
                break;
        case 3:
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, perfcnt_sel);
                break;
        case 4:
                evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
                                perfcnt_sel_1);
                break;
        default:
                return -EINVAL;
        }
        *result = evt;

        return 0;
}

/* Method to find the event currently being monitored by a counter */
static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
                                uint64_t *result)
{
        uint32_t perfcfg_offset, perfval_offset;
        uint64_t perfmon_cfg, perfevt, perfctl;

        if (cnt_num >= pmc->block[blk_num].counters)
                return -EINVAL;

        if (is_l3)
                return mlxbf_pmc_read_l3_event(blk_num, cnt_num, result);

        perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
        perfval_offset = perfcfg_offset +
                         pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;

        /* Set counter in "read" mode */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
                                 MLXBF_PMC_PERFCTL);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
                return -EFAULT;

        /* Check if the counter is enabled */

        if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
                           MLXBF_PMC_READ_REG_64, &perfctl))
                return -EFAULT;

        if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
                return -EINVAL;

        /* Set counter in "read" mode */
        perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
                                 MLXBF_PMC_PERFEVT);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
        perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);

        if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
                            MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
                return -EFAULT;

        /* Get the event number */
        if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
                           MLXBF_PMC_READ_REG_64, &perfevt))
                return -EFAULT;

        *result = FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL, perfevt);

        return 0;
}

/* Method to read a register */
static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
{
        uint32_t ecc_out;

        if (strstr(pmc->block_name[blk_num], "ecc")) {
                if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
                                    &ecc_out))
                        return -EFAULT;

                *result = ecc_out;
                return 0;
        }

        if (mlxbf_pmc_valid_range(blk_num, offset))
                return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + offset,
                                      MLXBF_PMC_READ_REG_64, result);

        return -EINVAL;
}

/* Method to write to a register */
static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
{
        if (strstr(pmc->block_name[blk_num], "ecc")) {
                return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
                                       MLXBF_PMC_WRITE_REG_32, data);
        }

        if (mlxbf_pmc_valid_range(blk_num, offset))
                return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
                                       MLXBF_PMC_WRITE_REG_64, data);

        return -EINVAL;
}

/* Show function for "counter" sysfs files */
static ssize_t mlxbf_pmc_counter_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct mlxbf_pmc_attribute *attr_counter = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        int blk_num, cnt_num, offset;
        bool is_l3 = false;
        uint64_t value;

        blk_num = attr_counter->nr;
        cnt_num = attr_counter->index;

        if (strstr(pmc->block_name[blk_num], "l3cache"))
                is_l3 = true;

        if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
                if (mlxbf_pmc_read_counter(blk_num, cnt_num, is_l3, &value))
                        return -EINVAL;
        } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
                offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
                                                 attr->attr.name);
                if (offset < 0)
                        return -EINVAL;
                if (mlxbf_pmc_read_reg(blk_num, offset, &value))
                        return -EINVAL;
        } else
                return -EINVAL;

        return sprintf(buf, "0x%llx\n", value);
}

/* Store function for "counter" sysfs files */
static ssize_t mlxbf_pmc_counter_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        struct mlxbf_pmc_attribute *attr_counter = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        int blk_num, cnt_num, offset, err, data;
        bool is_l3 = false;
        uint64_t evt_num;

        blk_num = attr_counter->nr;
        cnt_num = attr_counter->index;

        err = kstrtoint(buf, 0, &data);
        if (err < 0)
                return err;

        /* Allow non-zero writes only to the ecc regs */
        if (!(strstr(pmc->block_name[blk_num], "ecc")) && data)
                return -EINVAL;

        /* Do not allow writes to the L3C regs */
        if (strstr(pmc->block_name[blk_num], "l3cache"))
                return -EINVAL;

        if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
                err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
                if (err)
                        return err;
                err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num,
                                                is_l3);
                if (err)
                        return err;
        } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
                offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
                                                 attr->attr.name);
                if (offset < 0)
                        return -EINVAL;
                err = mlxbf_pmc_write_reg(blk_num, offset, data);
                if (err)
                        return err;
        } else
                return -EINVAL;

        return count;
}

/* Show function for "event" sysfs files */
static ssize_t mlxbf_pmc_event_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct mlxbf_pmc_attribute *attr_event = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        int blk_num, cnt_num, err;
        bool is_l3 = false;
        uint64_t evt_num;
        char *evt_name;

        blk_num = attr_event->nr;
        cnt_num = attr_event->index;

        if (strstr(pmc->block_name[blk_num], "l3cache"))
                is_l3 = true;

        err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
        if (err)
                return sprintf(buf, "No event being monitored\n");

        evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
        if (!evt_name)
                return -EINVAL;

        return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
}

/* Store function for "event" sysfs files */
static ssize_t mlxbf_pmc_event_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct mlxbf_pmc_attribute *attr_event = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        int blk_num, cnt_num, evt_num, err;
        bool is_l3 = false;

        blk_num = attr_event->nr;
        cnt_num = attr_event->index;

        if (isalpha(buf[0])) {
                evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
                                                  buf);
                if (evt_num < 0)
                        return -EINVAL;
        } else {
                err = kstrtoint(buf, 0, &evt_num);
                if (err < 0)
                        return err;
        }

        if (strstr(pmc->block_name[blk_num], "l3cache"))
                is_l3 = true;

        err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, is_l3);
        if (err)
                return err;

        return count;
}
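
/*
 * Example usage of the "event"/"counter" files (paths are illustrative; the
 * hwmon index and the block directory names depend on the platform's ACPI
 * tables):
 *
 *   echo HNF_REQUESTS > /sys/class/hwmon/hwmon0/tile0/event0
 *   cat /sys/class/hwmon/hwmon0/tile0/event0
 *   cat /sys/class/hwmon/hwmon0/tile0/counter0
 *
 * Writing an event name (or its number) to eventN programs counter N;
 * reading counterN then returns the accumulated count.
 */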

/* Show function for "event_list" sysfs files */
static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct mlxbf_pmc_attribute *attr_event_list = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        int blk_num, i, size, len = 0, ret = 0;
        const struct mlxbf_pmc_events *events;
        char e_info[MLXBF_PMC_EVENT_INFO_LEN];

        blk_num = attr_event_list->nr;

        events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &size);
        if (!events)
                return -EINVAL;

        for (i = 0, buf[0] = '\0'; i < size; ++i) {
                len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
                               events[i].evt_name);
                if (len > PAGE_SIZE)
                        break;
                strcat(buf, e_info);
                ret = len;
        }

        return ret;
}

/* Show function for "enable" sysfs files - only for l3cache */
static ssize_t mlxbf_pmc_enable_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct mlxbf_pmc_attribute *attr_enable = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        uint32_t perfcnt_cfg;
        int blk_num, value;

        blk_num = attr_enable->nr;

        if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
                                    MLXBF_PMC_L3C_PERF_CNT_CFG,
                            &perfcnt_cfg))
                return -EINVAL;

        value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);

        return sprintf(buf, "%d\n", value);
}

/* Store function for "enable" sysfs files - only for l3cache */
static ssize_t mlxbf_pmc_enable_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct mlxbf_pmc_attribute *attr_enable = container_of(
                attr, struct mlxbf_pmc_attribute, dev_attr);
        int err, en, blk_num;

        blk_num = attr_enable->nr;

        err = kstrtoint(buf, 0, &en);
        if (err < 0)
                return err;

        if (!en) {
                err = mlxbf_pmc_config_l3_counters(blk_num, false, false);
                if (err)
                        return err;
        } else if (en == 1) {
                err = mlxbf_pmc_config_l3_counters(blk_num, false, true);
                if (err)
                        return err;
                err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
                if (err)
                        return err;
        } else
                return -EINVAL;

        return count;
}

/* Populate attributes for blocks with counters to monitor performance */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
{
        struct mlxbf_pmc_attribute *attr;
        int i = 0, j = 0;

        /* "event_list" sysfs to list events supported by the block */
        attr = &pmc->block[blk_num].attr_event_list;
        attr->dev_attr.attr.mode = 0444;
        attr->dev_attr.show = mlxbf_pmc_event_list_show;
        attr->nr = blk_num;
        attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
        pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
        attr = NULL;

        /* "enable" sysfs to start/stop the counters. Only in L3C blocks */
        if (strstr(pmc->block_name[blk_num], "l3cache")) {
                attr = &pmc->block[blk_num].attr_enable;
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_enable_show;
                attr->dev_attr.store = mlxbf_pmc_enable_store;
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          "enable");
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
                attr = NULL;
        }

        pmc->block[blk_num].attr_counter = devm_kcalloc(
                dev, pmc->block[blk_num].counters,
                sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
        if (!pmc->block[blk_num].attr_counter)
                return -ENOMEM;

        pmc->block[blk_num].attr_event = devm_kcalloc(
                dev, pmc->block[blk_num].counters,
                sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
        if (!pmc->block[blk_num].attr_event)
                return -ENOMEM;

        /* "eventX" and "counterX" sysfs to program and read counter values */
        for (j = 0; j < pmc->block[blk_num].counters; ++j) {
                attr = &pmc->block[blk_num].attr_counter[j];
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_counter_show;
                attr->dev_attr.store = mlxbf_pmc_counter_store;
                attr->index = j;
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          "counter%d", j);
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
                attr = NULL;

                attr = &pmc->block[blk_num].attr_event[j];
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_event_show;
                attr->dev_attr.store = mlxbf_pmc_event_store;
                attr->index = j;
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          "event%d", j);
                pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
                attr = NULL;
        }

        return 0;
}

/* Populate attributes for blocks with registers to monitor performance */
static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
{
        struct mlxbf_pmc_attribute *attr;
        const struct mlxbf_pmc_events *events;
        int i = 0, j = 0;

        events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);
        if (!events)
                return -EINVAL;

        pmc->block[blk_num].attr_event = devm_kcalloc(
                dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
        if (!pmc->block[blk_num].attr_event)
                return -ENOMEM;

        while (j > 0) {
                --j;
                attr = &pmc->block[blk_num].attr_event[j];
                attr->dev_attr.attr.mode = 0644;
                attr->dev_attr.show = mlxbf_pmc_counter_show;
                attr->dev_attr.store = mlxbf_pmc_counter_store;
                attr->nr = blk_num;
                attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
                                                          events[j].evt_name);
                pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
                attr = NULL;
                i++;
        }

        return 0;
}

/* Helper to create the bfperf sysfs sub-directories and files */
static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
{
        int err;

        /* Populate attributes based on counter type */
        if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER)
                err = mlxbf_pmc_init_perftype_counter(dev, blk_num);
        else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
                err = mlxbf_pmc_init_perftype_reg(dev, blk_num);
        else
                err = -EINVAL;

        if (err)
                return err;

        /* Add a new attribute_group for the block */
        pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
        pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
                dev, GFP_KERNEL, pmc->block_name[blk_num]);
        pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;

        return 0;
}
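
/*
 * The groups built above appear as one sysfs directory per block under the
 * "bfperf" hwmon device, for example (actual names come from the ACPI
 * "block_name" list):
 *
 *   /sys/class/hwmon/hwmonX/tile0/event_list
 *   /sys/class/hwmon/hwmonX/tile0/event0 ... eventN
 *   /sys/class/hwmon/hwmonX/tile0/counter0 ... counterN
 *   /sys/class/hwmon/hwmonX/<l3cache block>/enable
 *
 * Register-type blocks (e.g. ecc) instead expose one file per register name
 * from their event list.
 */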

static bool mlxbf_pmc_guid_match(const guid_t *guid,
                                 const struct arm_smccc_res *res)
{
        guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16, res->a2,
                              res->a2 >> 8, res->a2 >> 16, res->a2 >> 24,
                              res->a3, res->a3 >> 8, res->a3 >> 16,
                              res->a3 >> 24);

        return guid_equal(guid, &id);
}

/* Helper to map the Performance Counters from the various blocks */
static int mlxbf_pmc_map_counters(struct device *dev)
{
        uint64_t info[MLXBF_PMC_INFO_SZ];
        int i, tile_num, ret;

        for (i = 0; i < pmc->total_blocks; ++i) {
                if (strstr(pmc->block_name[i], "tile")) {
                        ret = sscanf(pmc->block_name[i], "tile%d", &tile_num);
                        if (ret < 0)
                                return ret;

                        if (tile_num >= pmc->tile_count)
                                continue;
                }

                ret = device_property_read_u64_array(dev, pmc->block_name[i],
                                                     info, MLXBF_PMC_INFO_SZ);
                if (ret)
                        return ret;

                /*
                 * Do not remap if the proper SMC calls are supported,
                 * since the SMC calls expect physical addresses.
                 */
                if (pmc->svc_sreg_support)
                        pmc->block[i].mmio_base = (void __iomem *)info[0];
                else
                        pmc->block[i].mmio_base =
                                devm_ioremap(dev, info[0], info[1]);

                pmc->block[i].blk_size = info[1];
                pmc->block[i].counters = info[2];
                pmc->block[i].type = info[3];

                if (IS_ERR(pmc->block[i].mmio_base))
                        return PTR_ERR(pmc->block[i].mmio_base);

                ret = mlxbf_pmc_create_groups(dev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mlxbf_pmc_probe(struct platform_device *pdev)
{
        struct acpi_device *acpi_dev = ACPI_COMPANION(&pdev->dev);
        const char *hid = acpi_device_hid(acpi_dev);
        struct device *dev = &pdev->dev;
        struct arm_smccc_res res;
        guid_t guid;
        int ret;

        /* Ensure we have the UUID we expect for this service. */
        arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
        guid_parse(mlxbf_pmc_svc_uuid_str, &guid);
        if (!mlxbf_pmc_guid_match(&guid, &res))
                return -ENODEV;

        pmc = devm_kzalloc(dev, sizeof(struct mlxbf_pmc_context), GFP_KERNEL);
        if (!pmc)
                return -ENOMEM;

        /*
         * ACPI indicates whether we use SMCs to access registers or not.
         * If sreg_tbl_perf is not present, just assume we're not using SMCs.
         */
        ret = device_property_read_u32(dev, "sec_reg_block",
                                       &pmc->sreg_tbl_perf);
        if (ret) {
                pmc->svc_sreg_support = false;
        } else {
                /*
                 * Check service version to see if we actually do support the
                 * needed SMCs. If we have the calls we need, mark support for
                 * them in the pmc struct.
                 */
                arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0,
                              &res);
                if (res.a0 == MLXBF_PMC_SVC_REQ_MAJOR &&
                    res.a1 >= MLXBF_PMC_SVC_MIN_MINOR)
                        pmc->svc_sreg_support = true;
                else
                        return -EINVAL;
        }

        if (!strcmp(hid, "MLNXBFD0"))
                pmc->event_set = MLXBF_PMC_EVENT_SET_BF1;
        else if (!strcmp(hid, "MLNXBFD1"))
                pmc->event_set = MLXBF_PMC_EVENT_SET_BF2;
        else
                return -ENODEV;

        ret = device_property_read_u32(dev, "block_num", &pmc->total_blocks);
        if (ret)
                return ret;

        ret = device_property_read_string_array(dev, "block_name",
                                                pmc->block_name,
                                                pmc->total_blocks);
        if (ret != pmc->total_blocks)
                return -EFAULT;

        ret = device_property_read_u32(dev, "tile_num", &pmc->tile_count);
        if (ret)
                return ret;

        pmc->pdev = pdev;

        ret = mlxbf_pmc_map_counters(dev);
        if (ret)
                return ret;

        pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
                dev, "bfperf", pmc, pmc->groups);
        platform_set_drvdata(pdev, pmc);

        return 0;
}

static const struct acpi_device_id mlxbf_pmc_acpi_ids[] = { { "MLNXBFD0", 0 },
                                                            { "MLNXBFD1", 0 },
                                                            {}, };

MODULE_DEVICE_TABLE(acpi, mlxbf_pmc_acpi_ids);

static struct platform_driver pmc_driver = {
        .driver = { .name = "mlxbf-pmc",
                    .acpi_match_table = ACPI_PTR(mlxbf_pmc_acpi_ids), },
        .probe = mlxbf_pmc_probe,
};

module_platform_driver(pmc_driver);

MODULE_AUTHOR("Shravan Kumar Ramani <sramani@mellanox.com>");
MODULE_DESCRIPTION("Mellanox PMC driver");
MODULE_LICENSE("Dual BSD/GPL");