// SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB
/*
 * Mellanox BlueField Performance Monitoring Counters driver
 *
 * This driver provides a sysfs interface for monitoring
 * performance statistics in BlueField SoC.
 *
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 */
11 #include <linux/acpi.h>
12 #include <linux/arm-smccc.h>
13 #include <linux/bitfield.h>
14 #include <linux/errno.h>
15 #include <linux/hwmon.h>
16 #include <linux/platform_device.h>
17 #include <linux/string.h>
18 #include <uapi/linux/psci.h>
/* SMC function IDs used to access performance registers via ATF */
#define MLXBF_PMC_WRITE_REG_32 0x82000009
#define MLXBF_PMC_READ_REG_32 0x8200000A
#define MLXBF_PMC_WRITE_REG_64 0x8200000B
#define MLXBF_PMC_READ_REG_64 0x8200000C
#define MLXBF_PMC_SIP_SVC_UID 0x8200ff01
#define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03
#define MLXBF_PMC_SVC_REQ_MAJOR 0
#define MLXBF_PMC_SVC_MIN_MINOR 3

#define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4

#define MLXBF_PMC_EVENT_SET_BF1 0
#define MLXBF_PMC_EVENT_SET_BF2 1
#define MLXBF_PMC_EVENT_INFO_LEN 100

#define MLXBF_PMC_MAX_BLOCKS 30
#define MLXBF_PMC_MAX_ATTRS 30
#define MLXBF_PMC_INFO_SZ 4
#define MLXBF_PMC_REG_SIZE 8
#define MLXBF_PMC_L3C_REG_SIZE 4

#define MLXBF_PMC_TYPE_COUNTER 1
#define MLXBF_PMC_TYPE_REGISTER 0

/* Internal register addresses reachable through the PERFMON_CONFIG word */
#define MLXBF_PMC_PERFCTL 0
#define MLXBF_PMC_PERFEVT 1
#define MLXBF_PMC_PERFACC0 4

#define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0)
#define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1)
#define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2)
#define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5)

#define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16)
#define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20)
#define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24)
#define MLXBF_PMC_PERFCTL_AD0 BIT(27)
#define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28)
#define MLXBF_PMC_PERFCTL_EB0 BIT(30)
#define MLXBF_PMC_PERFCTL_EN0 BIT(31)

#define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24)

/* L3 cache block has its own directly-mapped counter registers */
#define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0
#define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14
#define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40
#define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60

#define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0)
#define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24)

#define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0)

#define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0)
#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
82 * Structure to hold attribute and block info for each sysfs entry
83 * @dev_attr: Device attribute struct
84 * @index: index to identify counter number within a block
85 * @nr: block number to which the sysfs belongs
87 struct mlxbf_pmc_attribute
{
88 struct device_attribute dev_attr
;
94 * Structure to hold info for each HW block
96 * @mmio_base: The VA at which the PMC block is mapped
97 * @blk_size: Size of each mapped region
98 * @counters: Number of counters in the block
99 * @type: Type of counters in the block
100 * @attr_counter: Attributes for "counter" sysfs files
101 * @attr_event: Attributes for "event" sysfs files
102 * @attr_event_list: Attributes for "event_list" sysfs files
103 * @attr_enable: Attributes for "enable" sysfs files
104 * @block_attr: All attributes needed for the block
105 * @blcok_attr_grp: Attribute group for the block
107 struct mlxbf_pmc_block_info
{
108 void __iomem
*mmio_base
;
112 struct mlxbf_pmc_attribute
*attr_counter
;
113 struct mlxbf_pmc_attribute
*attr_event
;
114 struct mlxbf_pmc_attribute attr_event_list
;
115 struct mlxbf_pmc_attribute attr_enable
;
116 struct attribute
*block_attr
[MLXBF_PMC_MAX_ATTRS
];
117 struct attribute_group block_attr_grp
;
121 * Structure to hold PMC context info
123 * @pdev: The kernel structure representing the device
124 * @total_blocks: Total number of blocks
125 * @tile_count: Number of tiles in the system
126 * @hwmon_dev: Hwmon device for bfperf
127 * @block_name: Block name
129 * @groups: Attribute groups from each block
130 * @sv_sreg_support: Whether SMCs are used to access performance registers
131 * @sreg_tbl_perf: Secure register access table number
132 * @event_set: Event set to use
134 struct mlxbf_pmc_context
{
135 struct platform_device
*pdev
;
136 uint32_t total_blocks
;
138 struct device
*hwmon_dev
;
139 const char *block_name
[MLXBF_PMC_MAX_BLOCKS
];
140 struct mlxbf_pmc_block_info block
[MLXBF_PMC_MAX_BLOCKS
];
141 const struct attribute_group
*groups
[MLXBF_PMC_MAX_BLOCKS
];
142 bool svc_sreg_support
;
143 uint32_t sreg_tbl_perf
;
144 unsigned int event_set
;
/**
 * struct mlxbf_pmc_events - Structure to hold supported events for each block
 * @evt_num: Event number used to program counters
 * @evt_name: Name of the event
 */
struct mlxbf_pmc_events {
	/* NOTE(review): member lines reconstructed from the kernel-doc and
	 * from the events[i].evt_num / events[i].evt_name uses below.
	 */
	int evt_num;
	char *evt_name;
};
157 static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events
[] = {
158 { 0x0, "IN_P_PKT_CNT" },
159 { 0x10, "IN_NP_PKT_CNT" },
160 { 0x18, "IN_C_PKT_CNT" },
161 { 0x20, "OUT_P_PKT_CNT" },
162 { 0x28, "OUT_NP_PKT_CNT" },
163 { 0x30, "OUT_C_PKT_CNT" },
164 { 0x38, "IN_P_BYTE_CNT" },
165 { 0x40, "IN_NP_BYTE_CNT" },
166 { 0x48, "IN_C_BYTE_CNT" },
167 { 0x50, "OUT_P_BYTE_CNT" },
168 { 0x58, "OUT_NP_BYTE_CNT" },
169 { 0x60, "OUT_C_BYTE_CNT" },
172 static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events
[] = {
179 { 0x6, "AW_REQ_TBU" },
185 { 0xd, "AR_REQ_TBU" },
187 { 0xf, "TX_DAT_AF" },
188 { 0x10, "RX_DAT_AF" },
189 { 0x11, "RETRYQ_CRED" },
192 static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1
[] = {
193 { 0xa0, "TPIO_DATA_BEAT" },
194 { 0xa1, "TDMA_DATA_BEAT" },
195 { 0xa2, "MAP_DATA_BEAT" },
196 { 0xa3, "TXMSG_DATA_BEAT" },
197 { 0xa4, "TPIO_DATA_PACKET" },
198 { 0xa5, "TDMA_DATA_PACKET" },
199 { 0xa6, "MAP_DATA_PACKET" },
200 { 0xa7, "TXMSG_DATA_PACKET" },
201 { 0xa8, "TDMA_RT_AF" },
202 { 0xa9, "TDMA_PBUF_MAC_AF" },
203 { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
204 { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
205 { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
206 { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
207 { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
208 { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
209 { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
210 { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
211 { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
212 { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
215 static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2
[] = {
216 { 0xa0, "TPIO_DATA_BEAT" },
217 { 0xa1, "TDMA_DATA_BEAT" },
218 { 0xa2, "MAP_DATA_BEAT" },
219 { 0xa3, "TXMSG_DATA_BEAT" },
220 { 0xa4, "TPIO_DATA_PACKET" },
221 { 0xa5, "TDMA_DATA_PACKET" },
222 { 0xa6, "MAP_DATA_PACKET" },
223 { 0xa7, "TXMSG_DATA_PACKET" },
224 { 0xa8, "TDMA_RT_AF" },
225 { 0xa9, "TDMA_PBUF_MAC_AF" },
226 { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
227 { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
228 { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
229 { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
230 { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
231 { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
232 { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
233 { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
234 { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
235 { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
236 { 0xb4, "TRIO_RING_TX_FLIT_CH0" },
237 { 0xb5, "TRIO_RING_TX_FLIT_CH1" },
238 { 0xb6, "TRIO_RING_TX_FLIT_CH2" },
239 { 0xb7, "TRIO_RING_TX_FLIT_CH3" },
240 { 0xb8, "TRIO_RING_TX_FLIT_CH4" },
241 { 0xb9, "TRIO_RING_RX_FLIT_CH0" },
242 { 0xba, "TRIO_RING_RX_FLIT_CH1" },
243 { 0xbb, "TRIO_RING_RX_FLIT_CH2" },
244 { 0xbc, "TRIO_RING_RX_FLIT_CH3" },
247 static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events
[] = {
248 { 0x100, "ECC_SINGLE_ERROR_CNT" },
249 { 0x104, "ECC_DOUBLE_ERROR_CNT" },
250 { 0x114, "SERR_INJ" },
251 { 0x118, "DERR_INJ" },
252 { 0x124, "ECC_SINGLE_ERROR_0" },
253 { 0x164, "ECC_DOUBLE_ERROR_0" },
254 { 0x340, "DRAM_ECC_COUNT" },
255 { 0x344, "DRAM_ECC_INJECT" },
256 { 0x348, "DRAM_ECC_ERROR" },
259 static const struct mlxbf_pmc_events mlxbf_pmc_mss_events
[] = {
260 { 0xc0, "RXREQ_MSS" },
261 { 0xc1, "RXDAT_MSS" },
262 { 0xc2, "TXRSP_MSS" },
263 { 0xc3, "TXDAT_MSS" },
266 static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events
[] = {
267 { 0x45, "HNF_REQUESTS" },
268 { 0x46, "HNF_REJECTS" },
269 { 0x47, "ALL_BUSY" },
270 { 0x48, "MAF_BUSY" },
271 { 0x49, "MAF_REQUESTS" },
272 { 0x4a, "RNF_REQUESTS" },
273 { 0x4b, "REQUEST_TYPE" },
274 { 0x4c, "MEMORY_READS" },
275 { 0x4d, "MEMORY_WRITES" },
276 { 0x4e, "VICTIM_WRITE" },
277 { 0x4f, "POC_FULL" },
278 { 0x50, "POC_FAIL" },
279 { 0x51, "POC_SUCCESS" },
280 { 0x52, "POC_WRITES" },
281 { 0x53, "POC_READS" },
283 { 0x55, "RXREQ_HNF" },
284 { 0x56, "RXRSP_HNF" },
285 { 0x57, "RXDAT_HNF" },
286 { 0x58, "TXREQ_HNF" },
287 { 0x59, "TXRSP_HNF" },
288 { 0x5a, "TXDAT_HNF" },
289 { 0x5b, "TXSNP_HNF" },
290 { 0x5c, "INDEX_MATCH" },
291 { 0x5d, "A72_ACCESS" },
292 { 0x5e, "IO_ACCESS" },
293 { 0x5f, "TSO_WRITE" },
294 { 0x60, "TSO_CONFLICT" },
296 { 0x62, "HNF_ACCEPTS" },
297 { 0x63, "REQ_BUF_EMPTY" },
298 { 0x64, "REQ_BUF_IDLE_MAF" },
299 { 0x65, "TSO_NOARB" },
300 { 0x66, "TSO_NOARB_CYCLES" },
301 { 0x67, "MSS_NO_CREDIT" },
302 { 0x68, "TXDAT_NO_LCRD" },
303 { 0x69, "TXSNP_NO_LCRD" },
304 { 0x6a, "TXRSP_NO_LCRD" },
305 { 0x6b, "TXREQ_NO_LCRD" },
306 { 0x6c, "TSO_CL_MATCH" },
307 { 0x6d, "MEMORY_READS_BYPASS" },
308 { 0x6e, "TSO_NOARB_TIMEOUT" },
309 { 0x6f, "ALLOCATE" },
311 { 0x71, "A72_WRITE" },
312 { 0x72, "A72_READ" },
313 { 0x73, "IO_WRITE" },
315 { 0x75, "TSO_REJECT" },
316 { 0x80, "TXREQ_RN" },
317 { 0x81, "TXRSP_RN" },
318 { 0x82, "TXDAT_RN" },
319 { 0x83, "RXSNP_RN" },
320 { 0x84, "RXRSP_RN" },
321 { 0x85, "RXDAT_RN" },
324 static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events
[] = {
328 { 0x15, "CDN_DIAG_N_OUT_OF_CRED" },
329 { 0x16, "CDN_DIAG_S_OUT_OF_CRED" },
330 { 0x17, "CDN_DIAG_E_OUT_OF_CRED" },
331 { 0x18, "CDN_DIAG_W_OUT_OF_CRED" },
332 { 0x19, "CDN_DIAG_C_OUT_OF_CRED" },
333 { 0x1a, "CDN_DIAG_N_EGRESS" },
334 { 0x1b, "CDN_DIAG_S_EGRESS" },
335 { 0x1c, "CDN_DIAG_E_EGRESS" },
336 { 0x1d, "CDN_DIAG_W_EGRESS" },
337 { 0x1e, "CDN_DIAG_C_EGRESS" },
338 { 0x1f, "CDN_DIAG_N_INGRESS" },
339 { 0x20, "CDN_DIAG_S_INGRESS" },
340 { 0x21, "CDN_DIAG_E_INGRESS" },
341 { 0x22, "CDN_DIAG_W_INGRESS" },
342 { 0x23, "CDN_DIAG_C_INGRESS" },
343 { 0x24, "CDN_DIAG_CORE_SENT" },
344 { 0x25, "DDN_DIAG_N_OUT_OF_CRED" },
345 { 0x26, "DDN_DIAG_S_OUT_OF_CRED" },
346 { 0x27, "DDN_DIAG_E_OUT_OF_CRED" },
347 { 0x28, "DDN_DIAG_W_OUT_OF_CRED" },
348 { 0x29, "DDN_DIAG_C_OUT_OF_CRED" },
349 { 0x2a, "DDN_DIAG_N_EGRESS" },
350 { 0x2b, "DDN_DIAG_S_EGRESS" },
351 { 0x2c, "DDN_DIAG_E_EGRESS" },
352 { 0x2d, "DDN_DIAG_W_EGRESS" },
353 { 0x2e, "DDN_DIAG_C_EGRESS" },
354 { 0x2f, "DDN_DIAG_N_INGRESS" },
355 { 0x30, "DDN_DIAG_S_INGRESS" },
356 { 0x31, "DDN_DIAG_E_INGRESS" },
357 { 0x32, "DDN_DIAG_W_INGRESS" },
358 { 0x33, "DDN_DIAG_C_INGRESS" },
359 { 0x34, "DDN_DIAG_CORE_SENT" },
360 { 0x35, "NDN_DIAG_S_OUT_OF_CRED" },
361 { 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
362 { 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
363 { 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
364 { 0x39, "NDN_DIAG_C_OUT_OF_CRED" },
365 { 0x3a, "NDN_DIAG_N_EGRESS" },
366 { 0x3b, "NDN_DIAG_S_EGRESS" },
367 { 0x3c, "NDN_DIAG_E_EGRESS" },
368 { 0x3d, "NDN_DIAG_W_EGRESS" },
369 { 0x3e, "NDN_DIAG_C_EGRESS" },
370 { 0x3f, "NDN_DIAG_N_INGRESS" },
371 { 0x40, "NDN_DIAG_S_INGRESS" },
372 { 0x41, "NDN_DIAG_E_INGRESS" },
373 { 0x42, "NDN_DIAG_W_INGRESS" },
374 { 0x43, "NDN_DIAG_C_INGRESS" },
375 { 0x44, "NDN_DIAG_CORE_SENT" },
378 static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events
[] = {
381 { 0x02, "TOTAL_RD_REQ_IN" },
382 { 0x03, "TOTAL_WR_REQ_IN" },
383 { 0x04, "TOTAL_WR_DBID_ACK" },
384 { 0x05, "TOTAL_WR_DATA_IN" },
385 { 0x06, "TOTAL_WR_COMP" },
386 { 0x07, "TOTAL_RD_DATA_OUT" },
387 { 0x08, "TOTAL_CDN_REQ_IN_BANK0" },
388 { 0x09, "TOTAL_CDN_REQ_IN_BANK1" },
389 { 0x0a, "TOTAL_DDN_REQ_IN_BANK0" },
390 { 0x0b, "TOTAL_DDN_REQ_IN_BANK1" },
391 { 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" },
392 { 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" },
393 { 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" },
394 { 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" },
395 { 0x10, "TOTAL_EMEM_RD_REQ_BANK0" },
396 { 0x11, "TOTAL_EMEM_RD_REQ_BANK1" },
397 { 0x12, "TOTAL_EMEM_WR_REQ_BANK0" },
398 { 0x13, "TOTAL_EMEM_WR_REQ_BANK1" },
399 { 0x14, "TOTAL_RD_REQ_OUT" },
400 { 0x15, "TOTAL_WR_REQ_OUT" },
401 { 0x16, "TOTAL_RD_RES_IN" },
402 { 0x17, "HITS_BANK0" },
403 { 0x18, "HITS_BANK1" },
404 { 0x19, "MISSES_BANK0" },
405 { 0x1a, "MISSES_BANK1" },
406 { 0x1b, "ALLOCATIONS_BANK0" },
407 { 0x1c, "ALLOCATIONS_BANK1" },
408 { 0x1d, "EVICTIONS_BANK0" },
409 { 0x1e, "EVICTIONS_BANK1" },
410 { 0x1f, "DBID_REJECT" },
411 { 0x20, "WRDB_REJECT_BANK0" },
412 { 0x21, "WRDB_REJECT_BANK1" },
413 { 0x22, "CMDQ_REJECT_BANK0" },
414 { 0x23, "CMDQ_REJECT_BANK1" },
415 { 0x24, "COB_REJECT_BANK0" },
416 { 0x25, "COB_REJECT_BANK1" },
417 { 0x26, "TRB_REJECT_BANK0" },
418 { 0x27, "TRB_REJECT_BANK1" },
419 { 0x28, "TAG_REJECT_BANK0" },
420 { 0x29, "TAG_REJECT_BANK1" },
421 { 0x2a, "ANY_REJECT_BANK0" },
422 { 0x2b, "ANY_REJECT_BANK1" },
/* Driver-wide PMC context, allocated at probe time */
static struct mlxbf_pmc_context *pmc;

/* UUID used to probe ATF service. */
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
430 /* Calls an SMC to access a performance register */
431 static int mlxbf_pmc_secure_read(void __iomem
*addr
, uint32_t command
,
434 struct arm_smccc_res res
;
437 arm_smccc_smc(command
, pmc
->sreg_tbl_perf
, (uintptr_t)addr
, 0, 0, 0, 0,
443 case PSCI_RET_NOT_SUPPORTED
:
446 case MLXBF_PMC_SMCCC_ACCESS_VIOLATION
:
457 /* Read from a performance counter */
458 static int mlxbf_pmc_read(void __iomem
*addr
, uint32_t command
,
461 if (pmc
->svc_sreg_support
)
462 return mlxbf_pmc_secure_read(addr
, command
, result
);
464 if (command
== MLXBF_PMC_READ_REG_32
)
465 *result
= readl(addr
);
467 *result
= readq(addr
);
472 /* Convenience function for 32-bit reads */
473 static int mlxbf_pmc_readl(void __iomem
*addr
, uint32_t *result
)
478 status
= mlxbf_pmc_read(addr
, MLXBF_PMC_READ_REG_32
, &read_out
);
481 *result
= (uint32_t)read_out
;
486 /* Calls an SMC to access a performance register */
487 static int mlxbf_pmc_secure_write(void __iomem
*addr
, uint32_t command
,
490 struct arm_smccc_res res
;
493 arm_smccc_smc(command
, pmc
->sreg_tbl_perf
, value
, (uintptr_t)addr
, 0, 0,
499 case PSCI_RET_NOT_SUPPORTED
:
502 case MLXBF_PMC_SMCCC_ACCESS_VIOLATION
:
510 /* Write to a performance counter */
511 static int mlxbf_pmc_write(void __iomem
*addr
, int command
, uint64_t value
)
513 if (pmc
->svc_sreg_support
)
514 return mlxbf_pmc_secure_write(addr
, command
, value
);
516 if (command
== MLXBF_PMC_WRITE_REG_32
)
524 /* Check if the register offset is within the mapped region for the block */
525 static bool mlxbf_pmc_valid_range(int blk_num
, uint32_t offset
)
527 if ((offset
>= 0) && !(offset
% MLXBF_PMC_REG_SIZE
) &&
528 (offset
+ MLXBF_PMC_REG_SIZE
<= pmc
->block
[blk_num
].blk_size
))
529 return true; /* inside the mapped PMC space */
534 /* Get the event list corresponding to a certain block */
535 static const struct mlxbf_pmc_events
*mlxbf_pmc_event_list(const char *blk
,
538 const struct mlxbf_pmc_events
*events
;
540 if (strstr(blk
, "tilenet")) {
541 events
= mlxbf_pmc_hnfnet_events
;
542 *size
= ARRAY_SIZE(mlxbf_pmc_hnfnet_events
);
543 } else if (strstr(blk
, "tile")) {
544 events
= mlxbf_pmc_hnf_events
;
545 *size
= ARRAY_SIZE(mlxbf_pmc_hnf_events
);
546 } else if (strstr(blk
, "triogen")) {
547 events
= mlxbf_pmc_smgen_events
;
548 *size
= ARRAY_SIZE(mlxbf_pmc_smgen_events
);
549 } else if (strstr(blk
, "trio")) {
550 switch (pmc
->event_set
) {
551 case MLXBF_PMC_EVENT_SET_BF1
:
552 events
= mlxbf_pmc_trio_events_1
;
553 *size
= ARRAY_SIZE(mlxbf_pmc_trio_events_1
);
555 case MLXBF_PMC_EVENT_SET_BF2
:
556 events
= mlxbf_pmc_trio_events_2
;
557 *size
= ARRAY_SIZE(mlxbf_pmc_trio_events_2
);
564 } else if (strstr(blk
, "mss")) {
565 events
= mlxbf_pmc_mss_events
;
566 *size
= ARRAY_SIZE(mlxbf_pmc_mss_events
);
567 } else if (strstr(blk
, "ecc")) {
568 events
= mlxbf_pmc_ecc_events
;
569 *size
= ARRAY_SIZE(mlxbf_pmc_ecc_events
);
570 } else if (strstr(blk
, "pcie")) {
571 events
= mlxbf_pmc_pcie_events
;
572 *size
= ARRAY_SIZE(mlxbf_pmc_pcie_events
);
573 } else if (strstr(blk
, "l3cache")) {
574 events
= mlxbf_pmc_l3c_events
;
575 *size
= ARRAY_SIZE(mlxbf_pmc_l3c_events
);
576 } else if (strstr(blk
, "gic")) {
577 events
= mlxbf_pmc_smgen_events
;
578 *size
= ARRAY_SIZE(mlxbf_pmc_smgen_events
);
579 } else if (strstr(blk
, "smmu")) {
580 events
= mlxbf_pmc_smgen_events
;
581 *size
= ARRAY_SIZE(mlxbf_pmc_smgen_events
);
590 /* Get the event number given the name */
591 static int mlxbf_pmc_get_event_num(const char *blk
, const char *evt
)
593 const struct mlxbf_pmc_events
*events
;
596 events
= mlxbf_pmc_event_list(blk
, &size
);
600 for (i
= 0; i
< size
; ++i
) {
601 if (!strcmp(evt
, events
[i
].evt_name
))
602 return events
[i
].evt_num
;
608 /* Get the event number given the name */
609 static char *mlxbf_pmc_get_event_name(const char *blk
, int evt
)
611 const struct mlxbf_pmc_events
*events
;
614 events
= mlxbf_pmc_event_list(blk
, &size
);
618 for (i
= 0; i
< size
; ++i
) {
619 if (evt
== events
[i
].evt_num
)
620 return events
[i
].evt_name
;
626 /* Method to enable/disable/reset l3cache counters */
627 static int mlxbf_pmc_config_l3_counters(int blk_num
, bool enable
, bool reset
)
629 uint32_t perfcnt_cfg
= 0;
632 perfcnt_cfg
|= MLXBF_PMC_L3C_PERF_CNT_CFG_EN
;
634 perfcnt_cfg
|= MLXBF_PMC_L3C_PERF_CNT_CFG_RST
;
636 return mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+
637 MLXBF_PMC_L3C_PERF_CNT_CFG
,
638 MLXBF_PMC_WRITE_REG_32
, perfcnt_cfg
);
641 /* Method to handle l3cache counter programming */
642 static int mlxbf_pmc_program_l3_counter(int blk_num
, uint32_t cnt_num
,
645 uint32_t perfcnt_sel_1
= 0;
646 uint32_t perfcnt_sel
= 0;
648 void __iomem
*pmcaddr
;
651 /* Disable all counters before programming them */
652 if (mlxbf_pmc_config_l3_counters(blk_num
, false, false))
655 /* Select appropriate register information */
658 pmcaddr
= pmc
->block
[blk_num
].mmio_base
+
659 MLXBF_PMC_L3C_PERF_CNT_SEL
;
660 wordaddr
= &perfcnt_sel
;
663 pmcaddr
= pmc
->block
[blk_num
].mmio_base
+
664 MLXBF_PMC_L3C_PERF_CNT_SEL_1
;
665 wordaddr
= &perfcnt_sel_1
;
671 ret
= mlxbf_pmc_readl(pmcaddr
, wordaddr
);
677 perfcnt_sel
&= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0
;
678 perfcnt_sel
|= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0
,
682 perfcnt_sel
&= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1
;
683 perfcnt_sel
|= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1
,
687 perfcnt_sel
&= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2
;
688 perfcnt_sel
|= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2
,
692 perfcnt_sel
&= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3
;
693 perfcnt_sel
|= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3
,
697 perfcnt_sel_1
&= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4
;
698 perfcnt_sel_1
|= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4
,
705 return mlxbf_pmc_write(pmcaddr
, MLXBF_PMC_WRITE_REG_32
, *wordaddr
);
708 /* Method to program a counter to monitor an event */
709 static int mlxbf_pmc_program_counter(int blk_num
, uint32_t cnt_num
,
710 uint32_t evt
, bool is_l3
)
712 uint64_t perfctl
, perfevt
, perfmon_cfg
;
714 if (cnt_num
>= pmc
->block
[blk_num
].counters
)
718 return mlxbf_pmc_program_l3_counter(blk_num
, cnt_num
, evt
);
720 /* Configure the counter */
721 perfctl
= FIELD_PREP(MLXBF_PMC_PERFCTL_EN0
, 1);
722 perfctl
|= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0
, 0);
723 perfctl
|= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0
, 1);
724 perfctl
|= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0
, 0);
725 perfctl
|= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0
, 0);
726 perfctl
|= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0
, 0);
727 perfctl
|= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0
, 0);
729 perfmon_cfg
= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA
, perfctl
);
730 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR
,
732 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE
, 1);
733 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B
, 1);
735 if (mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+
736 cnt_num
* MLXBF_PMC_REG_SIZE
,
737 MLXBF_PMC_WRITE_REG_64
, perfmon_cfg
))
740 /* Select the event */
741 perfevt
= FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL
, evt
);
743 perfmon_cfg
= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA
, perfevt
);
744 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR
,
746 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE
, 1);
747 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B
, 1);
749 if (mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+
750 cnt_num
* MLXBF_PMC_REG_SIZE
,
751 MLXBF_PMC_WRITE_REG_64
, perfmon_cfg
))
754 /* Clear the accumulator */
755 perfmon_cfg
= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR
,
757 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE
, 1);
758 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B
, 1);
760 if (mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+
761 cnt_num
* MLXBF_PMC_REG_SIZE
,
762 MLXBF_PMC_WRITE_REG_64
, perfmon_cfg
))
768 /* Method to handle l3 counter reads */
769 static int mlxbf_pmc_read_l3_counter(int blk_num
, uint32_t cnt_num
,
772 uint32_t perfcnt_low
= 0, perfcnt_high
= 0;
776 status
= mlxbf_pmc_readl(pmc
->block
[blk_num
].mmio_base
+
777 MLXBF_PMC_L3C_PERF_CNT_LOW
+
778 cnt_num
* MLXBF_PMC_L3C_REG_SIZE
,
784 status
= mlxbf_pmc_readl(pmc
->block
[blk_num
].mmio_base
+
785 MLXBF_PMC_L3C_PERF_CNT_HIGH
+
786 cnt_num
* MLXBF_PMC_L3C_REG_SIZE
,
792 value
= perfcnt_high
;
794 value
|= perfcnt_low
;
800 /* Method to read the counter value */
801 static int mlxbf_pmc_read_counter(int blk_num
, uint32_t cnt_num
, bool is_l3
,
804 uint32_t perfcfg_offset
, perfval_offset
;
805 uint64_t perfmon_cfg
;
808 if (cnt_num
>= pmc
->block
[blk_num
].counters
)
812 return mlxbf_pmc_read_l3_counter(blk_num
, cnt_num
, result
);
814 perfcfg_offset
= cnt_num
* MLXBF_PMC_REG_SIZE
;
815 perfval_offset
= perfcfg_offset
+
816 pmc
->block
[blk_num
].counters
* MLXBF_PMC_REG_SIZE
;
818 /* Set counter in "read" mode */
819 perfmon_cfg
= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR
,
821 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE
, 1);
822 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B
, 0);
824 status
= mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+ perfcfg_offset
,
825 MLXBF_PMC_WRITE_REG_64
, perfmon_cfg
);
830 /* Get the counter value */
831 return mlxbf_pmc_read(pmc
->block
[blk_num
].mmio_base
+ perfval_offset
,
832 MLXBF_PMC_READ_REG_64
, result
);
835 /* Method to read L3 block event */
836 static int mlxbf_pmc_read_l3_event(int blk_num
, uint32_t cnt_num
,
839 uint32_t perfcnt_sel
= 0, perfcnt_sel_1
= 0;
841 void __iomem
*pmcaddr
;
844 /* Select appropriate register information */
847 pmcaddr
= pmc
->block
[blk_num
].mmio_base
+
848 MLXBF_PMC_L3C_PERF_CNT_SEL
;
849 wordaddr
= &perfcnt_sel
;
852 pmcaddr
= pmc
->block
[blk_num
].mmio_base
+
853 MLXBF_PMC_L3C_PERF_CNT_SEL_1
;
854 wordaddr
= &perfcnt_sel_1
;
860 if (mlxbf_pmc_readl(pmcaddr
, wordaddr
))
863 /* Read from appropriate register field for the counter */
866 evt
= FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0
, perfcnt_sel
);
869 evt
= FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1
, perfcnt_sel
);
872 evt
= FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2
, perfcnt_sel
);
875 evt
= FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3
, perfcnt_sel
);
878 evt
= FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4
,
889 /* Method to find the event currently being monitored by a counter */
890 static int mlxbf_pmc_read_event(int blk_num
, uint32_t cnt_num
, bool is_l3
,
893 uint32_t perfcfg_offset
, perfval_offset
;
894 uint64_t perfmon_cfg
, perfevt
, perfctl
;
896 if (cnt_num
>= pmc
->block
[blk_num
].counters
)
900 return mlxbf_pmc_read_l3_event(blk_num
, cnt_num
, result
);
902 perfcfg_offset
= cnt_num
* MLXBF_PMC_REG_SIZE
;
903 perfval_offset
= perfcfg_offset
+
904 pmc
->block
[blk_num
].counters
* MLXBF_PMC_REG_SIZE
;
906 /* Set counter in "read" mode */
907 perfmon_cfg
= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR
,
909 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE
, 1);
910 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B
, 0);
912 if (mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+ perfcfg_offset
,
913 MLXBF_PMC_WRITE_REG_64
, perfmon_cfg
))
916 /* Check if the counter is enabled */
918 if (mlxbf_pmc_read(pmc
->block
[blk_num
].mmio_base
+ perfval_offset
,
919 MLXBF_PMC_READ_REG_64
, &perfctl
))
922 if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0
, perfctl
))
925 /* Set counter in "read" mode */
926 perfmon_cfg
= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR
,
928 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE
, 1);
929 perfmon_cfg
|= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B
, 0);
931 if (mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+ perfcfg_offset
,
932 MLXBF_PMC_WRITE_REG_64
, perfmon_cfg
))
935 /* Get the event number */
936 if (mlxbf_pmc_read(pmc
->block
[blk_num
].mmio_base
+ perfval_offset
,
937 MLXBF_PMC_READ_REG_64
, &perfevt
))
940 *result
= FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL
, perfevt
);
945 /* Method to read a register */
946 static int mlxbf_pmc_read_reg(int blk_num
, uint32_t offset
, uint64_t *result
)
950 if (strstr(pmc
->block_name
[blk_num
], "ecc")) {
951 if (mlxbf_pmc_readl(pmc
->block
[blk_num
].mmio_base
+ offset
,
959 if (mlxbf_pmc_valid_range(blk_num
, offset
))
960 return mlxbf_pmc_read(pmc
->block
[blk_num
].mmio_base
+ offset
,
961 MLXBF_PMC_READ_REG_64
, result
);
966 /* Method to write to a register */
967 static int mlxbf_pmc_write_reg(int blk_num
, uint32_t offset
, uint64_t data
)
969 if (strstr(pmc
->block_name
[blk_num
], "ecc")) {
970 return mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+ offset
,
971 MLXBF_PMC_WRITE_REG_32
, data
);
974 if (mlxbf_pmc_valid_range(blk_num
, offset
))
975 return mlxbf_pmc_write(pmc
->block
[blk_num
].mmio_base
+ offset
,
976 MLXBF_PMC_WRITE_REG_64
, data
);
981 /* Show function for "counter" sysfs files */
982 static ssize_t
mlxbf_pmc_counter_show(struct device
*dev
,
983 struct device_attribute
*attr
, char *buf
)
985 struct mlxbf_pmc_attribute
*attr_counter
= container_of(
986 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
987 int blk_num
, cnt_num
, offset
;
991 blk_num
= attr_counter
->nr
;
992 cnt_num
= attr_counter
->index
;
994 if (strstr(pmc
->block_name
[blk_num
], "l3cache"))
997 if (pmc
->block
[blk_num
].type
== MLXBF_PMC_TYPE_COUNTER
) {
998 if (mlxbf_pmc_read_counter(blk_num
, cnt_num
, is_l3
, &value
))
1000 } else if (pmc
->block
[blk_num
].type
== MLXBF_PMC_TYPE_REGISTER
) {
1001 offset
= mlxbf_pmc_get_event_num(pmc
->block_name
[blk_num
],
1005 if (mlxbf_pmc_read_reg(blk_num
, offset
, &value
))
1010 return sprintf(buf
, "0x%llx\n", value
);
1013 /* Store function for "counter" sysfs files */
1014 static ssize_t
mlxbf_pmc_counter_store(struct device
*dev
,
1015 struct device_attribute
*attr
,
1016 const char *buf
, size_t count
)
1018 struct mlxbf_pmc_attribute
*attr_counter
= container_of(
1019 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
1020 int blk_num
, cnt_num
, offset
, err
, data
;
1024 blk_num
= attr_counter
->nr
;
1025 cnt_num
= attr_counter
->index
;
1027 err
= kstrtoint(buf
, 0, &data
);
1031 /* Allow non-zero writes only to the ecc regs */
1032 if (!(strstr(pmc
->block_name
[blk_num
], "ecc")) && data
)
1035 /* Do not allow writes to the L3C regs */
1036 if (strstr(pmc
->block_name
[blk_num
], "l3cache"))
1039 if (pmc
->block
[blk_num
].type
== MLXBF_PMC_TYPE_COUNTER
) {
1040 err
= mlxbf_pmc_read_event(blk_num
, cnt_num
, is_l3
, &evt_num
);
1043 err
= mlxbf_pmc_program_counter(blk_num
, cnt_num
, evt_num
,
1047 } else if (pmc
->block
[blk_num
].type
== MLXBF_PMC_TYPE_REGISTER
) {
1048 offset
= mlxbf_pmc_get_event_num(pmc
->block_name
[blk_num
],
1052 err
= mlxbf_pmc_write_reg(blk_num
, offset
, data
);
1061 /* Show function for "event" sysfs files */
1062 static ssize_t
mlxbf_pmc_event_show(struct device
*dev
,
1063 struct device_attribute
*attr
, char *buf
)
1065 struct mlxbf_pmc_attribute
*attr_event
= container_of(
1066 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
1067 int blk_num
, cnt_num
, err
;
1072 blk_num
= attr_event
->nr
;
1073 cnt_num
= attr_event
->index
;
1075 if (strstr(pmc
->block_name
[blk_num
], "l3cache"))
1078 err
= mlxbf_pmc_read_event(blk_num
, cnt_num
, is_l3
, &evt_num
);
1080 return sprintf(buf
, "No event being monitored\n");
1082 evt_name
= mlxbf_pmc_get_event_name(pmc
->block_name
[blk_num
], evt_num
);
1086 return sprintf(buf
, "0x%llx: %s\n", evt_num
, evt_name
);
1089 /* Store function for "event" sysfs files */
1090 static ssize_t
mlxbf_pmc_event_store(struct device
*dev
,
1091 struct device_attribute
*attr
,
1092 const char *buf
, size_t count
)
1094 struct mlxbf_pmc_attribute
*attr_event
= container_of(
1095 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
1096 int blk_num
, cnt_num
, evt_num
, err
;
1099 blk_num
= attr_event
->nr
;
1100 cnt_num
= attr_event
->index
;
1102 if (isalpha(buf
[0])) {
1103 evt_num
= mlxbf_pmc_get_event_num(pmc
->block_name
[blk_num
],
1108 err
= kstrtoint(buf
, 0, &evt_num
);
1113 if (strstr(pmc
->block_name
[blk_num
], "l3cache"))
1116 err
= mlxbf_pmc_program_counter(blk_num
, cnt_num
, evt_num
, is_l3
);
1123 /* Show function for "event_list" sysfs files */
1124 static ssize_t
mlxbf_pmc_event_list_show(struct device
*dev
,
1125 struct device_attribute
*attr
,
1128 struct mlxbf_pmc_attribute
*attr_event_list
= container_of(
1129 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
1130 int blk_num
, i
, size
, len
= 0, ret
= 0;
1131 const struct mlxbf_pmc_events
*events
;
1132 char e_info
[MLXBF_PMC_EVENT_INFO_LEN
];
1134 blk_num
= attr_event_list
->nr
;
1136 events
= mlxbf_pmc_event_list(pmc
->block_name
[blk_num
], &size
);
1140 for (i
= 0, buf
[0] = '\0'; i
< size
; ++i
) {
1141 len
+= sprintf(e_info
, "0x%x: %s\n", events
[i
].evt_num
,
1142 events
[i
].evt_name
);
1143 if (len
> PAGE_SIZE
)
1145 strcat(buf
, e_info
);
1152 /* Show function for "enable" sysfs files - only for l3cache */
1153 static ssize_t
mlxbf_pmc_enable_show(struct device
*dev
,
1154 struct device_attribute
*attr
, char *buf
)
1156 struct mlxbf_pmc_attribute
*attr_enable
= container_of(
1157 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
1158 uint32_t perfcnt_cfg
;
1161 blk_num
= attr_enable
->nr
;
1163 if (mlxbf_pmc_readl(pmc
->block
[blk_num
].mmio_base
+
1164 MLXBF_PMC_L3C_PERF_CNT_CFG
,
1168 value
= FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN
, perfcnt_cfg
);
1170 return sprintf(buf
, "%d\n", value
);
1173 /* Store function for "enable" sysfs files - only for l3cache */
1174 static ssize_t
mlxbf_pmc_enable_store(struct device
*dev
,
1175 struct device_attribute
*attr
,
1176 const char *buf
, size_t count
)
1178 struct mlxbf_pmc_attribute
*attr_enable
= container_of(
1179 attr
, struct mlxbf_pmc_attribute
, dev_attr
);
1180 int err
, en
, blk_num
;
1182 blk_num
= attr_enable
->nr
;
1184 err
= kstrtoint(buf
, 0, &en
);
1189 err
= mlxbf_pmc_config_l3_counters(blk_num
, false, false);
1192 } else if (en
== 1) {
1193 err
= mlxbf_pmc_config_l3_counters(blk_num
, false, true);
1196 err
= mlxbf_pmc_config_l3_counters(blk_num
, true, false);
1205 /* Populate attributes for blocks with counters to monitor performance */
1206 static int mlxbf_pmc_init_perftype_counter(struct device
*dev
, int blk_num
)
1208 struct mlxbf_pmc_attribute
*attr
;
1211 /* "event_list" sysfs to list events supported by the block */
1212 attr
= &pmc
->block
[blk_num
].attr_event_list
;
1213 attr
->dev_attr
.attr
.mode
= 0444;
1214 attr
->dev_attr
.show
= mlxbf_pmc_event_list_show
;
1216 attr
->dev_attr
.attr
.name
= devm_kasprintf(dev
, GFP_KERNEL
, "event_list");
1217 pmc
->block
[blk_num
].block_attr
[i
] = &attr
->dev_attr
.attr
;
1220 /* "enable" sysfs to start/stop the counters. Only in L3C blocks */
1221 if (strstr(pmc
->block_name
[blk_num
], "l3cache")) {
1222 attr
= &pmc
->block
[blk_num
].attr_enable
;
1223 attr
->dev_attr
.attr
.mode
= 0644;
1224 attr
->dev_attr
.show
= mlxbf_pmc_enable_show
;
1225 attr
->dev_attr
.store
= mlxbf_pmc_enable_store
;
1227 attr
->dev_attr
.attr
.name
= devm_kasprintf(dev
, GFP_KERNEL
,
1229 pmc
->block
[blk_num
].block_attr
[++i
] = &attr
->dev_attr
.attr
;
1233 pmc
->block
[blk_num
].attr_counter
= devm_kcalloc(
1234 dev
, pmc
->block
[blk_num
].counters
,
1235 sizeof(struct mlxbf_pmc_attribute
), GFP_KERNEL
);
1236 if (!pmc
->block
[blk_num
].attr_counter
)
1239 pmc
->block
[blk_num
].attr_event
= devm_kcalloc(
1240 dev
, pmc
->block
[blk_num
].counters
,
1241 sizeof(struct mlxbf_pmc_attribute
), GFP_KERNEL
);
1242 if (!pmc
->block
[blk_num
].attr_event
)
1245 /* "eventX" and "counterX" sysfs to program and read counter values */
1246 for (j
= 0; j
< pmc
->block
[blk_num
].counters
; ++j
) {
1247 attr
= &pmc
->block
[blk_num
].attr_counter
[j
];
1248 attr
->dev_attr
.attr
.mode
= 0644;
1249 attr
->dev_attr
.show
= mlxbf_pmc_counter_show
;
1250 attr
->dev_attr
.store
= mlxbf_pmc_counter_store
;
1253 attr
->dev_attr
.attr
.name
= devm_kasprintf(dev
, GFP_KERNEL
,
1255 pmc
->block
[blk_num
].block_attr
[++i
] = &attr
->dev_attr
.attr
;
1258 attr
= &pmc
->block
[blk_num
].attr_event
[j
];
1259 attr
->dev_attr
.attr
.mode
= 0644;
1260 attr
->dev_attr
.show
= mlxbf_pmc_event_show
;
1261 attr
->dev_attr
.store
= mlxbf_pmc_event_store
;
1264 attr
->dev_attr
.attr
.name
= devm_kasprintf(dev
, GFP_KERNEL
,
1266 pmc
->block
[blk_num
].block_attr
[++i
] = &attr
->dev_attr
.attr
;
1273 /* Populate attributes for blocks with registers to monitor performance */
1274 static int mlxbf_pmc_init_perftype_reg(struct device
*dev
, int blk_num
)
1276 struct mlxbf_pmc_attribute
*attr
;
1277 const struct mlxbf_pmc_events
*events
;
1280 events
= mlxbf_pmc_event_list(pmc
->block_name
[blk_num
], &j
);
1284 pmc
->block
[blk_num
].attr_event
= devm_kcalloc(
1285 dev
, j
, sizeof(struct mlxbf_pmc_attribute
), GFP_KERNEL
);
1286 if (!pmc
->block
[blk_num
].attr_event
)
1291 attr
= &pmc
->block
[blk_num
].attr_event
[j
];
1292 attr
->dev_attr
.attr
.mode
= 0644;
1293 attr
->dev_attr
.show
= mlxbf_pmc_counter_show
;
1294 attr
->dev_attr
.store
= mlxbf_pmc_counter_store
;
1296 attr
->dev_attr
.attr
.name
= devm_kasprintf(dev
, GFP_KERNEL
,
1297 events
[j
].evt_name
);
1298 pmc
->block
[blk_num
].block_attr
[i
] = &attr
->dev_attr
.attr
;
1306 /* Helper to create the bfperf sysfs sub-directories and files */
1307 static int mlxbf_pmc_create_groups(struct device
*dev
, int blk_num
)
1311 /* Populate attributes based on counter type */
1312 if (pmc
->block
[blk_num
].type
== MLXBF_PMC_TYPE_COUNTER
)
1313 err
= mlxbf_pmc_init_perftype_counter(dev
, blk_num
);
1314 else if (pmc
->block
[blk_num
].type
== MLXBF_PMC_TYPE_REGISTER
)
1315 err
= mlxbf_pmc_init_perftype_reg(dev
, blk_num
);
1322 /* Add a new attribute_group for the block */
1323 pmc
->block
[blk_num
].block_attr_grp
.attrs
= pmc
->block
[blk_num
].block_attr
;
1324 pmc
->block
[blk_num
].block_attr_grp
.name
= devm_kasprintf(
1325 dev
, GFP_KERNEL
, pmc
->block_name
[blk_num
]);
1326 pmc
->groups
[blk_num
] = &pmc
->block
[blk_num
].block_attr_grp
;
1331 static bool mlxbf_pmc_guid_match(const guid_t
*guid
,
1332 const struct arm_smccc_res
*res
)
1334 guid_t id
= GUID_INIT(res
->a0
, res
->a1
, res
->a1
>> 16, res
->a2
,
1335 res
->a2
>> 8, res
->a2
>> 16, res
->a2
>> 24,
1336 res
->a3
, res
->a3
>> 8, res
->a3
>> 16,
1339 return guid_equal(guid
, &id
);
1342 /* Helper to map the Performance Counters from the varios blocks */
1343 static int mlxbf_pmc_map_counters(struct device
*dev
)
1345 uint64_t info
[MLXBF_PMC_INFO_SZ
];
1346 int i
, tile_num
, ret
;
1348 for (i
= 0; i
< pmc
->total_blocks
; ++i
) {
1349 if (strstr(pmc
->block_name
[i
], "tile")) {
1350 ret
= sscanf(pmc
->block_name
[i
], "tile%d", &tile_num
);
1354 if (tile_num
>= pmc
->tile_count
)
1357 ret
= device_property_read_u64_array(dev
, pmc
->block_name
[i
],
1358 info
, MLXBF_PMC_INFO_SZ
);
1363 * Do not remap if the proper SMC calls are supported,
1364 * since the SMC calls expect physical addresses.
1366 if (pmc
->svc_sreg_support
)
1367 pmc
->block
[i
].mmio_base
= (void __iomem
*)info
[0];
1369 pmc
->block
[i
].mmio_base
=
1370 devm_ioremap(dev
, info
[0], info
[1]);
1372 pmc
->block
[i
].blk_size
= info
[1];
1373 pmc
->block
[i
].counters
= info
[2];
1374 pmc
->block
[i
].type
= info
[3];
1376 if (IS_ERR(pmc
->block
[i
].mmio_base
))
1377 return PTR_ERR(pmc
->block
[i
].mmio_base
);
1379 ret
= mlxbf_pmc_create_groups(dev
, i
);
1387 static int mlxbf_pmc_probe(struct platform_device
*pdev
)
1389 struct acpi_device
*acpi_dev
= ACPI_COMPANION(&pdev
->dev
);
1390 const char *hid
= acpi_device_hid(acpi_dev
);
1391 struct device
*dev
= &pdev
->dev
;
1392 struct arm_smccc_res res
;
1396 /* Ensure we have the UUID we expect for this service. */
1397 arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID
, 0, 0, 0, 0, 0, 0, 0, &res
);
1398 guid_parse(mlxbf_pmc_svc_uuid_str
, &guid
);
1399 if (!mlxbf_pmc_guid_match(&guid
, &res
))
1402 pmc
= devm_kzalloc(dev
, sizeof(struct mlxbf_pmc_context
), GFP_KERNEL
);
1407 * ACPI indicates whether we use SMCs to access registers or not.
1408 * If sreg_tbl_perf is not present, just assume we're not using SMCs.
1410 ret
= device_property_read_u32(dev
, "sec_reg_block",
1411 &pmc
->sreg_tbl_perf
);
1413 pmc
->svc_sreg_support
= false;
1416 * Check service version to see if we actually do support the
1417 * needed SMCs. If we have the calls we need, mark support for
1418 * them in the pmc struct.
1420 arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION
, 0, 0, 0, 0, 0, 0, 0,
1422 if (res
.a0
== MLXBF_PMC_SVC_REQ_MAJOR
&&
1423 res
.a1
>= MLXBF_PMC_SVC_MIN_MINOR
)
1424 pmc
->svc_sreg_support
= true;
1429 if (!strcmp(hid
, "MLNXBFD0"))
1430 pmc
->event_set
= MLXBF_PMC_EVENT_SET_BF1
;
1431 else if (!strcmp(hid
, "MLNXBFD1"))
1432 pmc
->event_set
= MLXBF_PMC_EVENT_SET_BF2
;
1436 ret
= device_property_read_u32(dev
, "block_num", &pmc
->total_blocks
);
1440 ret
= device_property_read_string_array(dev
, "block_name",
1443 if (ret
!= pmc
->total_blocks
)
1446 ret
= device_property_read_u32(dev
, "tile_num", &pmc
->tile_count
);
1452 ret
= mlxbf_pmc_map_counters(dev
);
1456 pmc
->hwmon_dev
= devm_hwmon_device_register_with_groups(
1457 dev
, "bfperf", pmc
, pmc
->groups
);
1458 platform_set_drvdata(pdev
, pmc
);
1463 static const struct acpi_device_id mlxbf_pmc_acpi_ids
[] = { { "MLNXBFD0", 0 },
1467 MODULE_DEVICE_TABLE(acpi
, mlxbf_pmc_acpi_ids
);
1468 static struct platform_driver pmc_driver
= {
1469 .driver
= { .name
= "mlxbf-pmc",
1470 .acpi_match_table
= ACPI_PTR(mlxbf_pmc_acpi_ids
), },
1471 .probe
= mlxbf_pmc_probe
,
1474 module_platform_driver(pmc_driver
);
1476 MODULE_AUTHOR("Shravan Kumar Ramani <sramani@mellanox.com>");
1477 MODULE_DESCRIPTION("Mellanox PMC driver");
1478 MODULE_LICENSE("Dual BSD/GPL");