// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "mlx5_ib.h"
#include <linux/mlx5/eswitch.h>
#include "counters.h"

struct mlx5_ib_counter {
	const char *name;
	size_t offset;
};

#define INIT_Q_COUNTER(_name)		\
	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
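
/* Q counters: each entry pairs a stat name with its byte offset in the
 * QUERY_Q_COUNTER command output, so values can be fetched generically.
 */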
static const struct mlx5_ib_counter basic_q_cnts[] = {
	INIT_Q_COUNTER(rx_write_requests),
	INIT_Q_COUNTER(rx_read_requests),
	INIT_Q_COUNTER(rx_atomic_requests),
	INIT_Q_COUNTER(out_of_buffer),
};

static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
	INIT_Q_COUNTER(out_of_sequence),
};

static const struct mlx5_ib_counter retrans_q_cnts[] = {
	INIT_Q_COUNTER(duplicate_request),
	INIT_Q_COUNTER(rnr_nak_retry_err),
	INIT_Q_COUNTER(packet_seq_err),
	INIT_Q_COUNTER(implied_nak_seq_err),
	INIT_Q_COUNTER(local_ack_timeout_err),
};

#define INIT_CONG_COUNTER(_name)		\
	{ .name = #_name, .offset =	\
		MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}

static const struct mlx5_ib_counter cong_cnts[] = {
	INIT_CONG_COUNTER(rp_cnp_ignored),
	INIT_CONG_COUNTER(rp_cnp_handled),
	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
	INIT_CONG_COUNTER(np_cnp_sent),
};

static const struct mlx5_ib_counter extended_err_cnts[] = {
	INIT_Q_COUNTER(resp_local_length_error),
	INIT_Q_COUNTER(resp_cqe_error),
	INIT_Q_COUNTER(req_cqe_error),
	INIT_Q_COUNTER(req_remote_invalid_request),
	INIT_Q_COUNTER(req_remote_access_errors),
	INIT_Q_COUNTER(resp_remote_access_errors),
	INIT_Q_COUNTER(resp_cqe_flush_error),
	INIT_Q_COUNTER(req_cqe_flush_error),
};

static const struct mlx5_ib_counter roce_accl_cnts[] = {
	INIT_Q_COUNTER(roce_adp_retrans),
	INIT_Q_COUNTER(roce_adp_retrans_to),
	INIT_Q_COUNTER(roce_slow_restart),
	INIT_Q_COUNTER(roce_slow_restart_cnps),
	INIT_Q_COUNTER(roce_slow_restart_trans),
};

#define INIT_EXT_PPCNT_COUNTER(_name)		\
	{ .name = #_name, .offset =	\
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}

static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
	INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
};
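
/* uverbs read path: snapshot the HW counters into a scratch buffer, then
 * scatter them into the user buffer according to the bound index pairs.
 */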
static int mlx5_ib_read_counters(struct ib_counters *counters,
				 struct ib_counters_read_attr *read_attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	struct mlx5_read_counters_attr mread_attr = {};
	struct mlx5_ib_flow_counters_desc *desc;
	int ret, i;

	mutex_lock(&mcounters->mcntrs_mutex);
	if (mcounters->cntrs_max_index > read_attr->ncounters) {
		ret = -EINVAL;
		goto err_bound;
	}

	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
				 GFP_KERNEL);
	if (!mread_attr.out) {
		ret = -ENOMEM;
		goto err_bound;
	}

	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
	mread_attr.flags = read_attr->flags;
	ret = mcounters->read_counters(counters->device, &mread_attr);
	if (ret)
		goto err_read;

	/* walk the counters data array and assign values according to the
	 * description and index pairs
	 */
	desc = mcounters->counters_data;
	for (i = 0; i < mcounters->ncounters; i++)
		read_attr->counters_buff[desc[i].index] +=
			mread_attr.out[desc[i].description];

err_read:
	kfree(mread_attr.out);
err_bound:
	mutex_unlock(&mcounters->mcntrs_mutex);
	return ret;
}
static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mlx5_ib_counters_clear_description(counters);
	if (mcounters->hw_cntrs_hndl)
		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
				mcounters->hw_cntrs_hndl);
	return 0;
}

static int mlx5_ib_create_counters(struct ib_counters *counters,
				   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mutex_init(&mcounters->mcntrs_mutex);
	return 0;
}
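
/* In switchdev mode all vports share the counter set of port 0; otherwise
 * each port has its own set.
 */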
static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
						   u8 port_num)
{
	return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
						   &dev->port[port_num].cnts;
}

/**
 * mlx5_ib_get_counters_id - Returns counters id to use for device+port
 * @dev: Pointer to mlx5 IB device
 * @port_num: Zero based port number
 *
 * mlx5_ib_get_counters_id() returns the counter set id to use for the given
 * device and port combination, in both switchdev and non-switchdev modes of
 * the parent device.
 */
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
{
	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);

	return cnts->set_id;
}
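
/* Note the asymmetric port numbering: port_num == 0 is the whole-device
 * switchdev case, while native mode uses 1-based port numbers.
 */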
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts;
	bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);

	if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
		return NULL;

	cnts = get_counters(dev, port_num - 1);

	return rdma_alloc_hw_stats_struct(cnts->names,
					  cnts->num_q_counters +
					  cnts->num_cong_counters +
					  cnts->num_ext_ppcnt_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
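
/* Issue QUERY_Q_COUNTER for one counter set and convert each 32-bit
 * big-endian field at the precomputed offsets into the 64-bit stats array.
 */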
static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
				    const struct mlx5_ib_counters *cnts,
				    struct rdma_hw_stats *stats,
				    u16 set_id)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	__be32 val;
	int ret, i;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
	ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
	if (ret)
		return ret;

	for (i = 0; i < cnts->num_q_counters; i++) {
		val = *(__be32 *)((void *)out + cnts->offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}

	return 0;
}
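
/* Extended per-port counters (currently only rx_icrc_encapsulated) live in
 * the PPCNT access register rather than in the Q counter command.
 */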
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
					    const struct mlx5_ib_counters *cnts,
					    struct rdma_hw_stats *stats)
{
	int offset = cnts->num_q_counters + cnts->num_cong_counters;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int ret, i;
	void *out;

	out = kvzalloc(sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	ret = mlx5_core_access_reg(dev->mdev, in, sz, out, sz, MLX5_REG_PPCNT,
				   0, 0);
	if (ret)
		goto free;

	for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
		stats->value[i + offset] =
			be64_to_cpup((__be64 *)(out +
				     cnts->offsets[i + offset]));
free:
	kvfree(out);
	return ret;
}
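
/* Top-level sysfs stats query. Q counters and PPCNT are read from the
 * master device; congestion counters are only read once the native port
 * is affiliated.
 */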
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port_num, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
	struct mlx5_core_dev *mdev;
	int ret, num_counters;
	u8 mdev_port_num;

	if (!stats)
		return -EINVAL;

	num_counters = cnts->num_q_counters +
		       cnts->num_cong_counters +
		       cnts->num_ext_ppcnt_counters;

	/* q_counters are per IB device, query the master mdev */
	ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
	if (ret)
		return ret;

	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
		if (ret)
			return ret;
	}

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
						    &mdev_port_num);
		if (!mdev) {
			/* If the port is not affiliated yet, it's in down
			 * state, which doesn't have any counters yet, so they
			 * would be zero. No need to read from the HCA.
			 */
			goto done;
		}
		ret = mlx5_lag_query_cong_counters(dev->mdev,
						   stats->value +
						   cnts->num_q_counters,
						   cnts->num_cong_counters,
						   cnts->offsets +
						   cnts->num_q_counters);

		mlx5_ib_put_native_port_mdev(dev, port_num);
		if (ret)
			return ret;
	}

done:
	return num_counters;
}
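
/* Per-QP counter support: each rdma_counter gets its own stats struct and
 * is refreshed straight from its dedicated Q counter set.
 */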
static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts =
			get_counters(dev, counter->port - 1);

	return rdma_alloc_hw_stats_struct(cnts->names,
					  cnts->num_q_counters +
					  cnts->num_cong_counters +
					  cnts->num_ext_ppcnt_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts =
			get_counters(dev, counter->port - 1);

	return mlx5_ib_query_q_counters(dev->mdev, cnts,
					counter->stats, counter->id);
}

static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};

	if (!counter->id)
		return 0;

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
}
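
/* Lazily allocate a dedicated Q counter set on first bind, then point the
 * QP at it; on failure the set is released and the id reset for retry.
 */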
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
				   struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	int err;

	if (!counter->id) {
		u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
		u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};

		MLX5_SET(alloc_q_counter_in, in, opcode,
			 MLX5_CMD_OP_ALLOC_Q_COUNTER);
		MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
		if (err)
			return err;
		counter->id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	}

	err = mlx5_ib_qp_set_counter(qp, counter);
	if (err)
		goto fail_set_counter;

	return 0;

fail_set_counter:
	mlx5_ib_counter_dealloc(counter);
	counter->id = 0;

	return err;
}

static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
{
	return mlx5_ib_qp_set_counter(qp, NULL);
}
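
/* Populate the names/offsets arrays; the capability checks below must match
 * the sizing logic in __mlx5_ib_alloc_counters().
 */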
static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
				  const char **names,
				  size_t *offsets)
{
	int i;
	int j = 0;

	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
		names[j] = basic_q_cnts[i].name;
		offsets[j] = basic_q_cnts[i].offset;
	}

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
			names[j] = out_of_seq_q_cnts[i].name;
			offsets[j] = out_of_seq_q_cnts[i].offset;
		}
	}

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
			names[j] = retrans_q_cnts[i].name;
			offsets[j] = retrans_q_cnts[i].offset;
		}
	}

	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
		for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
			names[j] = extended_err_cnts[i].name;
			offsets[j] = extended_err_cnts[i].offset;
		}
	}

	if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
		for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
			names[j] = roce_accl_cnts[i].name;
			offsets[j] = roce_accl_cnts[i].offset;
		}
	}

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
			names[j] = cong_cnts[i].name;
			offsets[j] = cong_cnts[i].offset;
		}
	}

	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
			names[j] = ext_ppcnt_cnts[i].name;
			offsets[j] = ext_ppcnt_cnts[i].offset;
		}
	}
}
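
/* Size and allocate the names/offsets arrays for one port's counter set,
 * based on which optional counter groups the device supports.
 */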
static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_counters *cnts)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(basic_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
		num_counters += ARRAY_SIZE(retrans_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
		num_counters += ARRAY_SIZE(extended_err_cnts);

	if (MLX5_CAP_GEN(dev->mdev, roce_accl))
		num_counters += ARRAY_SIZE(roce_accl_cnts);

	cnts->num_q_counters = num_counters;

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
		num_counters += ARRAY_SIZE(cong_cnts);
	}
	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
	}
	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
	if (!cnts->names)
		return -ENOMEM;

	cnts->offsets = kcalloc(num_counters,
				sizeof(*cnts->offsets), GFP_KERNEL);
	if (!cnts->offsets)
		goto err_names;

	return 0;

err_names:
	kfree(cnts->names);
	cnts->names = NULL;
	return -ENOMEM;
}
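
/* Free each port's Q counter set in firmware (if one was allocated) along
 * with its names/offsets arrays; switchdev mode uses a single "port".
 */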
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
	int num_cnt_ports;
	int i;

	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);

	for (i = 0; i < num_cnt_ports; i++) {
		if (dev->port[i].cnts.set_id) {
			MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
				 dev->port[i].cnts.set_id);
			mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
		}
		kfree(dev->port[i].cnts.names);
		kfree(dev->port[i].cnts.offsets);
	}
}
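
/* Allocate and fill the counter tables for each port, then allocate the Q
 * counter set in firmware and record its id for later queries.
 */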
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
	int num_cnt_ports;
	int err = 0;
	int i;
	bool is_shared;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;

	for (i = 0; i < num_cnt_ports; i++) {
		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
		if (err)
			goto err_alloc;

		mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
				      dev->port[i].cnts.offsets);

		MLX5_SET(alloc_q_counter_in, in, uid,
			 is_shared ? MLX5_SHARED_RESOURCE_UID : 0);

		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
		if (err) {
			mlx5_ib_warn(dev,
				     "couldn't allocate queue counter for port %d, err %d\n",
				     i + 1, err);
			goto err_alloc;
		}

		dev->port[i].cnts.set_id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	}
	return 0;

err_alloc:
	mlx5_ib_dealloc_counters(dev);
	return err;
}

static int read_flow_counters(struct ib_device *ibdev,
			      struct mlx5_read_counters_attr *read_attr)
{
	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	return mlx5_fc_query(dev->mdev, fc,
			     &read_attr->out[IB_COUNTER_PACKETS],
			     &read_attr->out[IB_COUNTER_BYTES]);
}

/* flow counters currently expose two counters: packets and bytes */
#define FLOW_COUNTERS_NUM 2
static int counters_set_description(
	struct ib_counters *counters, enum mlx5_ib_counters_type counters_type,
	struct mlx5_ib_flow_counters_desc *desc_data, u32 ncounters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	u32 cntrs_max_index = 0;
	int i;

	if (counters_type != MLX5_IB_COUNTERS_FLOW)
		return -EINVAL;

	/* init the fields for the object */
	mcounters->type = counters_type;
	mcounters->read_counters = read_flow_counters;
	mcounters->counters_num = FLOW_COUNTERS_NUM;
	mcounters->ncounters = ncounters;
	/* each counter entry has both a description and an index pair */
	for (i = 0; i < ncounters; i++) {
		if (desc_data[i].description > IB_COUNTER_BYTES)
			return -EINVAL;

		if (cntrs_max_index <= desc_data[i].index)
			cntrs_max_index = desc_data[i].index + 1;
	}

	mutex_lock(&mcounters->mcntrs_mutex);
	mcounters->counters_data = desc_data;
	mcounters->cntrs_max_index = cntrs_max_index;
	mutex_unlock(&mcounters->mcntrs_mutex);

	return 0;
}
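
/* Attach user-supplied {index, description} pairs and a HW flow counter to
 * an ib_counters object; used on the flow-creation path with optional udata.
 */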
#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
				   struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
	bool hw_hndl = false;
	int ret = 0;

	if (ucmd && ucmd->ncounters_data != 0) {
		cntrs_data = ucmd->data;
		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
			return -EINVAL;

		desc_data = kcalloc(cntrs_data->ncounters,
				    sizeof(*desc_data),
				    GFP_KERNEL);
		if (!desc_data)
			return -ENOMEM;

		if (copy_from_user(desc_data,
				   u64_to_user_ptr(cntrs_data->counters_data),
				   sizeof(*desc_data) * cntrs_data->ncounters)) {
			ret = -EFAULT;
			goto free;
		}
	}

	if (!mcounters->hw_cntrs_hndl) {
		mcounters->hw_cntrs_hndl = mlx5_fc_create(
			to_mdev(ibcounters->device)->mdev, false);
		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
			goto free;
		}
		hw_hndl = true;
	}

	if (desc_data) {
		/* counters already bound to at least one flow */
		if (mcounters->cntrs_max_index) {
			ret = -EINVAL;
			goto free_hndl;
		}

		ret = counters_set_description(ibcounters,
					       MLX5_IB_COUNTERS_FLOW,
					       desc_data,
					       cntrs_data->ncounters);
		if (ret)
			goto free_hndl;

	} else if (!mcounters->cntrs_max_index) {
		/* counters not bound yet, must have udata passed */
		ret = -EINVAL;
		goto free_hndl;
	}

	return 0;

free_hndl:
	if (hw_hndl) {
		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
				mcounters->hw_cntrs_hndl);
		mcounters->hw_cntrs_hndl = NULL;
	}
free:
	kfree(desc_data);
	return ret;
}
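
/* Drop the counter descriptions once only the last reference remains. */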
void mlx5_ib_counters_clear_description(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters;

	if (!counters || atomic_read(&counters->usecnt) != 1)
		return;

	mcounters = to_mcounters(counters);

	mutex_lock(&mcounters->mcntrs_mutex);
	kfree(mcounters->counters_data);
	mcounters->counters_data = NULL;
	mcounters->cntrs_max_index = 0;
	mutex_unlock(&mcounters->mcntrs_mutex);
}

static const struct ib_device_ops hw_stats_ops = {
	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
	.get_hw_stats = mlx5_ib_get_hw_stats,
	.counter_bind_qp = mlx5_ib_counter_bind_qp,
	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
	.counter_dealloc = mlx5_ib_counter_dealloc,
	.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
	.counter_update_stats = mlx5_ib_counter_update_stats,
};

static const struct ib_device_ops counters_ops = {
	.create_counters = mlx5_ib_create_counters,
	.destroy_counters = mlx5_ib_destroy_counters,
	.read_counters = mlx5_ib_read_counters,

	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
};
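
/* hw_stats_ops (and thus Q counter set allocation) are only wired up when
 * the device actually supports QP counters (max_qp_cnt).
 */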
int mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &counters_ops);

	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		return 0;

	ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
	return mlx5_ib_alloc_counters(dev);
}

void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev)
{
	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		return;

	mlx5_ib_dealloc_counters(dev);
}