/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"

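/* QPs, RQs, SQs and DCTs are all tracked in the per-device qp_table as
 * mlx5_core_rsc_common objects.  mlx5_get_rsc() looks a resource up by its
 * resource serial number (RSN) and takes a reference on it; the caller must
 * drop that reference with mlx5_core_put_rsc().
 */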
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common;

	spin_lock(&table->lock);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);

	spin_unlock(&table->lock);

	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);
		return NULL;
	}
	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (atomic_dec_and_test(&common->refcount))
		complete(&common->free);
}

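/* Per resource type masks of the firmware events that may legitimately be
 * delivered to a resource of that type.  The resource type is carried in the
 * RSN bits above MLX5_USER_INDEX_LEN (see is_event_type_allowed() below).
 */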
static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}

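/* Dispatch a firmware async event to the resource it belongs to: QP, RQ and
 * SQ events go to the owner's ->event() callback, a DCT drained event
 * completes the DCT's "drained" completion.
 */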
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_dct *dct;
	struct mlx5_core_qp *qp;

	if (!common)
		return;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		break;
	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}
out:
	mlx5_core_put_rsc(common);
}

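/* Insert a resource into the radix tree, keyed by its QPN with the resource
 * type stored in the bits above MLX5_USER_INDEX_LEN.  The matching
 * destroy_resource_common() removes the entry, drops the initial reference
 * and waits until all remaining references are gone.
 */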
static int create_resource_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp,
				  int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}

static void destroy_resource_common(struct mlx5_core_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}

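/* Create a DCT through the CREATE_DCT command and register it in the common
 * resource table.  If registration fails the DCT is destroyed again.
 */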
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
		return err;
	}

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	err = create_resource_common(dev, qp, MLX5_RES_DCT);
	if (err)
		goto err_cmd;

	return 0;

err_cmd:
	MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	memset(din, 0, sizeof(din));
	memset(dout, 0, sizeof(dout));
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

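/* DCT teardown: the DCT is drained first and the driver waits for the
 * DCT drained firmware event before issuing DESTROY_DCT, unless the device
 * is already in internal error state.
 */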
static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
			goto destroy;
		} else {
			mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
			return err;
		}
	}
	wait_for_completion(&dct->drained);
destroy:
	destroy_resource_common(dev, &dct->mqp);
	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
	int err;

	mlx5_debug_qp_remove(dev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

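/* Program the device-global delay-drop timeout used by RQs that have delay
 * drop enabled.
 */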
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
			     u32 timeout_usec)
{
	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);

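/* Scratch mailbox for modify-QP commands: the command input/output buffers
 * and their lengths, sized per transition opcode by modify_qp_mbox_alloc().
 */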
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
	MLX5_SET(typ##_in, in, opcode, _opcode); \
	MLX5_SET(typ##_in, in, qpn, _qpn)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}

int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
				   opt_param_mask, qpc, &mbox);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
	mbox_free(&mbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

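/* Set up and tear down the per-device QP table and its debugfs entries. */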
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

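/* XRC domain (XRCD) allocation and deallocation. */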
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

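/* "Tracked" RQ/SQ helpers: create the transport object and also register it
 * in the common resource table so it can receive firmware events.
 */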
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5_core_destroy_rq(dev, rq->qpn);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	int err;
	u32 sqn;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->qpn = sqn;
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	mlx5_core_destroy_sq(dev, sq->qpn);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);

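/* Q counter allocation, deallocation and query. */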
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);

int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);

int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);