x86/topology: Fix function name in documentation
[cris-mirror.git] / drivers / net / ethernet / mellanox / mlx5 / core / qp.c
blob02d6c5b5d502adfa5e7c8f11ea13d774a9eb2409
1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
40 #include "mlx5_core.h"
42 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
43 u32 rsn)
45 struct mlx5_qp_table *table = &dev->priv.qp_table;
46 struct mlx5_core_rsc_common *common;
48 spin_lock(&table->lock);
50 common = radix_tree_lookup(&table->tree, rsn);
51 if (common)
52 atomic_inc(&common->refcount);
54 spin_unlock(&table->lock);
56 if (!common) {
57 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
58 rsn);
59 return NULL;
61 return common;
64 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
66 if (atomic_dec_and_test(&common->refcount))
67 complete(&common->free);
70 static u64 qp_allowed_event_types(void)
72 u64 mask;
74 mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
75 BIT(MLX5_EVENT_TYPE_COMM_EST) |
76 BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
77 BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
78 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
79 BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
80 BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
81 BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
83 return mask;
86 static u64 rq_allowed_event_types(void)
88 u64 mask;
90 mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
91 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
93 return mask;
96 static u64 sq_allowed_event_types(void)
98 return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
101 static u64 dct_allowed_event_types(void)
103 return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
106 static bool is_event_type_allowed(int rsc_type, int event_type)
108 switch (rsc_type) {
109 case MLX5_EVENT_QUEUE_TYPE_QP:
110 return BIT(event_type) & qp_allowed_event_types();
111 case MLX5_EVENT_QUEUE_TYPE_RQ:
112 return BIT(event_type) & rq_allowed_event_types();
113 case MLX5_EVENT_QUEUE_TYPE_SQ:
114 return BIT(event_type) & sq_allowed_event_types();
115 case MLX5_EVENT_QUEUE_TYPE_DCT:
116 return BIT(event_type) & dct_allowed_event_types();
117 default:
118 WARN(1, "Event arrived for unknown resource type");
119 return false;
123 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
125 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
126 struct mlx5_core_dct *dct;
127 struct mlx5_core_qp *qp;
129 if (!common)
130 return;
132 if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
133 mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
134 event_type, rsn);
135 return;
138 switch (common->res) {
139 case MLX5_RES_QP:
140 case MLX5_RES_RQ:
141 case MLX5_RES_SQ:
142 qp = (struct mlx5_core_qp *)common;
143 qp->event(qp, event_type);
144 break;
145 case MLX5_RES_DCT:
146 dct = (struct mlx5_core_dct *)common;
147 if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
148 complete(&dct->drained);
149 break;
150 default:
151 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
154 mlx5_core_put_rsc(common);
157 static int create_resource_common(struct mlx5_core_dev *dev,
158 struct mlx5_core_qp *qp,
159 int rsc_type)
161 struct mlx5_qp_table *table = &dev->priv.qp_table;
162 int err;
164 qp->common.res = rsc_type;
165 spin_lock_irq(&table->lock);
166 err = radix_tree_insert(&table->tree,
167 qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
168 qp);
169 spin_unlock_irq(&table->lock);
170 if (err)
171 return err;
173 atomic_set(&qp->common.refcount, 1);
174 init_completion(&qp->common.free);
175 qp->pid = current->pid;
177 return 0;
180 static void destroy_resource_common(struct mlx5_core_dev *dev,
181 struct mlx5_core_qp *qp)
183 struct mlx5_qp_table *table = &dev->priv.qp_table;
184 unsigned long flags;
186 spin_lock_irqsave(&table->lock, flags);
187 radix_tree_delete(&table->tree,
188 qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
189 spin_unlock_irqrestore(&table->lock, flags);
190 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
191 wait_for_completion(&qp->common.free);
194 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
195 struct mlx5_core_dct *dct,
196 u32 *in, int inlen)
198 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
199 u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
200 u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
201 struct mlx5_core_qp *qp = &dct->mqp;
202 int err;
204 init_completion(&dct->drained);
205 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
207 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
208 if (err) {
209 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
210 return err;
213 qp->qpn = MLX5_GET(create_dct_out, out, dctn);
214 err = create_resource_common(dev, qp, MLX5_RES_DCT);
215 if (err)
216 goto err_cmd;
218 return 0;
219 err_cmd:
220 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
221 MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
222 mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
223 (void *)&out, sizeof(dout));
224 return err;
226 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
228 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
229 struct mlx5_core_qp *qp,
230 u32 *in, int inlen)
232 u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
233 u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
234 u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
235 int err;
237 MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
239 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
240 if (err)
241 return err;
243 qp->qpn = MLX5_GET(create_qp_out, out, qpn);
244 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
246 err = create_resource_common(dev, qp, MLX5_RES_QP);
247 if (err)
248 goto err_cmd;
250 err = mlx5_debug_qp_add(dev, qp);
251 if (err)
252 mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
253 qp->qpn);
255 atomic_inc(&dev->num_qps);
257 return 0;
259 err_cmd:
260 memset(din, 0, sizeof(din));
261 memset(dout, 0, sizeof(dout));
262 MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
263 MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
264 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
265 return err;
267 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
269 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
270 struct mlx5_core_dct *dct)
272 u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
273 u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
274 struct mlx5_core_qp *qp = &dct->mqp;
276 MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
277 MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
278 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
279 (void *)&out, sizeof(out));
282 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
283 struct mlx5_core_dct *dct)
285 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
286 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
287 struct mlx5_core_qp *qp = &dct->mqp;
288 int err;
290 err = mlx5_core_drain_dct(dev, dct);
291 if (err) {
292 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
293 goto destroy;
294 } else {
295 mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
296 return err;
299 wait_for_completion(&dct->drained);
300 destroy:
301 destroy_resource_common(dev, &dct->mqp);
302 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
303 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
304 err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
305 (void *)&out, sizeof(out));
306 return err;
308 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
310 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
311 struct mlx5_core_qp *qp)
313 u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
314 u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
315 int err;
317 mlx5_debug_qp_remove(dev, qp);
319 destroy_resource_common(dev, qp);
321 MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
322 MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
323 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
324 if (err)
325 return err;
327 atomic_dec(&dev->num_qps);
328 return 0;
330 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
332 int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
333 u32 timeout_usec)
335 u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
336 u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
338 MLX5_SET(set_delay_drop_params_in, in, opcode,
339 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
340 MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
341 timeout_usec / 100);
342 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
344 EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
/* A pair of heap-allocated command mailboxes (input and output) plus
 * their byte sizes, used by the modify-QP path to size buffers per
 * transition opcode. Managed by mbox_alloc()/mbox_free().
 */
struct mbox_info {
	u32 *in;	/* command input mailbox (kzalloc'ed) */
	u32 *out;	/* command output mailbox (kzalloc'ed) */
	int inlen;	/* size of *in in bytes */
	int outlen;	/* size of *out in bytes */
};
353 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
355 mbox->inlen = inlen;
356 mbox->outlen = outlen;
357 mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
358 mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
359 if (!mbox->in || !mbox->out) {
360 kfree(mbox->in);
361 kfree(mbox->out);
362 return -ENOMEM;
365 return 0;
368 static void mbox_free(struct mbox_info *mbox)
370 kfree(mbox->in);
371 kfree(mbox->out);
/* Allocate and populate the modify-QP command mailbox for @opcode.
 *
 * Transitions without a QP context (2RST/2ERR) carry only opcode + qpn;
 * the remaining transitions additionally copy @opt_param_mask and the
 * QP context @qpc into the input mailbox.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL for
 * an opcode that is not a known QP state transition. On success the
 * caller owns the buffers and must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	mbox->out = NULL;
	mbox->in = NULL;

/* Size the in/out buffers from the command's wire layout. */
#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Set the fields common to every transition: opcode and qpn. */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
	MLX5_SET(typ##_in, in, opcode, _opcode); \
	MLX5_SET(typ##_in, in, qpn, _qpn)

/* As above, plus the optional-parameter mask and the QP context blob. */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			      opcode, qpn);
		return -EINVAL;
	}
	return 0;
}
451 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
452 u32 opt_param_mask, void *qpc,
453 struct mlx5_core_qp *qp)
455 struct mbox_info mbox;
456 int err;
458 err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
459 opt_param_mask, qpc, &mbox);
460 if (err)
461 return err;
463 err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
464 mbox_free(&mbox);
465 return err;
467 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
469 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
471 struct mlx5_qp_table *table = &dev->priv.qp_table;
473 memset(table, 0, sizeof(*table));
474 spin_lock_init(&table->lock);
475 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
476 mlx5_qp_debugfs_init(dev);
/* Counterpart of mlx5_init_qp_table(): remove the QP debugfs entries.
 * The radix tree itself needs no teardown; it is expected to be empty
 * by the time this runs.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
484 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
485 u32 *out, int outlen)
487 u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
489 MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
490 MLX5_SET(query_qp_in, in, qpn, qp->qpn);
491 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
493 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
495 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
496 u32 *out, int outlen)
498 u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
499 struct mlx5_core_qp *qp = &dct->mqp;
501 MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
502 MLX5_SET(query_dct_in, in, dctn, qp->qpn);
504 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
505 (void *)out, outlen);
507 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
509 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
511 u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
512 u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
513 int err;
515 MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
516 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
517 if (!err)
518 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
519 return err;
521 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
523 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
525 u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
526 u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
528 MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
529 MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
530 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
532 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
534 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
535 struct mlx5_core_qp *rq)
537 int err;
538 u32 rqn;
540 err = mlx5_core_create_rq(dev, in, inlen, &rqn);
541 if (err)
542 return err;
544 rq->qpn = rqn;
545 err = create_resource_common(dev, rq, MLX5_RES_RQ);
546 if (err)
547 goto err_destroy_rq;
549 return 0;
551 err_destroy_rq:
552 mlx5_core_destroy_rq(dev, rq->qpn);
554 return err;
556 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
/* Unregister @rq from the QP table first (blocking until concurrent
 * event handlers release it), then destroy the firmware RQ object.
 */
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
566 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
567 struct mlx5_core_qp *sq)
569 int err;
570 u32 sqn;
572 err = mlx5_core_create_sq(dev, in, inlen, &sqn);
573 if (err)
574 return err;
576 sq->qpn = sqn;
577 err = create_resource_common(dev, sq, MLX5_RES_SQ);
578 if (err)
579 goto err_destroy_sq;
581 return 0;
583 err_destroy_sq:
584 mlx5_core_destroy_sq(dev, sq->qpn);
586 return err;
588 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
/* Unregister @sq from the QP table first (blocking until concurrent
 * event handlers release it), then destroy the firmware SQ object.
 */
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
598 int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
600 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
601 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
602 int err;
604 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
605 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
606 if (!err)
607 *counter_id = MLX5_GET(alloc_q_counter_out, out,
608 counter_set_id);
609 return err;
611 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
613 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
615 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
616 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
618 MLX5_SET(dealloc_q_counter_in, in, opcode,
619 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
620 MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
621 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
623 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
625 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
626 int reset, void *out, int out_size)
628 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
630 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
631 MLX5_SET(query_q_counter_in, in, clear, reset);
632 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
633 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
635 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);