// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/gfp.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "qp.h"

static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct);
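
/*
 * QPs, RQs, SQs and DCTs are all tracked in one radix tree, keyed by an
 * "rsn" that packs the resource type into the bits above the 24-bit
 * queue number (MLX5_USER_INDEX_LEN).  A lookup takes a reference; the
 * final mlx5_core_put_rsc() completes ->free so teardown can wait for
 * all outstanding users.
 */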
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		refcount_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (refcount_dec_and_test(&common->refcount))
		complete(&common->free);
}
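
/*
 * Each resource type accepts only a subset of the asynchronous event
 * types; anything outside the per-type mask is dropped by the notifier
 * below.
 */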
static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}
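
/*
 * Demultiplex firmware events onto the affected resource: recover the
 * rsn from the EQE, look the resource up (taking a reference), and
 * either invoke the resource's ->event() callback or, for a drained
 * DCT, complete ->drained so _mlx5_core_destroy_dct() can proceed.
 */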
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_core_rsc_common *common;
	struct mlx5_qp_table *table;
	struct mlx5_core_dct *dct;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	struct mlx5_eqe *eqe;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
		break;
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	table = container_of(nb, struct mlx5_qp_table, nb);
	common = mlx5_get_rsc(table, rsn);
	if (!common)
		return NOTIFY_OK;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
		goto out;

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		break;
	default:
		break;
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}
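
/*
 * Register a newly created resource in the radix tree under its packed
 * rsn and give it the initial reference that destroy_resource_common()
 * will drop.
 */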
static int create_resource_common(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	refcount_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}
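
/*
 * Unpublish the resource and wait until every concurrent user (e.g. the
 * event notifier) has dropped its reference: deleting the tree entry
 * prevents new lookups, dropping the initial reference lets the
 * refcount reach zero, and the last put completes ->free.
 */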
static void destroy_resource_common(struct mlx5_ib_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}
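
/*
 * Destroying a DCT is a two-step handshake with firmware: DRAIN_DCT and
 * then, once the DCT_DRAINED event has arrived, DESTROY_DCT.  If the
 * device is in internal error the drain (and its event) cannot be
 * expected, so go straight to DESTROY_DCT.  @need_cleanup is false on
 * the create-error path, where the DCT was never registered in the
 * resource table.
 */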
static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
				  struct mlx5_core_dct *dct, bool need_cleanup)
{
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
			goto destroy;

		return err;
	}
	wait_for_completion(&dct->drained);
destroy:
	if (need_cleanup)
		destroy_resource_common(dev, &dct->mqp);
	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
	return err;
}
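
/*
 * Create a DCT and register it for event dispatch.  If registration
 * fails after the firmware object exists, tear the object down without
 * the table cleanup, since it was never inserted.
 */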
int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			 u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = create_resource_common(dev, qp, MLX5_RES_DCT);
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	_mlx5_core_destroy_dct(dev, dct, false);
	return err;
}
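
/*
 * Create a QP, register it for events and expose it in debugfs.  If the
 * table insert fails, the firmware object is destroyed again with the
 * qpn/uid the create returned.
 */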
int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *in, int inlen, u32 *out)
{
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
			    MLX5_ST_SZ_BYTES(create_qp_out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	mlx5_debug_qp_add(dev->mdev, qp);

	return 0;

err_cmd:
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
	return err;
}

static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}

int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
			  struct mlx5_core_dct *dct)
{
	return _mlx5_core_destroy_dct(dev, dct, true);
}

int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	mlx5_debug_qp_remove(dev->mdev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
	return 0;
}

int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	/* Firmware takes the delay-drop timeout in units of 100 usec */
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}
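
/*
 * MODIFY_QP commands have per-transition input/output layouts, so the
 * command mailboxes are sized and allocated per opcode (see
 * modify_qp_mbox_alloc()) rather than on the stack.
 */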
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

static int get_ece_from_mbox(void *out, u16 opcode)
{
	int ece = 0;

	switch (opcode) {
	case MLX5_CMD_OP_INIT2INIT_QP:
		ece = MLX5_GET(init2init_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		ece = MLX5_GET(init2rtr_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		ece = MLX5_GET(rtr2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		ece = MLX5_GET(rts2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		ece = MLX5_GET(rst2init_qp_out, out, ece);
		break;
	default:
		break;
	}

	return ece;
}
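
/*
 * Allocate and fill the command mailbox for a QP state transition.  The
 * opcode selects the concrete *_in/*_out layout; the helper macros
 * below differ only in whether the transition carries a QPC payload.
 */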
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid, u32 ece)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)                                                  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
	do {                                                                   \
		MLX5_SET(typ##_in, in, opcode, _opcode);                       \
		MLX5_SET(typ##_in, in, qpn, _qpn);                             \
		MLX5_SET(typ##_in, in, uid, _uid);                             \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
	do {                                                                   \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
		       MLX5_ST_SZ_BYTES(qpc));                                 \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
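
/*
 * Execute a QP state-transition command.  @ece, when non-NULL, carries
 * the ECE (enhanced connection establishment) options into the command
 * and is overwritten with the ECE value the firmware returned.
 */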
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
			void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
				   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
			    mbox.outlen);

	if (ece)
		*ece = get_ece_from_mbox(mbox.out, opcode);

	mbox_free(&mbox);
	return err;
}

int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev->mdev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev->mdev);
}

int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}

int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
			     outlen);
}

int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}
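
/*
 * The *_tracked helpers create plain RQ/SQ firmware objects but also
 * register them in the resource table above, so they take part in event
 * dispatch and mlx5_core_res_hold() lookups just like QPs.
 */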
static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}

int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}

int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
				 struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	destroy_rq_tracked(dev, rq->qpn, rq->uid);
	return 0;
}

static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}

int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
	int err;

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	sq->qpn = MLX5_GET(create_sq_out, out, sqn);
	sq->uid = MLX5_GET(create_sq_in, in, uid);
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}

void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
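
/*
 * Take a temporary reference on an arbitrary tracked resource by number
 * and type.  Callers must pair mlx5_core_res_hold() with
 * mlx5_core_res_put().
 */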
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
						int res_num,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->qp_table;

	return mlx5_get_rsc(table, rsn);
}

void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}