// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */
#include <linux/gfp.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "qp.h"
static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct);
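/*
 * Look up a tracked resource by rsn (resource type in the bits above
 * MLX5_USER_INDEX_LEN, the QPN/RQN/SQN/DCTN in the bits below) and take
 * a reference on it.  The caller releases the reference with
 * mlx5_core_put_rsc().
 */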
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		refcount_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (refcount_dec_and_test(&common->refcount))
		complete(&common->free);
}
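/*
 * Per-resource-type masks of the firmware events that may legitimately
 * be delivered for that resource; anything outside the mask is dropped
 * by rsc_event_notifier() below.
 */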
static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}
static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}
static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}
static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}
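/*
 * Notifier invoked for firmware events.  It rebuilds the rsn from the
 * EQE (resource number plus type bits), takes a reference on the
 * tracked resource, and either forwards the event to the QP/RQ/SQ
 * event handler or completes the DCT drain.
 */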
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_core_rsc_common *common;
	struct mlx5_qp_table *table;
	struct mlx5_core_dct *dct;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	struct mlx5_eqe *eqe;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
		break;
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		eqe = data;
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	table = container_of(nb, struct mlx5_qp_table, nb);
	common = mlx5_get_rsc(table, rsn);
	if (!common)
		return NOTIFY_OK;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
		goto out;

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;
	case MLX5_RES_DCT:
		dct = (struct mlx5_core_dct *)common;
		if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
			complete(&dct->drained);
		break;
	default:
		break;
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}
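/*
 * Insert the resource into the table's radix tree, keyed by
 * qpn | (rsc_type << MLX5_USER_INDEX_LEN), with an initial reference.
 * destroy_resource_common() drops that reference and then waits on the
 * ->free completion, so it cannot return while an event handler still
 * holds the resource.
 */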
static int create_resource_common(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	refcount_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}
static void destroy_resource_common(struct mlx5_ib_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}
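/*
 * Destroy a DCT: ask firmware to drain it, wait for the DCT_DRAINED
 * event (unless the device is in internal error), then issue
 * DESTROY_DCT.  need_cleanup is false only on the create-path unwind,
 * where the resource was never inserted into the tree.
 */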
static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
				  struct mlx5_core_dct *dct, bool need_cleanup)
{
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
			goto destroy;

		return err;
	}
	wait_for_completion(&dct->drained);
destroy:
	if (need_cleanup)
		destroy_resource_common(dev, &dct->mqp);
	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
	return err;
}
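/*
 * Create a DCT and start tracking it.  If tracking fails after the
 * firmware object already exists, tear the object down again with
 * need_cleanup = false, since it was never inserted into the tree.
 */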
int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			 u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = create_resource_common(dev, qp, MLX5_RES_DCT);
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	_mlx5_core_destroy_dct(dev, dct, false);
	return err;
}
int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *in, int inlen, u32 *out)
{
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
			    MLX5_ST_SZ_BYTES(create_qp_out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	mlx5_debug_qp_add(dev->mdev, qp);

	return 0;

err_cmd:
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
	return err;
}
static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
			  struct mlx5_core_dct *dct)
{
	return _mlx5_core_destroy_dct(dev, dct, true);
}
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	mlx5_debug_qp_remove(dev->mdev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
	return 0;
}
int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	/* The firmware field is in units of 100 usec */
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}
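/*
 * Helpers for the variable-size modify-QP mailboxes: each state
 * transition opcode has its own in/out layout, so the command buffers
 * are sized and allocated per opcode by modify_qp_mbox_alloc().
 */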
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}
static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}
static int get_ece_from_mbox(void *out, u16 opcode)
{
	int ece = 0;

	switch (opcode) {
	case MLX5_CMD_OP_INIT2INIT_QP:
		ece = MLX5_GET(init2init_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		ece = MLX5_GET(init2rtr_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		ece = MLX5_GET(rtr2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		ece = MLX5_GET(rts2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		ece = MLX5_GET(rst2init_qp_out, out, ece);
		break;
	default:
		break;
	}

	return ece;
}
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid, u32 ece)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ) \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
	do { \
		MLX5_SET(typ##_in, in, opcode, _opcode); \
		MLX5_SET(typ##_in, in, qpn, _qpn); \
		MLX5_SET(typ##_in, in, uid, _uid); \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
	do { \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
		       MLX5_ST_SZ_BYTES(qpc)); \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
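/*
 * Execute a QP state transition.  If the caller passes a non-NULL ece,
 * its value is carried into the command and, on success, replaced with
 * the ECE options the firmware returned.  A typical transition, e.g.
 * INIT->RTR, looks like:
 *
 *	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP,
 *				  opt_param_mask, qpc, qp, &ece);
 */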
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
			void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
				   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
			    mbox.outlen);

	if (!err && ece)
		*ece = get_ece_from_mbox(mbox.out, opcode);

	mbox_free(&mbox);
	return err;
}
int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev->mdev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}
void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev->mdev);
}
int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}
int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
			     outlen);
}
int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}
int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}
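/*
 * Raw RQs and SQs are tracked in the same table as QPs so that their
 * firmware events can be dispatched; the *_tracked helpers pair the
 * firmware create/destroy commands with
 * create/destroy_resource_common().
 */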
static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}
int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
				 struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	destroy_rq_tracked(dev, rq->qpn, rq->uid);
	return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}
int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
	int err;

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	sq->qpn = MLX5_GET(create_sq_out, out, sqn);
	sq->uid = MLX5_GET(create_sq_in, in, uid);
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}
void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
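/*
 * Hold/put a tracked resource by number and type.  mlx5_core_res_hold()
 * returns the refcounted common header (or NULL if the resource is not
 * in the table), and every successful hold must be balanced by
 * mlx5_core_res_put():
 *
 *	common = mlx5_core_res_hold(dev, qpn, MLX5_RES_QP);
 *	if (common)
 *		mlx5_core_res_put(common);
 */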
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
						int res_num,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->qp_table;

	return mlx5_get_rsc(table, rsn);
}
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}