/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE		= 6,
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD	= 64,
};
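
/* Translation table from IB verbs work-request opcodes to the mlx5 device
 * opcodes that are written into the WQE control segment.
 */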
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
	u32 set_mask; /* raw_qp_set_mask_map */

	struct mlx5_rate_limit rl;
};
static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
/**
 * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of) from user WQ
 *
 * @umem: User space memory where the WQ is
 * @buffer: buffer to copy to
 * @buflen: buffer length
 * @wqe_index: index of WQE to copy from
 * @wq_offset: offset to start of WQ
 * @wq_wqe_cnt: number of WQEs in WQ
 * @wq_wqe_shift: log2 of WQE size
 * @bcnt: number of bytes to copy
 * @bytes_copied: number of bytes actually copied (output)
 *
 * Copies at most bcnt bytes from the start of the WQE.
 * Does not guarantee to copy the entire WQE.
 *
 * Return: zero on success, or an error code.
 */
static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
					size_t buflen, int wqe_index,
					int wq_offset, int wq_wqe_cnt,
					int wq_wqe_shift, int bcnt,
					size_t *bytes_copied)
{
	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
	size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
	size_t copy_length;
	int ret;

	/* don't copy more than requested, more than buffer length or
	 * beyond WQ end
	 */
	copy_length = min_t(u32, buflen, wq_end - offset);
	copy_length = min_t(u32, copy_length, bcnt);

	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
	if (ret)
		return ret;

	if (!ret && bytes_copied)
		*bytes_copied = copy_length;

	return 0;
}
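
/* Read a single send WQE from a kernel-owned SQ. The WQE length is taken
 * from the DS field of its control segment and the strides are copied one
 * MLX5_SEND_WQE_BB at a time, wrapping around the fragmented SQ buffer as
 * needed.
 */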
static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
				      void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	size_t bytes_copied = 0;
	size_t wqe_length;
	void *p;
	int ds;

	wqe_index = wqe_index & qp->sq.fbc.sz_m1;

	/* read the control segment first */
	p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
	ctrl = p;
	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	wqe_length = ds * MLX5_WQE_DS_UNITS;

	/* read rest of WQE if it spreads over more than one stride */
	while (bytes_copied < wqe_length) {
		size_t copy_length =
			min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);

		if (!copy_length)
			break;

		memcpy(buffer + bytes_copied, p, copy_length);
		bytes_copied += copy_length;

		wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
		p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
	}
	*bc = bytes_copied;
	return 0;
}
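
/* Read a send WQE from a user-owned SQ. A first copy grabs as much as
 * possible; if the WQE wraps past the end of the work queue, the remainder
 * is read again starting from WQE index 0 and the two byte counts are
 * summed into *bc.
 */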
static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
				    void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->sq;
	struct mlx5_wqe_ctrl_seg *ctrl;
	size_t bytes_copied;
	size_t bytes_copied2;
	size_t wqe_length;
	int ret;
	int ds;

	/* at first read as much as possible */
	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
					   wq->offset, wq->wqe_cnt,
					   wq->wqe_shift, buflen,
					   &bytes_copied);
	if (ret)
		return ret;

	/* we need at least control segment size to proceed */
	if (bytes_copied < sizeof(*ctrl))
		return -EINVAL;

	ctrl = buffer;
	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	wqe_length = ds * MLX5_WQE_DS_UNITS;

	/* if we copied enough then we are done */
	if (bytes_copied >= wqe_length) {
		*bc = bytes_copied;
		return 0;
	}

	/* otherwise this is a wrapped-around WQE,
	 * so read the remaining bytes starting
	 * from WQE index 0
	 */
	ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
					   buflen - bytes_copied, 0, wq->offset,
					   wq->wqe_cnt, wq->wqe_shift,
					   wqe_length - bytes_copied,
					   &bytes_copied2);
	if (ret)
		return ret;

	*bc = bytes_copied + bytes_copied2;
	return 0;
}
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;

	if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (!umem)
		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
						  buflen, bc);

	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
}
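
/* Receive WQEs have a fixed stride (1 << wq->wqe_shift), so a single copy
 * from the user umem is sufficient; no wrap-around handling is needed here.
 */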
static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
				    void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->rq;
	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
					   wq->offset, wq->wqe_cnt,
					   wq->wqe_shift, buflen,
					   &bytes_copied);
	if (ret)
		return ret;

	*bc = bytes_copied;
	return 0;
}
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->rq;
	size_t wqe_size = 1 << wq->wqe_shift;

	if (buflen < wqe_size)
		return -EINVAL;

	if (!umem)
		return -EOPNOTSUPP;

	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
}
static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
				     void *buffer, size_t buflen, size_t *bc)
{
	struct ib_umem *umem = srq->umem;
	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
					   srq->msrq.max, srq->msrq.wqe_shift,
					   buflen, &bytes_copied);
	if (ret)
		return ret;

	*bc = bytes_copied;
	return 0;
}
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc)
{
	struct ib_umem *umem = srq->umem;
	size_t wqe_size = 1 << srq->msrq.wqe_shift;

	if (buflen < wqe_size)
		return -EINVAL;

	if (!umem)
		return -EOPNOTSUPP;

	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
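
/* Size the receive queue. For user QPs the WQE count and stride come from
 * the ucmd and are only validated; for kernel QPs they are derived from the
 * requested max_recv_wr/max_recv_sge and rounded up to powers of two.
 */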
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
				return -EINVAL;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
				return -EINVAL;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
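
/* Per-WQE overhead (in bytes) of the fixed segments that precede the data
 * segments on the send queue; it depends on the transport type.
 */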
static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg) +
			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
			    MLX5_IB_UMR_OCTOWORD);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
	int max_sge;

	if (attr->qp_type == IB_QPT_RC)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else if (attr->qp_type == IB_QPT_XRC_INI)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_xrc_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else
		max_sge = (wqe_size - sq_overhead(attr)) /
			sizeof(struct mlx5_wqe_data_seg);

	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
		     sizeof(struct mlx5_wqe_data_seg));
}
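
/* Compute the send queue size in MLX5_SEND_WQE_BB units from the requested
 * capabilities, and report the resulting limits back through attr->cap.
 */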
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = get_send_sge(attr, wqe_size);
	if (qp->sq.max_gs < attr->cap.max_send_sge)
		return -ENOMEM;

	attr->cap.max_send_sge = qp->sq.max_gs;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return 0;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_ib_create_qp *ucmd,
			    struct mlx5_ib_qp_base *base,
			    struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
/* this is the first blue flame register in the array of bfregs assigned
 * to a process. Since we do not use it for blue flame but rather
 * regular 64 bit doorbells, we do not need a lock for maintaining
 * "odd/even" order
 */
enum {
	NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_bfreg(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi)
{
	int n;

	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
	    NUM_NON_BLUE_FLAME_BFREGS;

	return n >= 0 ? n : 0;
}

static int first_med_bfreg(struct mlx5_ib_dev *dev,
			   struct mlx5_bfreg_info *bfregi)
{
	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}
static int first_hi_bfreg(struct mlx5_ib_dev *dev,
			  struct mlx5_bfreg_info *bfregi)
{
	int med;

	med = num_med_bfreg(dev, bfregi);
	return ++med;
}

static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
				  struct mlx5_bfreg_info *bfregi)
{
	int i;

	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
		if (!bfregi->count[i]) {
			bfregi->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi)
{
	int minidx = first_med_bfreg(dev, bfregi);
	int i;

	if (minidx < 0)
		return minidx;

	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
		if (bfregi->count[i] < bfregi->count[minidx])
			minidx = i;
		if (!bfregi->count[minidx])
			break;
	}

	bfregi->count[minidx]++;
	return minidx;
}

static int alloc_bfreg(struct mlx5_ib_dev *dev,
		       struct mlx5_bfreg_info *bfregi)
{
	int bfregn = -ENOMEM;

	if (bfregi->lib_uar_dyn)
		return -EINVAL;

	mutex_lock(&bfregi->lock);
	if (bfregi->ver >= 2) {
		bfregn = alloc_high_class_bfreg(dev, bfregi);
		if (bfregn < 0)
			bfregn = alloc_med_class_bfreg(dev, bfregi);
	}

	if (bfregn < 0) {
		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
		bfregn = 0;
		bfregi->count[bfregn]++;
	}
	mutex_unlock(&bfregi->lock);

	return bfregn;
}

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
	mutex_lock(&bfregi->lock);
	bfregi->count[bfregn]--;
	mutex_unlock(&bfregi->lock);
}
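
/* Translate IB QP states and types to their mlx5 firmware encodings. */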
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	default:			return -EINVAL;
	}
}

static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
			     struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
			       struct mlx5_ib_cq *recv_cq);
int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg)
{
	unsigned int bfregs_per_sys_page;
	u32 index_of_sys_page;
	u32 offset;

	if (bfregi->lib_uar_dyn)
		return -EINVAL;

	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
				MLX5_NON_FP_BFREGS_PER_UAR;
	index_of_sys_page = bfregn / bfregs_per_sys_page;

	if (dyn_bfreg) {
		index_of_sys_page += bfregi->num_static_sys_pages;

		if (index_of_sys_page >= bfregi->num_sys_pages)
			return -EINVAL;

		if (bfregn > bfregi->num_dyn_bfregs ||
		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
			return -EINVAL;
		}
	}

	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
	return bfregi->sys_pages[index_of_sys_page] + offset;
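}

/* Pin a user buffer and derive the page parameters (npages, page_shift,
 * ncont, offset) needed to build the PAS list for the firmware command.
 */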
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem, int *npages, int *page_shift,
			    int *ncont, u32 *offset)
{
	int err;

	*umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context =
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext);

	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
		atomic_dec(&dev->delay_drop.rqs_cnt);

	mlx5_ib_db_unmap_user(context, &rwq->db);
	ib_umem_release(rwq->umem);
}
static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
			  struct mlx5_ib_create_wq *ucmd)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	int page_shift = 0;
	int npages;
	u32 offset = 0;
	int ncont = 0;
	int err;

	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
	if (IS_ERR(rwq->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(rwq->umem);
		return err;
	}

	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
				     &rwq->rq_page_offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	rwq->rq_num_pas = ncont;
	rwq->page_shift = page_shift;
	rwq->log_page_size =  page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
		    npages, page_shift, ncont, offset);

	err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_umem;
	}

	rwq->create_type = MLX5_WQ_USER;
	return 0;

err_umem:
	ib_umem_release(rwq->umem);
	return err;
}
static int adjust_bfregn(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi, int bfregn)
{
	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct ib_qp_init_attr *attr,
			  u32 **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
			  struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
	int page_shift = 0;
	int uar_index = 0;
	int npages;
	u32 offset = 0;
	int bfregn;
	int ncont = 0;
	__be64 *pas;
	void *qpc;
	int err;
	u16 uid;
	u32 uar_flags;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
					    ibucontext);
	uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX |
				  MLX5_QP_FLAG_BFREG_INDEX);
	switch (uar_flags) {
	case MLX5_QP_FLAG_UAR_PAGE_INDEX:
		uar_index = ucmd.bfreg_index;
		bfregn = MLX5_IB_INVALID_BFREG;
		break;
	case MLX5_QP_FLAG_BFREG_INDEX:
		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
						ucmd.bfreg_index, true);
		if (uar_index < 0)
			return uar_index;
		bfregn = MLX5_IB_INVALID_BFREG;
		break;
	case 0:
		if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
			return -EINVAL;
		bfregn = alloc_bfreg(dev, &context->bfregi);
		if (bfregn < 0)
			return bfregn;
		break;
	default:
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
						false);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
	if (err)
		goto err_bfreg;

	if (ucmd.buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd.buf_addr;
		err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
				       ubuffer->buf_size, &ubuffer->umem,
				       &npages, &page_shift, &ncont, &offset);
		if (err)
			goto err_bfreg;
	} else {
		ubuffer->umem = NULL;
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}

	uid = (attr->qp_type != IB_QPT_XRC_TGT &&
	       attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
	MLX5_SET(create_qp_in, *in, uid, uid);
	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
	if (ubuffer->umem)
		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);

	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, page_offset, offset);

	MLX5_SET(qpc, qpc, uar_page, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
	else
		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
	qp->bfregn = bfregn;

	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	ib_umem_release(ubuffer->umem);

err_bfreg:
	if (bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
	return err;
}
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
			    struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context =
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext);

	mlx5_ib_db_unmap_user(context, &qp->db);
	ib_umem_release(base->ubuffer.umem);

	/*
	 * Free only the BFREGs which are handled by the kernel.
	 * BFREGs of UARs allocated dynamically are handled by user.
	 */
	if (qp->bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}
/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first following address after the end
 * of the fragment or the SQ. Accordingly, during the WQE construction
 * which repetitively increases the pointer to write the next data, it
 * simply should check if it gets to an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}
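
/* Allocate the buffers, doorbell and tracking arrays for a kernel QP and
 * prepare the create_qp_in mailbox; the caller issues the actual
 * CREATE_QP command.
 */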
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    u32 **in, int *inlen,
			    struct mlx5_ib_qp_base *base)
{
	int uar_index;
	void *qpc;
	int err;

	if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
					IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_NETIF_QP |
					MLX5_IB_QP_CREATE_SQPN_QP1 |
					MLX5_IB_QP_CREATE_WC_TEST))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		qp->bf.bfreg = &dev->fp_bfreg;
	else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
		qp->bf.bfreg = &dev->wc_bfreg;
	else
		qp->bf.bfreg = &dev->bfreg;

	/* We need to divide by two since each register is comprised of
	 * two buffers of identical size, namely odd and even
	 */
	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
	uar_index = qp->bf.bfreg->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
				       &qp->buf, dev->mdev->priv.numa_node);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (qp->rq.wqe_cnt)
		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);

	if (qp->sq.wqe_cnt) {
		int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
					MLX5_SEND_WQE_BB;
		mlx5_init_fbc_offset(qp->buf.frags +
				     (qp->sq.offset / PAGE_SIZE),
				     ilog2(MLX5_SEND_WQE_BB),
				     ilog2(qp->sq.wqe_cnt),
				     sq_strides_offset, &qp->sq.fbc);

		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
	MLX5_SET(qpc, qpc, uar_page, uar_index);
	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Set "fast registration enabled" for all kernel QPs */
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);

	if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
		MLX5_SET(qpc, qpc, deth_sqpn, 1);
		qp->flags |= MLX5_IB_QP_SQPN_QP1;
	}

	mlx5_fill_page_frag_array(&qp->buf,
				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
							 *in, pas));

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
				     sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
					sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
				     sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
				       sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);

err_free:
	kvfree(*in);

err_buf:
	mlx5_frag_buf_free(dev->mdev, &qp->buf);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);
	mlx5_frag_buf_free(dev->mdev, &qp->buf);
}
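
/* Pick the RQ type reported to firmware: SRQ-backed, zero-length, or a
 * regular receive queue, depending on the QP type and attributes.
 */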
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == MLX5_IB_QPT_DCI) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return MLX5_SRQ_RQ;
	else if (!qp->has_rq)
		return MLX5_ZERO_LEN_RQ;
	else
		return MLX5_NON_ZERO_RQ;
}

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
	    qp_type == MLX5_IB_QPT_DCI)
		return 1;

	return 0;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_qp *qp,
				    struct mlx5_ib_sq *sq, u32 tdn,
				    struct ib_pd *pd)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(tisc, tisc, transport_domain, tdn);
	if (qp->flags & MLX5_IB_QP_UNDERLAY)
		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);

	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_sq *sq, struct ib_pd *pd)
{
	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}

static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
{
	if (sq->flow_rule)
		mlx5_del_flow_rules(sq->flow_rule);
	sq->flow_rule = NULL;
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				   struct ib_udata *udata,
				   struct mlx5_ib_sq *sq, void *qpin,
				   struct ib_pd *pd)
{
	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
	__be64 *pas;
	void *in;
	void *sqc;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	void *wq;
	int inlen;
	int err;
	int page_shift = 0;
	int npages;
	int ncont = 0;
	u32 offset = 0;

	err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
			       &sq->ubuffer.umem, &npages, &page_shift, &ncont,
			       &offset);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_umem;
	}

	MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, swp))
		MLX5_SET(sqc, sqc, allow_swp, 1);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
	MLX5_SET(wq, wq, log_wq_pg_sz,  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, offset);

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);

	kvfree(in);

	if (err)
		goto err_umem;

	return 0;

err_umem:
	ib_umem_release(sq->ubuffer.umem);
	sq->ubuffer.umem = NULL;

	return err;
}
static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_sq *sq)
{
	destroy_flow_rule_vport_sq(sq);
	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
	ib_umem_release(sq->ubuffer.umem);
}
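
/* Size (in bytes) of the PAS array needed for the RQ, computed from the
 * log page size, stride, queue size and page offset stored in the QPC.
 */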
static size_t get_rq_pas_size(void *qpc)
{
	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin,
				   size_t qpinlen, struct ib_pd *pd)
{
	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
	__be64 *pas;
	__be64 *qp_pas;
	void *in;
	void *rqc;
	void *wq;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	size_t rq_pas_size = get_rq_pas_size(qpc);
	size_t inlen;
	int err;

	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
		MLX5_SET(rqc, rqc, vsd, 1);
	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
		MLX5_SET(rqc, rqc, scatter_fcs, 1);

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
	memcpy(pas, qp_pas, rq_pas_size);

	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
{
	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}
static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_rq *rq,
				      u32 qp_flags_en,
				      struct ib_pd *pd)
{
	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
		mlx5_ib_disable_lb(dev, false, true);
	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
}

static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_rq *rq, u32 tdn,
				    u32 *qp_flags_en,
				    struct ib_pd *pd,
				    u32 *out, int outlen)
{
	u8 lb_flag = 0;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
	MLX5_SET(tirc, tirc, transport_domain, tdn);
	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;

	if (dev->is_rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);

	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);

	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
		err = mlx5_ib_enable_lb(dev, false, true);

		if (err)
			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
	}
	kvfree(in);

	return err;
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				u32 *in, size_t inlen,
				struct ib_pd *pd,
				struct ib_udata *udata,
				struct mlx5_ib_create_qp_resp *resp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	int err;
	u32 tdn = mucontext->tdn;
	u16 uid = to_mpd(pd)->uid;
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};

	if (qp->sq.wqe_cnt) {
		err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
		if (err)
			return err;

		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
		if (err)
			goto err_destroy_tis;

		if (uid) {
			resp->tisn = sq->tisn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
			resp->sqn = sq->base.mqp.qpn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
		}

		sq->base.container_mibqp = qp;
		sq->base.mqp.event = mlx5_ib_qp_event;
	}

	if (qp->rq.wqe_cnt) {
		rq->base.container_mibqp = qp;

		if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
		err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
		if (err)
			goto err_destroy_sq;

		err = create_raw_packet_qp_tir(
			dev, rq, tdn, &qp->flags_en, pd, out,
			MLX5_ST_SZ_BYTES(create_tir_out));
		if (err)
			goto err_destroy_rq;

		if (uid) {
			resp->rqn = rq->base.mqp.qpn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
			resp->tirn = rq->tirn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
				resp->tir_icm_addr = MLX5_GET(
					create_tir_out, out, icm_address_31_0);
				resp->tir_icm_addr |=
					(u64)MLX5_GET(create_tir_out, out,
						      icm_address_39_32)
					<< 32;
				resp->tir_icm_addr |=
					(u64)MLX5_GET(create_tir_out, out,
						      icm_address_63_40)
					<< 40;
				resp->comp_mask |=
					MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
			}
		}
	}

	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
						     rq->base.mqp.qpn;
	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
	if (err)
		goto err_destroy_tir;

	return 0;

err_destroy_tir:
	destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
	destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
	if (!qp->sq.wqe_cnt)
		return err;
	destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
	destroy_raw_packet_qp_tis(dev, sq, pd);

	return err;
}
static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	if (qp->rq.wqe_cnt) {
		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
		destroy_raw_packet_qp_rq(dev, rq);
	}

	if (qp->sq.wqe_cnt) {
		destroy_raw_packet_qp_sq(dev, sq);
		destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
	}
}

static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	sq->sq = &qp->sq;
	rq->rq = &qp->rq;
	sq->doorbell = &qp->db;
	rq->doorbell = &qp->db;
}
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
		mlx5_ib_disable_lb(dev, false, true);
	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
			     to_mpd(qp->ibqp.pd)->uid);
}
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				 struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_create_qp_resp resp = {};
	int inlen;
	int outlen;
	int err;
	u32 *in;
	u32 *out;
	void *tirc;
	void *hfso;
	u32 selected_fields = 0;
	u32 outer_l4;
	size_t min_resp_len;
	u32 tdn = mucontext->tdn;
	struct mlx5_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;
	u8 lb_flag = 0;

	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
		return -EOPNOTSUPP;

	if (init_attr->create_flags || init_attr->send_cq)
		return -EINVAL;

	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
	if (udata->outlen < min_resp_len)
		return -EINVAL;

	required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
		mlx5_ib_dbg(dev, "invalid flags\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
	    !tunnel_offload_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
	}

	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EINVAL;
	}

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	outlen = MLX5_ST_SZ_BYTES(create_tir_out);
	in = kvzalloc(inlen + outlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	out = in + MLX5_ST_SZ_DW(create_tir_in);
	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type,
		 MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table,
		 init_attr->rwq_ind_tbl->ind_tbl_num);
	MLX5_SET(tirc, tirc, transport_domain, tdn);

	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
	else
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	switch (ucmd.rx_hash_function) {
	case MLX5_RX_HASH_FUNC_TOEPLITZ:
	{
		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);

		if (len != ucmd.rx_key_len) {
			err = -EINVAL;
			goto err;
		}

		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
		memcpy(rss_key, ucmd.rx_hash_key, len);
		break;
	}
	default:
		err = -EOPNOTSUPP;
		goto err;
	}

	if (!ucmd.rx_hash_fields_mask) {
		/* special case when this TIR serves as steering entry without hashing */
		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
			goto create_tir;
		err = -EINVAL;
		goto err;
	}

	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
		err = -EINVAL;
		goto err;
	}

	/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);

	outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
		   ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
		   (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;

	/* Check that only one l4 protocol is set */
	if (outer_l4 & (outer_l4 - 1)) {
		err = -EINVAL;
		goto err;
	}

	/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
		selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;

	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);

create_tir:
	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);

	qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
		err = mlx5_ib_enable_lb(dev, false, true);

		if (err)
			mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
					     to_mpd(pd)->uid);
	}

	if (err)
		goto err;

	if (mucontext->devx_uid) {
		resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
		resp.tirn = qp->rss_qp.tirn;
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
			resp.tir_icm_addr =
				MLX5_GET(create_tir_out, out, icm_address_31_0);
			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
							   icm_address_39_32)
					     << 32;
			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
							   icm_address_63_40)
					     << 40;
			resp.comp_mask |=
				MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
		}
	}

	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (err)
		goto err_copy;

	/* qpn is reserved for that QP */
	qp->trans_qp.base.mqp.qpn = 0;
	qp->flags |= MLX5_IB_QP_RSS;
	return 0;

err_copy:
	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
	kvfree(in);
	return err;
}
static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
					 void *qpc)
{
	int rcqe_sz;

	if (init_attr->qp_type == MLX5_IB_QPT_DCI)
		return;

	rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);

	if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
		if (rcqe_sz == 128)
			MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);

		return;
	}

	MLX5_SET(qpc, qpc, cs_res,
		 rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
				  MLX5_RES_SCAT_DATA32_CQE);
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
					 struct ib_qp_init_attr *init_attr,
					 struct mlx5_ib_create_qp *ucmd,
					 void *qpc)
{
	enum ib_qp_type qpt = init_attr->qp_type;
	int scqe_sz;
	bool allow_scat_cqe = false;

	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
		return;

	if (ucmd)
		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;

	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
		return;

	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
	if (scqe_sz == 128) {
		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
		return;
	}

	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
}
static int atomic_size_to_mode(int size_mask)
{
	/* driver does not support atomic_size > 256B
	 * and does not know how to translate bigger sizes
	 */
	int supported_size_mask = size_mask & 0x1ff;
	int log_max_size;

	if (!supported_size_mask)
		return -EOPNOTSUPP;

	log_max_size = __fls(supported_size_mask);

	if (log_max_size > 3)
		return log_max_size;

	return MLX5_ATOMIC_MODE_8B;
}
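
/* Select the atomic mode reported to firmware: prefer the extended
 * (masked) operations when the device supports them, otherwise fall back
 * to the IB compare-and-swap/fetch-and-add mode.
 */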
static int get_atomic_mode(struct mlx5_ib_dev *dev,
			   enum ib_qp_type qp_type)
{
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
	int atomic_mode = -EOPNOTSUPP;
	int atomic_size_mask;

	if (!atomic)
		return -EOPNOTSUPP;

	if (qp_type == MLX5_IB_QPT_DCT)
		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
	else
		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
	    (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
		atomic_mode = atomic_size_to_mode(atomic_size_mask);

	if (atomic_mode <= 0 &&
	    (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
	     atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;

	return atomic_mode;
}
static inline bool check_flags_mask(uint64_t input, uint64_t supported)
{
	return (input & ~supported) == 0;
}
2007 static int create_qp_common(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
2008 struct ib_qp_init_attr
*init_attr
,
2009 struct ib_udata
*udata
, struct mlx5_ib_qp
*qp
)
2011 struct mlx5_ib_resources
*devr
= &dev
->devr
;
2012 int inlen
= MLX5_ST_SZ_BYTES(create_qp_in
);
2013 struct mlx5_core_dev
*mdev
= dev
->mdev
;
2014 struct mlx5_ib_create_qp_resp resp
= {};
2015 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
2016 udata
, struct mlx5_ib_ucontext
, ibucontext
);
2017 struct mlx5_ib_cq
*send_cq
;
2018 struct mlx5_ib_cq
*recv_cq
;
2019 unsigned long flags
;
2020 u32 uidx
= MLX5_IB_DEFAULT_UIDX
;
2021 struct mlx5_ib_create_qp ucmd
;
2022 struct mlx5_ib_qp_base
*base
;
2028 mutex_init(&qp
->mutex
);
2029 spin_lock_init(&qp
		       ->sq.lock);
	spin_lock_init(&qp->rq.lock);

	mlx5_st = to_mlx5_st(init_attr->qp_type);

	if (init_attr->rwq_ind_tbl) {
		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			/* ... */
		}
		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
	}

	if (init_attr->create_flags &
	    (IB_QP_CREATE_CROSS_CHANNEL |
	     IB_QP_CREATE_MANAGED_SEND |
	     IB_QP_CREATE_MANAGED_RECV)) {
		if (!MLX5_CAP_GEN(mdev, cd)) {
			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
			/* ... */
		}
		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
	}

	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
			/* ... */
		}

	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
			/* ... */
		}
		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
			/* ... */
		}
		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
		    (init_attr->qp_type != IB_QPT_RAW_PACKET))
			/* ... */
		qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
	}

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		/* ... */
	}

	if (!check_flags_mask(ucmd.flags,
			      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
			      MLX5_QP_FLAG_BFREG_INDEX |
			      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
			      MLX5_QP_FLAG_SCATTER_CQE |
			      MLX5_QP_FLAG_SIGNATURE |
			      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
			      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
			      MLX5_QP_FLAG_UAR_PAGE_INDEX |
			      MLX5_QP_FLAG_TYPE_DCI |
			      MLX5_QP_FLAG_TYPE_DCT))
		/* ... */

	err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);

	qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
	if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
		    !tunnel_offload_supported(mdev)) {
			mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
			/* ... */
		}
		qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
			/* ... */
		}
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
			/* ... */
		}
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
	}

	if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
		if (init_attr->qp_type != IB_QPT_RC ||
		    !MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
			mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
			/* ... */
		}
		qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
	}

	if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (init_attr->qp_type != IB_QPT_UD ||
		    (MLX5_CAP_GEN(dev->mdev, port_type) !=
		     MLX5_CAP_PORT_TYPE_IB) ||
		    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
			mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
			/* ... */
		}
		qp->flags |= MLX5_IB_QP_UNDERLAY;
		qp->underlay_qpn = init_attr->source_qpn;
	}

	qp->wq_sig = !!wq_signature;

	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, udata ? &ucmd : NULL);
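	/* Note on the block above: every optional create flag (multicast
	 * loopback blocking, cross-channel, scatter FCS, CVLAN stripping,
	 * tunnel offloads, self-loopback prevention, packet based credit
	 * mode, source QPN) is only accepted after the matching
	 * MLX5_CAP_GEN()/MLX5_CAP_ETH() capability bit has been checked;
	 * the corresponding MLX5_IB_QP_*/MLX5_QP_FLAG_* bit is then latched
	 * in qp->flags or qp->flags_en for later QPC programming.
	 */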
	if (err)
		mlx5_ib_dbg(dev, "err %d\n", err);

	max_wqes = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
	if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
	    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
		mlx5_ib_dbg(dev, "invalid rq params\n");
		/* ... */
	}
	if (ucmd.sq_wqe_count > max_wqes) {
		mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
			    ucmd.sq_wqe_count, max_wqes);
		/* ... */
	}
	if (init_attr->create_flags &
	    MLX5_IB_QP_CREATE_SQPN_QP1) {
		mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
		/* ... */
	}
	err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
			     &resp, &inlen, base);
	if (err)
		mlx5_ib_dbg(dev, "err %d\n", err);

	err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
	if (err)
		mlx5_ib_dbg(dev, "err %d\n", err);

	in = kvzalloc(inlen, GFP_KERNEL);
	qp->create_type = MLX5_QP_EMPTY;

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, mlx5_st);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);

	MLX5_SET(qpc, qpc, latency_sensitive, 1);

	MLX5_SET(qpc, qpc, wq_signature, 1);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		MLX5_SET(qpc, qpc, block_lb_mc, 1);

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		MLX5_SET(qpc, qpc, cd_master, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		MLX5_SET(qpc, qpc, cd_slave_send, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
	if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
		MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		configure_responder_scat_cqe(init_attr, qpc);
		configure_requester_scat_cqe(dev, init_attr,
					     udata ? &ucmd : NULL,
	}

	if (qp->rq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
	}

	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

	if (qp->sq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
	} else {
		MLX5_SET(qpc, qpc, no_sq, 1);
		if (init_attr->srq &&
		    init_attr->srq->srq_type == IB_SRQT_TM)
			MLX5_SET(qpc, qpc, offload_type,
				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
	}
	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

	/* 0xffffff means we ask to work with cqe version 0 */
	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
		MLX5_SET(qpc, qpc, user_index, uidx);

	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */
	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
		qp->flags |= MLX5_IB_QP_LSO;
	}

	if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
			/* ... */
		} else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			MLX5_SET(qpc, qpc, end_padding_mode,
				 MLX5_WQ_END_PAD_MODE_ALIGN);
		}
		qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
	}

	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
	} else {
		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
	}
	if (err)
		mlx5_ib_dbg(dev, "create qp failed\n");

	base->container_mibqp = qp;
	base->mqp.event = mlx5_ib_qp_event;

	get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
		&send_cq, &recv_cq);
	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* Maintain device to QPs access, needed for further handling via reset flow */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling via reset flow */
	list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, pd, qp, base, udata);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
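	/* The QP is linked into dev->qp_list and into the send/recv CQ lists
	 * while holding both reset_flow_resource_lock and the two CQ locks,
	 * so the device-reset flow can walk all QPs and CQs consistently.
	 * The trailing destroy_qp_user()/destroy_qp_kernel() calls form the
	 * unwind path taken when hardware QP creation fails.
	 */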
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
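/* mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() above always take the two CQ
 * spinlocks in ascending CQN order (and release them in reverse) so that
 * concurrent callers cannot deadlock. The __acquire()/__release() calls are
 * sparse annotations only; they keep lock-balance checking happy for the
 * same-CQ and NULL-CQ cases without touching a real lock.
 */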
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		/* ... */
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		/* ... */
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
	}
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 lag_tx_affinity);

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			      struct ib_udata *udata)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_ib_qp_base *base;
	unsigned long flags;

	if (qp->ibqp.rwq_ind_tbl) {
		destroy_rss_raw_qp_tir(dev, qp);
		/* ... */
	}

	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
			err = mlx5_core_qp_modify(dev->mdev,
						  MLX5_CMD_OP_2RST_QP, 0,
		} else {
			struct mlx5_modify_raw_qp_param raw_qp_param = {
				.operation = MLX5_CMD_OP_2RST_QP
			};

			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
		}
		if (err)
			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	list_del(&qp->cq_send_list);
	list_del(&qp->cq_recv_list);

	if (qp->create_type == MLX5_QP_KERNEL) {
		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
	}
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		destroy_raw_packet_qp(dev, qp);
	} else {
		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
	}

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
}
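/* destroy_qp_common() ordering: the QP is first moved to RESET (via
 * mlx5_core_qp_modify() or, for raw packet/underlay QPs, via
 * modify_raw_packet_qp()), then unlinked from the reset-flow lists under the
 * CQ locks, kernel QPs get their stale CQEs cleaned, and only then is the
 * hardware object destroyed and the user/kernel resources freed.
 */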
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	case IB_QPT_DRIVER:
		return "IB_QPT_DRIVER";
	default:
		return "Invalid QP type";
	}
}
static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
					struct ib_qp_init_attr *attr,
					struct mlx5_ib_create_qp *ucmd,
					struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_qp *qp;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	if (!attr->srq || !attr->recv_cq)
		return ERR_PTR(-EINVAL);

	err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
	if (err)
		return ERR_PTR(err);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);

	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	qp->qp_sub_type = MLX5_IB_QPT_DCT;
	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
	MLX5_SET(dctc, dctc, user_index, uidx);

	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
		configure_responder_scat_cqe(attr, dctc);

	qp->state = IB_QPS_RESET;

	/* ... */
	return ERR_PTR(err);
}
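/* mlx5_ib_create_dct() only allocates the QP and fills the create_dct_in
 * mailbox (PD, SRQ/XRQ number, CQN, access key, user index); the command is
 * not sent here. The DCT stays in IB_QPS_RESET and the hardware object is
 * created later, on the INIT->RTR transition in mlx5_ib_modify_dct().
 */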
static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
			   struct ib_qp_init_attr *init_attr,
			   struct mlx5_ib_create_qp *ucmd,
			   struct ib_udata *udata)
{
	enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };

	if (udata->inlen < sizeof(*ucmd)) {
		mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
		/* ... */
	}

	err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));

	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
		init_attr->qp_type = MLX5_IB_QPT_DCI;
	} else if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
		init_attr->qp_type = MLX5_IB_QPT_DCT;
	} else {
		mlx5_ib_dbg(dev, "Invalid QP flags\n");
		/* ... */
	}

	if (!MLX5_CAP_GEN(dev->mdev, dct)) {
		mlx5_ib_dbg(dev, "DC transport is not supported\n");
		/* ... */
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *verbs_init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	struct ib_qp_init_attr mlx_init_attr;
	struct ib_qp_init_attr *init_attr = verbs_init_attr;
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (pd) {
		dev = to_mdev(pd->device);

		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
			if (!ucontext) {
				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
				return ERR_PTR(-EINVAL);
			} else if (!ucontext->cqe_version) {
				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
				return ERR_PTR(-EINVAL);
			}
		}
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	if (init_attr->qp_type == IB_QPT_DRIVER) {
		struct mlx5_ib_create_qp ucmd;

		init_attr = &mlx_init_attr;
		memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
		err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
		if (err)
			return ERR_PTR(err);

		if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
			if (init_attr->cap.max_recv_wr ||
			    init_attr->cap.max_recv_sge) {
				mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
				return ERR_PTR(-EINVAL);
			}
		} else {
			return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

	case IB_QPT_RAW_PACKET:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
	case MLX5_IB_QPT_DCI:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);

		qp->trans_qp.xrcdn = xrcdn;
		break;

	case IB_QPT_GSI:
		return mlx5_ib_gsi_create_qp(pd, init_attr);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
		qp->qp_sub_type = init_attr->qp_type;
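/* mlx5_ib_create_qp() dispatch: IB_QPT_DRIVER is resolved by
 * set_mlx_qp_type() into MLX5_IB_QPT_DCI (handled by create_qp_common() like
 * the other transport QP types, but required to have a zero-sized receive
 * queue) or MLX5_IB_QPT_DCT (handed off to mlx5_ib_create_dct()). IB_QPT_GSI
 * is delegated to the GSI layer and raw IPv6/Ethertype QPs are rejected.
 */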
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);

	if (mqp->state == IB_QPS_RTR) {
		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
	}
	/* ... */
}

int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_destroy_dct(mqp);

	destroy_qp_common(dev, mqp, udata);
	/* ... */
}
static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
				const struct ib_qp_attr *attr,
				int attr_mask, __be32 *hw_access_flags_be)
{
	u32 access_flags, hw_access_flags = 0;

	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
		if (atomic_mode < 0)
			/* ... */

		hw_access_flags |= MLX5_QP_BIT_RAE;
		hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
	}

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	*hw_access_flags_be = cpu_to_be32(hw_access_flags);
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
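/* to_mlx5_access_flags() folds the IB access flags and the destination
 * RD-atomic depth into the hardware RRE/RAE/RWE bits: when the responder
 * depth is zero everything except remote write is masked off, and remote
 * atomics additionally select an atomic mode via get_atomic_mode().
 */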
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT)
		/* ... */

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
		/* ... */

	while (rate != IB_RATE_PORT_CURRENT &&
	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
		/* ... */

	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
				      struct mlx5_ib_sq *sq, u8 sl,
				      struct ib_pd *pd)
{
	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);

	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
	/* ... */
}

static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
					 struct mlx5_ib_sq *sq, u8 tx_affinity,
					 struct ib_pd *pd)
{
	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);

	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
	/* ... */
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct rdma_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr,
			 bool alt)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
	enum ib_gid_type gid_type;
	u8 ah_flags = rdma_ah_get_ah_flags(ah);
	u8 sl = rdma_ah_get_sl(ah);

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :

	if (ah_flags & IB_AH_GRH) {
		if (grh->sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       dev->mdev->port_caps[port - 1].gid_table_len);
			/* ... */
		}
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(ah_flags & IB_AH_GRH))
			/* ... */

		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
		if (qp->ibqp.qp_type == IB_QPT_RC ||
		    qp->ibqp.qp_type == IB_QPT_UC ||
		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
			mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
		gid_type = ah->grh.sgid_attr->gid_type;
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
	} else {
		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
		if (ah_flags & IB_AH_GRH)
			path->grh_mlid |= 1 << 7;
		path->dci_cfi_prio_sl = sl & 0xf;
	}

	if (ah_flags & IB_AH_GRH) {
		path->mgid_index = grh->sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
	path->static_rate = err;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;

	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  sl & 0xf, qp->ibqp.pd);
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PM_STATE	|
					   MLX5_QP_OPTPAR_RNR_TIMEOUT,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_PM_STATE	|
					   MLX5_QP_OPTPAR_ALT_ADDR_PATH,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
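/* opt_mask[][][] lists, per (current state, next state, transport) triple,
 * which optional parameters the firmware accepts for that transition. It is
 * ANDed with the mask derived from the caller's attr_mask in
 * __mlx5_ib_modify_qp() before being passed as OPT_PARAM to the modify
 * command.
 */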
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_CUR_STATE:
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_DEST_QPN:
		/* ... */
	}
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
static int modify_raw_packet_qp_rq(
	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			/* ... */
			      "RAW PACKET QP counters are not supported on current FW\n");
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);

	rq->state = new_state;
static int modify_raw_packet_qp_sq(
	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	struct mlx5_rate_limit old_rl = ibqp->rl;
	struct mlx5_rate_limit new_rl = old_rl;
	bool new_rate_added = false;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);

	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
		else
			new_rl = raw_qp_param->rl;
	}

	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
		err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
		if (err)
			pr_err("Failed configuring rate limit(err %d): \
			       rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
			       err, new_rl.rate, new_rl.max_burst_sz,
			       new_rl.typical_pkt_sz);

		new_rate_added = true;
	}

	MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
	/* index 0 means no limit */
	MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);

	/* Remove new rate from table if failed */
	if (err)
		mlx5_rl_remove_rate(dev, &new_rl);

	/* Only remove the old rate after new rate was set */
	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
	    (new_state != MLX5_SQC_STATE_RDY)) {
		mlx5_rl_remove_rate(dev, &old_rl);
		if (new_state != MLX5_SQC_STATE_RDY)
			memset(&new_rl, 0, sizeof(new_rl));
	}

	sq->state = new_state;
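	/* Rate-limit handling above: a new rate is added to the device rate
	 * table before the SQ is modified, and the old rate is only released
	 * after the modify succeeds, so the SQ never points at a rate-limit
	 * index that has already been freed.
	 */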
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask ==
		    MLX5_RAW_QP_RATE_LIMIT) {
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			/* ... */
	}

	if (modify_rq) {
		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
	}

	if (modify_sq) {
		struct mlx5_flow_handle *flow_rule;

		err = modify_raw_packet_tx_affinity(dev->mdev, sq,

		flow_rule = create_flow_rule_vport_sq(dev, sq,
						      raw_qp_param->port);
		if (IS_ERR(flow_rule))
			return PTR_ERR(flow_rule);

		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
					      raw_qp_param, qp->ibqp.pd);
		if (err)
			mlx5_del_flow_rules(flow_rule);

		destroy_flow_rule_vport_sq(sq);
		sq->flow_rule = flow_rule;
	}
static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_pd *pd,
				    struct mlx5_ib_qp_base *qp_base,
				    u8 port_num, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	unsigned int tx_port_affinity;

	if (ucontext) {
		tx_port_affinity = (unsigned int)atomic_add_return(
					   1, &ucontext->tx_port_affinity) %
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
			    tx_port_affinity, qp_base->mqp.qpn, ucontext);
	} else {
		tx_port_affinity =
			(unsigned int)atomic_add_return(
				1, &dev->port[port_num].roce.tx_port_affinity) %
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
			    tx_port_affinity, qp_base->mqp.qpn);
	}

	return tx_port_affinity;
}
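/* get_tx_affinity() spreads QPs across the ports with a simple round-robin:
 * userspace QPs increment a per-ucontext atomic counter, kernel QPs a
 * per-port one, and the caller (see __mlx5_ib_modify_qp(), which only does
 * this when dev->lag_active) folds the result into the QP context flags.
 */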
static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
				    struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_qp_context context = {};
	struct mlx5_ib_qp_base *base;

	if (counter)
		set_id = counter->id;
	else
		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);

	base = &mqp->trans_qp.base;
	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
	return mlx5_core_qp_modify(dev->mdev,
				   MLX5_CMD_OP_RTS2RTS_QP,
				   MLX5_QP_OPTPAR_COUNTER_SET_ID,
				   &context, &base->mqp);
}
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state,
			       const struct mlx5_ib_modify_qp *ucmd,
			       struct ib_udata *udata)
{
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
	};

	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;

	mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
			     qp->qp_sub_type : ibqp->qp_type);

	context = kzalloc(sizeof(*context), GFP_KERNEL);

	context->flags = cpu_to_be32(mlx5_st << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
		if ((ibqp->qp_type == IB_QPT_RC) ||
		    (ibqp->qp_type == IB_QPT_UD &&
		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
		    (ibqp->qp_type == IB_QPT_UC) ||
		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
			if (dev->lag_active) {
				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
				tx_affinity = get_tx_affinity(dev, pd, base, p,
				context->flags |= cpu_to_be32(tx_affinity << 24);
			}
		}
	}

	if (is_sqp(ibqp->qp_type)) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if ((ibqp->qp_type == IB_QPT_UD &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			/* ... */
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr, false);
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		__be32 access_flags;

		err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
		context->params2 |= access_flags;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :

		/* Underlay port should be used - index 0 function per port */
		if (qp->flags & MLX5_IB_QP_UNDERLAY)
			/* ... */

		if (ibqp->counter)
			set_id = ibqp->counter->id;
		else
			set_id = mlx5_ib_get_counters_id(dev, port_num);
		context->qp_counter_set_usr_page |=
			cpu_to_be32(set_id << 24);
	}

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		context->deth_sqpn = cpu_to_be32(1);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);

	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
	    !optab[mlx5_cur][mlx5_new]) {
		/* ... */
	}

	op = optab[mlx5_cur][mlx5_new];
	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		struct mlx5_modify_raw_qp_param raw_qp_param = {};

		raw_qp_param.operation = op;
		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
			raw_qp_param.rq_q_ctr_id = set_id;
			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
		}

		if (attr_mask & IB_QP_PORT)
			raw_qp_param.port = attr->port_num;

		if (attr_mask & IB_QP_RATE_LIMIT) {
			raw_qp_param.rl.rate = attr->rate_limit;

			if (ucmd->burst_info.max_burst_sz) {
				if (attr->rate_limit &&
				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
					raw_qp_param.rl.max_burst_sz =
						ucmd->burst_info.max_burst_sz;
				}
			}

			if (ucmd->burst_info.typical_pkt_sz) {
				if (attr->rate_limit &&
				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
					raw_qp_param.rl.typical_pkt_sz =
						ucmd->burst_info.typical_pkt_sz;
				}
			}

			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
		}

		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
	} else {
		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
	}

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->trans_qp.alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET &&
	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);

		qp->sq.cur_post = 0;
		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

	if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
		err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
		if (!err)
			qp->counter_pending = 0;
	}
static inline bool is_valid_mask(int mask, int req, int opt)
{
	if ((mask & req) != req)
		return false;

	if (mask & ~(req | opt))
		return false;

	return true;
}

/* check valid transition for driver QP types
 * for now the only QP type that this function supports is DCI
 */
static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
				enum ib_qp_attr_mask attr_mask)
{
	int req = IB_QP_STATE;

	if (new_state == IB_QPS_RESET) {
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		req |= IB_QP_PATH_MTU;
		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
		return is_valid_mask(attr_mask, req, opt);
/* mlx5_ib_modify_dct: modify a DCT QP
 * valid transitions are:
 * RESET to INIT: must set access_flags, pkey_index and port
 * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
 *                mtu, gid_index and hop_limit
 * Other transitions and attributes are illegal
 */
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	enum ib_qp_state cur_state, new_state;
	int required = IB_QP_STATE;

	if (!(attr_mask & IB_QP_STATE))
		/* ... */

	cur_state = qp->state;
	new_state = attr->qp_state;

	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
		if (!is_valid_mask(attr_mask, required, 0))
			/* ... */

		if (attr->port_num == 0 ||
		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
				    attr->port_num, dev->num_ports);
			/* ... */
		}
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			MLX5_SET(dctc, dctc, rre, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			MLX5_SET(dctc, dctc, rwe, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
			if (atomic_mode < 0)
				/* ... */

			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
			MLX5_SET(dctc, dctc, rae, 1);
		}
		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
		MLX5_SET(dctc, dctc, port, attr->port_num);

		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
		MLX5_SET(dctc, dctc, counter_set_id, set_id);

	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		struct mlx5_ib_modify_qp_resp resp = {};
		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
		u32 min_resp_len = offsetof(typeof(resp), dctn) +

		if (udata->outlen < min_resp_len)
			/* ... */
		resp.response_length = min_resp_len;

		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
		if (!is_valid_mask(attr_mask, required, 0))
			/* ... */
		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);

		err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
					   MLX5_ST_SZ_BYTES(create_dct_in), out,

		resp.dctn = qp->dct.mdct.mqp.qpn;
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
	} else {
		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
	}

	if (err)
		qp->state = IB_QPS_ERR;
	else
		qp->state = new_state;
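	/* As the comment above the function states, only two DCT transitions
	 * are accepted: RESET->INIT just records access flags, pkey index,
	 * port and counter set id in the cached create_dct_in mailbox, while
	 * INIT->RTR fills in the path attributes, finally executes
	 * mlx5_core_create_dct() and returns the DCT number to userspace in
	 * struct mlx5_ib_modify_qp_resp.
	 */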
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_modify_qp ucmd = {};
	enum ib_qp_type qp_type;
	enum ib_qp_state cur_state, new_state;
	size_t required_cmd_sz;

	if (ibqp->rwq_ind_tbl)
		/* ... */

	if (udata && udata->inlen) {
		required_cmd_sz = offsetof(typeof(ucmd), reserved) +
				  sizeof(ucmd.reserved);
		if (udata->inlen < required_cmd_sz)
			/* ... */

		if (udata->inlen > sizeof(ucmd) &&
		    !ib_is_udata_cleared(udata, sizeof(ucmd),
					 udata->inlen - sizeof(ucmd)))
			/* ... */

		if (ib_copy_from_udata(&ucmd, udata,
				       min(udata->inlen, sizeof(ucmd))))
			/* ... */

		if (ucmd.comp_mask ||
		    memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
		    memchr_inv(&ucmd.burst_info.reserved, 0,
			       sizeof(ucmd.burst_info.reserved)))
			/* ... */
	}

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);

	if (ibqp->qp_type == IB_QPT_DRIVER)
		qp_type = qp->qp_sub_type;
	else
		qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
			  IB_QPT_GSI : ibqp->qp_type;

	if (qp_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;

		if (qp->flags & MLX5_IB_QP_UNDERLAY) {
			if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
				mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
				/* ... */
			}
		} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
			   qp_type != MLX5_IB_QPT_DCI &&
			   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
			mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
				    cur_state, new_state, ibqp->qp_type, attr_mask);
			/* ... */
		} else if (qp_type == MLX5_IB_QPT_DCI &&
			   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
			mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
				    cur_state, new_state, qp_type, attr_mask);
			/* ... */
		}
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > dev->num_ports)) {
		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
			    attr->port_num, dev->num_ports);
		/* ... */
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len) {
			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
			/* ... */
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
			    attr->max_rd_atomic);
		/* ... */
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
			    attr->max_dest_rd_atomic);
		/* ... */
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		/* ... */
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
				  new_state, &ucmd, udata);

	mutex_unlock(&qp->mutex);
static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
				   u32 wqe_sz, void **cur_edge)
{
	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
 * next nearby edge and get new address translation for current WQE position.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	if (likely(*seg != *cur_edge))
		return;

	_handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
}

/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
 * pointers. At the end @seg is aligned to 16B regardless the copied size.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
				   void **seg, u32 *wqe_sz, const void *src,
				   size_t n)
{
	size_t leftlen = *cur_edge - *seg;
	size_t copysz = min_t(size_t, leftlen, n);

	memcpy(*seg, src, copysz);

	stride = !n ? ALIGN(copysz, 16) : copysz;
	*wqe_sz += stride >> 4;
	handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
}
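/* The send queue is a fragmented buffer, so a WQE being built may hit the end
 * of the current fragment ("edge"). handle_post_send_edge() re-resolves the
 * write pointer when that happens, and memcpy_send_wqe() copies payload in
 * edge-sized chunks, advancing *seg/*wqe_sz (in 16-byte units) as it goes.
 */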
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey  = cpu_to_be32(rkey);
}
static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			void **seg, int *size, void **cur_edge)
{
	struct mlx5_wqe_eth_seg *eseg = *seg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		size_t left, copysz;
		void *pdata = ud_wr->header;

		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/* memcpy_send_wqe should get a 16B align address. Hence, we
		 * first copy up to the current edge and then, if needed,
		 * fall-through to memcpy_send_wqe.
		 */
		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
		memcpy(eseg->inline_hdr.start, pdata, copysz);
		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
			       sizeof(eseg->inline_hdr.start) + copysz, 16);
		*size += stride / 16;

		if (copysz < left) {
			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
		}
	}

	*seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     const struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

static __be64 frwr_mkey_mask(bool atomic)
{
	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}
4249 static __be64
sig_mkey_mask(void)
4253 result
= MLX5_MKEY_MASK_LEN
|
4254 MLX5_MKEY_MASK_PAGE_SIZE
|
4255 MLX5_MKEY_MASK_START_ADDR
|
4256 MLX5_MKEY_MASK_EN_SIGERR
|
4257 MLX5_MKEY_MASK_EN_RINVAL
|
4258 MLX5_MKEY_MASK_KEY
|
4263 MLX5_MKEY_MASK_SMALL_FENCE
|
4264 MLX5_MKEY_MASK_FREE
|
4265 MLX5_MKEY_MASK_BSF_EN
;
4267 return cpu_to_be64(result
);
4270 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
,
4271 struct mlx5_ib_mr
*mr
, u8 flags
, bool atomic
)
4273 int size
= (mr
->ndescs
+ mr
->meta_ndescs
) * mr
->desc_size
;
4275 memset(umr
, 0, sizeof(*umr
));
4278 umr
->xlt_octowords
= cpu_to_be16(get_xlt_octo(size
));
4279 umr
->mkey_mask
= frwr_mkey_mask(atomic
);
4282 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
)
4284 memset(umr
, 0, sizeof(*umr
));
4285 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
4286 umr
->flags
= MLX5_UMR_INLINE
;
static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(int atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}
static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
	    (mask & MLX5_MKEY_MASK_A &&
	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
		return -EPERM;
	return 0;
}
static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
			       struct mlx5_wqe_umr_ctrl_seg *umr,
			       const struct ib_send_wr *wr, int atomic)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (!umrwr->ignore_free_state) {
		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
			/* fail if free */
			umr->flags = MLX5_UMR_CHECK_FREE;
		else
			/* fail if not free */
			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	}

	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		u64 offset = get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(atomic);
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;

	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
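
/* Fill the mkey context segment for a fast-registration (IB_WR_REG_MR) WQE.
 * KLM descriptors are twice the size of MTT entries, so the descriptor count
 * is doubled for the KLMS access mode.
 */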
static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		seg->log2_page_size = ilog2(mr->ibmr.page_size);
	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		/* KLMs take twice the size of MTTs */
		ndescs *= 2;

	seg->flags = get_umr_flags(access) | mr->access_mode;
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
}
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
				 const struct ib_send_wr *wr)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = convert_access(umrwr->access_flags);
	if (umrwr->pd)
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
	    !umrwr->length)
		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);

	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}
static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
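
/* Return the value for the ctrl segment's immediate field: the immediate
 * data for *_WITH_IMM opcodes, or the rkey to invalidate for send-with-inv.
 */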
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
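
/* Copy the scatter list into an inline data segment, advancing *wqe and
 * *wqe_sz as we go and calling handle_post_send_edge() so the copy wraps
 * correctly at the end of the current SQ buffer fragment.
 */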
static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
			    void **wqe, int *wqe_sz, void **cur_edge)
{
	struct mlx5_wqe_inline_seg *seg;
	size_t offset;
	int inl = 0;
	int i;

	seg = *wqe;
	*wqe += sizeof(*seg);
	offset = sizeof(*seg);

	for (i = 0; i < wr->num_sge; i++) {
		size_t len  = wr->sg_list[i].length;
		void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);

		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		while (likely(len)) {
			size_t leftlen;
			size_t copysz;

			handle_post_send_edge(&qp->sq, wqe,
					      *wqe_sz + (offset >> 4),
					      cur_edge);

			leftlen = *cur_edge - *wqe;
			copysz = min_t(size_t, leftlen, len);

			memcpy(*wqe, addr, copysz);
			len -= copysz;
			addr += copysz;
			*wqe += copysz;
			offset += copysz;
		}
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
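
/* Fill the inline section of the byte stream format (BSF) descriptor from a
 * T10-DIF signature domain: application/reference tags, block-guard type and
 * the escape/increment checks.
 */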
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
4652 static int set_sig_data_segment(const struct ib_send_wr
*send_wr
,
4653 struct ib_mr
*sig_mr
,
4654 struct ib_sig_attrs
*sig_attrs
,
4655 struct mlx5_ib_qp
*qp
, void **seg
, int *size
,
4658 struct mlx5_bsf
*bsf
;
4668 struct mlx5_ib_mr
*mr
= to_mmr(sig_mr
);
4669 struct mlx5_ib_mr
*pi_mr
= mr
->pi_mr
;
4671 data_len
= pi_mr
->data_length
;
4672 data_key
= pi_mr
->ibmr
.lkey
;
4673 data_va
= pi_mr
->data_iova
;
4674 if (pi_mr
->meta_ndescs
) {
4675 prot_len
= pi_mr
->meta_length
;
4676 prot_key
= pi_mr
->ibmr
.lkey
;
4677 prot_va
= pi_mr
->pi_iova
;
4681 if (!prot
|| (data_key
== prot_key
&& data_va
== prot_va
&&
4682 data_len
== prot_len
)) {
4684 * Source domain doesn't contain signature information
4685 * or data and protection are interleaved in memory.
4686 * So need construct:
4687 * ------------------
4689 * ------------------
4691 * ------------------
4693 struct mlx5_klm
*data_klm
= *seg
;
4695 data_klm
->bcount
= cpu_to_be32(data_len
);
4696 data_klm
->key
= cpu_to_be32(data_key
);
4697 data_klm
->va
= cpu_to_be64(data_va
);
4698 wqe_size
= ALIGN(sizeof(*data_klm
), 64);
4701 * Source domain contains signature information
4702 * So need construct a strided block format:
4703 * ---------------------------
4704 * | stride_block_ctrl |
4705 * ---------------------------
4707 * ---------------------------
4709 * ---------------------------
4711 * ---------------------------
4713 struct mlx5_stride_block_ctrl_seg
*sblock_ctrl
;
4714 struct mlx5_stride_block_entry
*data_sentry
;
4715 struct mlx5_stride_block_entry
*prot_sentry
;
4716 u16 block_size
= sig_attrs
->mem
.sig
.dif
.pi_interval
;
4720 data_sentry
= (void *)sblock_ctrl
+ sizeof(*sblock_ctrl
);
4721 prot_sentry
= (void *)data_sentry
+ sizeof(*data_sentry
);
4723 prot_size
= prot_field_size(sig_attrs
->mem
.sig_type
);
4725 pr_err("Bad block size given: %u\n", block_size
);
4728 sblock_ctrl
->bcount_per_cycle
= cpu_to_be32(block_size
+
4730 sblock_ctrl
->op
= cpu_to_be32(MLX5_STRIDE_BLOCK_OP
);
4731 sblock_ctrl
->repeat_count
= cpu_to_be32(data_len
/ block_size
);
4732 sblock_ctrl
->num_entries
= cpu_to_be16(2);
4734 data_sentry
->bcount
= cpu_to_be16(block_size
);
4735 data_sentry
->key
= cpu_to_be32(data_key
);
4736 data_sentry
->va
= cpu_to_be64(data_va
);
4737 data_sentry
->stride
= cpu_to_be16(block_size
);
4739 prot_sentry
->bcount
= cpu_to_be16(prot_size
);
4740 prot_sentry
->key
= cpu_to_be32(prot_key
);
4741 prot_sentry
->va
= cpu_to_be64(prot_va
);
4742 prot_sentry
->stride
= cpu_to_be16(prot_size
);
4744 wqe_size
= ALIGN(sizeof(*sblock_ctrl
) + sizeof(*data_sentry
) +
4745 sizeof(*prot_sentry
), 64);
4749 *size
+= wqe_size
/ 16;
4750 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4753 ret
= mlx5_set_bsf(sig_mr
, sig_attrs
, bsf
, data_len
);
4757 *seg
+= sizeof(*bsf
);
4758 *size
+= sizeof(*bsf
) / 16;
4759 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_mr *sig_mr, int access_flags,
				 u32 size, u32 length, u32 pdn)
{
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				unsigned int size)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
4793 static int set_pi_umr_wr(const struct ib_send_wr
*send_wr
,
4794 struct mlx5_ib_qp
*qp
, void **seg
, int *size
,
4797 const struct ib_reg_wr
*wr
= reg_wr(send_wr
);
4798 struct mlx5_ib_mr
*sig_mr
= to_mmr(wr
->mr
);
4799 struct mlx5_ib_mr
*pi_mr
= sig_mr
->pi_mr
;
4800 struct ib_sig_attrs
*sig_attrs
= sig_mr
->ibmr
.sig_attrs
;
4801 u32 pdn
= get_pd(qp
)->pdn
;
4803 int region_len
, ret
;
4805 if (unlikely(send_wr
->num_sge
!= 0) ||
4806 unlikely(wr
->access
& IB_ACCESS_REMOTE_ATOMIC
) ||
4807 unlikely(!sig_mr
->sig
) || unlikely(!qp
->ibqp
.integrity_en
) ||
4808 unlikely(!sig_mr
->sig
->sig_status_checked
))
4811 /* length of the protected region, data + protection */
4812 region_len
= pi_mr
->ibmr
.length
;
4815 * KLM octoword size - if protection was provided
4816 * then we use strided block format (3 octowords),
4817 * else we use single KLM (1 octoword)
4819 if (sig_attrs
->mem
.sig_type
!= IB_SIG_TYPE_NONE
)
4822 xlt_size
= sizeof(struct mlx5_klm
);
4824 set_sig_umr_segment(*seg
, xlt_size
);
4825 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
4826 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
4827 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4829 set_sig_mkey_segment(*seg
, wr
->mr
, wr
->access
, xlt_size
, region_len
,
4831 *seg
+= sizeof(struct mlx5_mkey_seg
);
4832 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
4833 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4835 ret
= set_sig_data_segment(send_wr
, wr
->mr
, sig_attrs
, qp
, seg
, size
,
4840 sig_mr
->sig
->sig_status_checked
= false;
4844 static int set_psv_wr(struct ib_sig_domain
*domain
,
4845 u32 psv_idx
, void **seg
, int *size
)
4847 struct mlx5_seg_set_psv
*psv_seg
= *seg
;
4849 memset(psv_seg
, 0, sizeof(*psv_seg
));
4850 psv_seg
->psv_num
= cpu_to_be32(psv_idx
);
4851 switch (domain
->sig_type
) {
4852 case IB_SIG_TYPE_NONE
:
4854 case IB_SIG_TYPE_T10_DIF
:
4855 psv_seg
->transient_sig
= cpu_to_be32(domain
->sig
.dif
.bg
<< 16 |
4856 domain
->sig
.dif
.app_tag
);
4857 psv_seg
->ref_tag
= cpu_to_be32(domain
->sig
.dif
.ref_tag
);
4860 pr_err("Bad signature type (%d) is given.\n",
4865 *seg
+= sizeof(*psv_seg
);
4866 *size
+= sizeof(*psv_seg
) / 16;
4871 static int set_reg_wr(struct mlx5_ib_qp
*qp
,
4872 const struct ib_reg_wr
*wr
,
4873 void **seg
, int *size
, void **cur_edge
,
4874 bool check_not_free
)
4876 struct mlx5_ib_mr
*mr
= to_mmr(wr
->mr
);
4877 struct mlx5_ib_pd
*pd
= to_mpd(qp
->ibqp
.pd
);
4878 struct mlx5_ib_dev
*dev
= to_mdev(pd
->ibpd
.device
);
4879 int mr_list_size
= (mr
->ndescs
+ mr
->meta_ndescs
) * mr
->desc_size
;
4880 bool umr_inline
= mr_list_size
<= MLX5_IB_SQ_UMR_INLINE_THRESHOLD
;
4881 bool atomic
= wr
->access
& IB_ACCESS_REMOTE_ATOMIC
;
4884 if (!mlx5_ib_can_use_umr(dev
, atomic
, wr
->access
)) {
4885 mlx5_ib_warn(to_mdev(qp
->ibqp
.device
),
4886 "Fast update of %s for MR is disabled\n",
4887 (MLX5_CAP_GEN(dev
->mdev
,
4888 umr_modify_entity_size_disabled
)) ?
4894 if (unlikely(wr
->wr
.send_flags
& IB_SEND_INLINE
)) {
4895 mlx5_ib_warn(to_mdev(qp
->ibqp
.device
),
4896 "Invalid IB_SEND_INLINE send flag\n");
4901 flags
|= MLX5_UMR_CHECK_NOT_FREE
;
4903 flags
|= MLX5_UMR_INLINE
;
4905 set_reg_umr_seg(*seg
, mr
, flags
, atomic
);
4906 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
4907 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
4908 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4910 set_reg_mkey_seg(*seg
, mr
, wr
->key
, wr
->access
);
4911 *seg
+= sizeof(struct mlx5_mkey_seg
);
4912 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
4913 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4916 memcpy_send_wqe(&qp
->sq
, cur_edge
, seg
, size
, mr
->descs
,
4918 *size
= ALIGN(*size
, MLX5_SEND_WQE_BB
>> 4);
4920 set_reg_data_seg(*seg
, mr
, pd
);
4921 *seg
+= sizeof(struct mlx5_wqe_data_seg
);
4922 *size
+= (sizeof(struct mlx5_wqe_data_seg
) / 16);
4927 static void set_linv_wr(struct mlx5_ib_qp
*qp
, void **seg
, int *size
,
4930 set_linv_umr_seg(*seg
);
4931 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
4932 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
4933 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4934 set_linv_mkey_seg(*seg
);
4935 *seg
+= sizeof(struct mlx5_mkey_seg
);
4936 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
4937 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4940 static void dump_wqe(struct mlx5_ib_qp
*qp
, u32 idx
, int size_16
)
4945 pr_debug("dump WQE index %u:\n", idx
);
4946 for (i
= 0, j
= 0; i
< size_16
* 4; i
+= 4, j
+= 4) {
4947 if ((i
& 0xf) == 0) {
4948 p
= mlx5_frag_buf_get_wqe(&qp
->sq
.fbc
, idx
);
4949 pr_debug("WQBB at %p:\n", (void *)p
);
4951 idx
= (idx
+ 1) & (qp
->sq
.wqe_cnt
- 1);
4953 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p
[j
]),
4954 be32_to_cpu(p
[j
+ 1]), be32_to_cpu(p
[j
+ 2]),
4955 be32_to_cpu(p
[j
+ 3]));
4959 static int __begin_wqe(struct mlx5_ib_qp
*qp
, void **seg
,
4960 struct mlx5_wqe_ctrl_seg
**ctrl
,
4961 const struct ib_send_wr
*wr
, unsigned int *idx
,
4962 int *size
, void **cur_edge
, int nreq
,
4963 bool send_signaled
, bool solicited
)
4965 if (unlikely(mlx5_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)))
4968 *idx
= qp
->sq
.cur_post
& (qp
->sq
.wqe_cnt
- 1);
4969 *seg
= mlx5_frag_buf_get_wqe(&qp
->sq
.fbc
, *idx
);
4971 *(uint32_t *)(*seg
+ 8) = 0;
4972 (*ctrl
)->imm
= send_ieth(wr
);
4973 (*ctrl
)->fm_ce_se
= qp
->sq_signal_bits
|
4974 (send_signaled
? MLX5_WQE_CTRL_CQ_UPDATE
: 0) |
4975 (solicited
? MLX5_WQE_CTRL_SOLICITED
: 0);
4977 *seg
+= sizeof(**ctrl
);
4978 *size
= sizeof(**ctrl
) / 16;
4979 *cur_edge
= qp
->sq
.cur_edge
;
4984 static int begin_wqe(struct mlx5_ib_qp
*qp
, void **seg
,
4985 struct mlx5_wqe_ctrl_seg
**ctrl
,
4986 const struct ib_send_wr
*wr
, unsigned *idx
,
4987 int *size
, void **cur_edge
, int nreq
)
4989 return __begin_wqe(qp
, seg
, ctrl
, wr
, idx
, size
, cur_edge
, nreq
,
4990 wr
->send_flags
& IB_SEND_SIGNALED
,
4991 wr
->send_flags
& IB_SEND_SOLICITED
);
4994 static void finish_wqe(struct mlx5_ib_qp
*qp
,
4995 struct mlx5_wqe_ctrl_seg
*ctrl
,
4996 void *seg
, u8 size
, void *cur_edge
,
4997 unsigned int idx
, u64 wr_id
, int nreq
, u8 fence
,
5002 ctrl
->opmod_idx_opcode
= cpu_to_be32(((u32
)(qp
->sq
.cur_post
) << 8) |
5003 mlx5_opcode
| ((u32
)opmod
<< 24));
5004 ctrl
->qpn_ds
= cpu_to_be32(size
| (qp
->trans_qp
.base
.mqp
.qpn
<< 8));
5005 ctrl
->fm_ce_se
|= fence
;
5006 if (unlikely(qp
->wq_sig
))
5007 ctrl
->signature
= wq_sig(ctrl
);
5009 qp
->sq
.wrid
[idx
] = wr_id
;
5010 qp
->sq
.w_list
[idx
].opcode
= mlx5_opcode
;
5011 qp
->sq
.wqe_head
[idx
] = qp
->sq
.head
+ nreq
;
5012 qp
->sq
.cur_post
+= DIV_ROUND_UP(size
* 16, MLX5_SEND_WQE_BB
);
5013 qp
->sq
.w_list
[idx
].next
= qp
->sq
.cur_post
;
5015 /* We save the edge which was possibly updated during the WQE
5016 * construction, into SQ's cache.
5018 seg
= PTR_ALIGN(seg
, MLX5_SEND_WQE_BB
);
5019 qp
->sq
.cur_edge
= (unlikely(seg
== cur_edge
)) ?
5020 get_sq_edge(&qp
->sq
, qp
->sq
.cur_post
&
5021 (qp
->sq
.wqe_cnt
- 1)) :
5025 static int _mlx5_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
5026 const struct ib_send_wr
**bad_wr
, bool drain
)
5028 struct mlx5_wqe_ctrl_seg
*ctrl
= NULL
; /* compiler warning */
5029 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5030 struct mlx5_core_dev
*mdev
= dev
->mdev
;
5031 struct ib_reg_wr reg_pi_wr
;
5032 struct mlx5_ib_qp
*qp
;
5033 struct mlx5_ib_mr
*mr
;
5034 struct mlx5_ib_mr
*pi_mr
;
5035 struct mlx5_ib_mr pa_pi_mr
;
5036 struct ib_sig_attrs
*sig_attrs
;
5037 struct mlx5_wqe_xrc_seg
*xrc
;
5040 int uninitialized_var(size
);
5041 unsigned long flags
;
5051 if (unlikely(mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
&&
5057 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5058 return mlx5_ib_gsi_post_send(ibqp
, wr
, bad_wr
);
5063 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
5065 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
5066 if (unlikely(wr
->opcode
>= ARRAY_SIZE(mlx5_ib_opcode
))) {
5067 mlx5_ib_warn(dev
, "\n");
5073 num_sge
= wr
->num_sge
;
5074 if (unlikely(num_sge
> qp
->sq
.max_gs
)) {
5075 mlx5_ib_warn(dev
, "\n");
5081 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
, &size
, &cur_edge
,
5084 mlx5_ib_warn(dev
, "\n");
5090 if (wr
->opcode
== IB_WR_REG_MR
||
5091 wr
->opcode
== IB_WR_REG_MR_INTEGRITY
) {
5092 fence
= dev
->umr_fence
;
5093 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
5095 if (wr
->send_flags
& IB_SEND_FENCE
) {
5097 fence
= MLX5_FENCE_MODE_SMALL_AND_FENCE
;
5099 fence
= MLX5_FENCE_MODE_FENCE
;
5101 fence
= qp
->next_fence
;
5105 switch (ibqp
->qp_type
) {
5106 case IB_QPT_XRC_INI
:
5108 seg
+= sizeof(*xrc
);
5109 size
+= sizeof(*xrc
) / 16;
5112 switch (wr
->opcode
) {
5113 case IB_WR_RDMA_READ
:
5114 case IB_WR_RDMA_WRITE
:
5115 case IB_WR_RDMA_WRITE_WITH_IMM
:
5116 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
5118 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
5119 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
5122 case IB_WR_ATOMIC_CMP_AND_SWP
:
5123 case IB_WR_ATOMIC_FETCH_AND_ADD
:
5124 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
5125 mlx5_ib_warn(dev
, "Atomic operations are not supported yet\n");
5130 case IB_WR_LOCAL_INV
:
5131 qp
->sq
.wr_data
[idx
] = IB_WR_LOCAL_INV
;
5132 ctrl
->imm
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
5133 set_linv_wr(qp
, &seg
, &size
, &cur_edge
);
5138 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR
;
5139 ctrl
->imm
= cpu_to_be32(reg_wr(wr
)->key
);
5140 err
= set_reg_wr(qp
, reg_wr(wr
), &seg
, &size
,
5149 case IB_WR_REG_MR_INTEGRITY
:
5150 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR_INTEGRITY
;
5152 mr
= to_mmr(reg_wr(wr
)->mr
);
5156 memset(®_pi_wr
, 0,
5157 sizeof(struct ib_reg_wr
));
5159 reg_pi_wr
.mr
= &pi_mr
->ibmr
;
5160 reg_pi_wr
.access
= reg_wr(wr
)->access
;
5161 reg_pi_wr
.key
= pi_mr
->ibmr
.rkey
;
5163 ctrl
->imm
= cpu_to_be32(reg_pi_wr
.key
);
5164 /* UMR for data + prot registration */
5165 err
= set_reg_wr(qp
, ®_pi_wr
, &seg
,
5172 finish_wqe(qp
, ctrl
, seg
, size
,
5173 cur_edge
, idx
, wr
->wr_id
,
5177 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
,
5178 &idx
, &size
, &cur_edge
,
5181 mlx5_ib_warn(dev
, "\n");
5187 memset(&pa_pi_mr
, 0,
5188 sizeof(struct mlx5_ib_mr
));
5189 /* No UMR, use local_dma_lkey */
5190 pa_pi_mr
.ibmr
.lkey
=
5191 mr
->ibmr
.pd
->local_dma_lkey
;
5193 pa_pi_mr
.ndescs
= mr
->ndescs
;
5194 pa_pi_mr
.data_length
= mr
->data_length
;
5195 pa_pi_mr
.data_iova
= mr
->data_iova
;
5196 if (mr
->meta_ndescs
) {
5197 pa_pi_mr
.meta_ndescs
=
5199 pa_pi_mr
.meta_length
=
5201 pa_pi_mr
.pi_iova
= mr
->pi_iova
;
5204 pa_pi_mr
.ibmr
.length
= mr
->ibmr
.length
;
5205 mr
->pi_mr
= &pa_pi_mr
;
5207 ctrl
->imm
= cpu_to_be32(mr
->ibmr
.rkey
);
5208 /* UMR for sig MR */
5209 err
= set_pi_umr_wr(wr
, qp
, &seg
, &size
,
5212 mlx5_ib_warn(dev
, "\n");
5216 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
5217 wr
->wr_id
, nreq
, fence
,
5221 * SET_PSV WQEs are not signaled and solicited
5224 sig_attrs
= mr
->ibmr
.sig_attrs
;
5225 err
= __begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
,
5226 &size
, &cur_edge
, nreq
, false,
5229 mlx5_ib_warn(dev
, "\n");
5234 err
= set_psv_wr(&sig_attrs
->mem
,
5235 mr
->sig
->psv_memory
.psv_idx
,
5238 mlx5_ib_warn(dev
, "\n");
5242 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
5243 wr
->wr_id
, nreq
, next_fence
,
5244 MLX5_OPCODE_SET_PSV
);
5246 err
= __begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
,
5247 &size
, &cur_edge
, nreq
, false,
5250 mlx5_ib_warn(dev
, "\n");
5255 err
= set_psv_wr(&sig_attrs
->wire
,
5256 mr
->sig
->psv_wire
.psv_idx
,
5259 mlx5_ib_warn(dev
, "\n");
5263 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
5264 wr
->wr_id
, nreq
, next_fence
,
5265 MLX5_OPCODE_SET_PSV
);
5268 MLX5_FENCE_MODE_INITIATOR_SMALL
;
5278 switch (wr
->opcode
) {
5279 case IB_WR_RDMA_WRITE
:
5280 case IB_WR_RDMA_WRITE_WITH_IMM
:
5281 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
5283 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
5284 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
5293 if (unlikely(!mdev
->port_caps
[qp
->port
- 1].has_smi
)) {
5294 mlx5_ib_warn(dev
, "Send SMP MADs is not allowed\n");
5300 case MLX5_IB_QPT_HW_GSI
:
5301 set_datagram_seg(seg
, wr
);
5302 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
5303 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
5304 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5308 set_datagram_seg(seg
, wr
);
5309 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
5310 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
5311 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5313 /* handle qp that supports ud offload */
5314 if (qp
->flags
& IB_QP_CREATE_IPOIB_UD_LSO
) {
5315 struct mlx5_wqe_eth_pad
*pad
;
5318 memset(pad
, 0, sizeof(struct mlx5_wqe_eth_pad
));
5319 seg
+= sizeof(struct mlx5_wqe_eth_pad
);
5320 size
+= sizeof(struct mlx5_wqe_eth_pad
) / 16;
5321 set_eth_seg(wr
, qp
, &seg
, &size
, &cur_edge
);
5322 handle_post_send_edge(&qp
->sq
, &seg
, size
,
5326 case MLX5_IB_QPT_REG_UMR
:
5327 if (wr
->opcode
!= MLX5_IB_WR_UMR
) {
5329 mlx5_ib_warn(dev
, "bad opcode\n");
5332 qp
->sq
.wr_data
[idx
] = MLX5_IB_WR_UMR
;
5333 ctrl
->imm
= cpu_to_be32(umr_wr(wr
)->mkey
);
5334 err
= set_reg_umr_segment(dev
, seg
, wr
, !!(MLX5_CAP_GEN(mdev
, atomic
)));
5337 seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
5338 size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
5339 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5340 set_reg_mkey_segment(seg
, wr
);
5341 seg
+= sizeof(struct mlx5_mkey_seg
);
5342 size
+= sizeof(struct mlx5_mkey_seg
) / 16;
5343 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5350 if (wr
->send_flags
& IB_SEND_INLINE
&& num_sge
) {
5351 err
= set_data_inl_seg(qp
, wr
, &seg
, &size
, &cur_edge
);
5352 if (unlikely(err
)) {
5353 mlx5_ib_warn(dev
, "\n");
5358 for (i
= 0; i
< num_sge
; i
++) {
5359 handle_post_send_edge(&qp
->sq
, &seg
, size
,
5361 if (likely(wr
->sg_list
[i
].length
)) {
5363 ((struct mlx5_wqe_data_seg
*)seg
,
5365 size
+= sizeof(struct mlx5_wqe_data_seg
) / 16;
5366 seg
+= sizeof(struct mlx5_wqe_data_seg
);
5371 qp
->next_fence
= next_fence
;
5372 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
, wr
->wr_id
, nreq
,
5373 fence
, mlx5_ib_opcode
[wr
->opcode
]);
5376 dump_wqe(qp
, idx
, size
);
5381 qp
->sq
.head
+= nreq
;
5383 /* Make sure that descriptors are written before
5384 * updating doorbell record and ringing the doorbell
5388 qp
->db
.db
[MLX5_SND_DBR
] = cpu_to_be32(qp
->sq
.cur_post
);
5390 /* Make sure doorbell record is visible to the HCA before
5391 * we hit doorbell */
5394 mlx5_write64((__be32
*)ctrl
, bf
->bfreg
->map
+ bf
->offset
);
5395 /* Make sure doorbells don't leak out of SQ spinlock
5396 * and reach the HCA out of order.
5398 bf
->offset
^= bf
->buf_size
;
5401 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
5406 int mlx5_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
5407 const struct ib_send_wr
**bad_wr
)
5409 return _mlx5_ib_post_send(ibqp
, wr
, bad_wr
, false);
5412 static void set_sig_seg(struct mlx5_rwqe_sig
*sig
, int size
)
5414 sig
->signature
= calc_sig(sig
, size
);
5417 static int _mlx5_ib_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
5418 const struct ib_recv_wr
**bad_wr
, bool drain
)
5420 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
5421 struct mlx5_wqe_data_seg
*scat
;
5422 struct mlx5_rwqe_sig
*sig
;
5423 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5424 struct mlx5_core_dev
*mdev
= dev
->mdev
;
5425 unsigned long flags
;
5431 if (unlikely(mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
&&
5437 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5438 return mlx5_ib_gsi_post_recv(ibqp
, wr
, bad_wr
);
5440 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
5442 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
5444 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
5445 if (mlx5_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
5451 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
5457 scat
= mlx5_frag_buf_get_wqe(&qp
->rq
.fbc
, ind
);
5461 for (i
= 0; i
< wr
->num_sge
; i
++)
5462 set_data_ptr_seg(scat
+ i
, wr
->sg_list
+ i
);
5464 if (i
< qp
->rq
.max_gs
) {
5465 scat
[i
].byte_count
= 0;
5466 scat
[i
].lkey
= cpu_to_be32(MLX5_INVALID_LKEY
);
5471 sig
= (struct mlx5_rwqe_sig
*)scat
;
5472 set_sig_seg(sig
, (qp
->rq
.max_gs
+ 1) << 2);
5475 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
5477 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
5482 qp
->rq
.head
+= nreq
;
5484 /* Make sure that descriptors are written before
5489 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
5492 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}
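
/* Helpers translating firmware QP state, path-migration state and access
 * bits into their IB verbs equivalents for the query paths below.
 */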
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
5542 static void to_rdma_ah_attr(struct mlx5_ib_dev
*ibdev
,
5543 struct rdma_ah_attr
*ah_attr
,
5544 struct mlx5_qp_path
*path
)
5547 memset(ah_attr
, 0, sizeof(*ah_attr
));
5549 if (!path
->port
|| path
->port
> ibdev
->num_ports
)
5552 ah_attr
->type
= rdma_ah_find_type(&ibdev
->ib_dev
, path
->port
);
5554 rdma_ah_set_port_num(ah_attr
, path
->port
);
5555 rdma_ah_set_sl(ah_attr
, path
->dci_cfi_prio_sl
& 0xf);
5557 rdma_ah_set_dlid(ah_attr
, be16_to_cpu(path
->rlid
));
5558 rdma_ah_set_path_bits(ah_attr
, path
->grh_mlid
& 0x7f);
5559 rdma_ah_set_static_rate(ah_attr
,
5560 path
->static_rate
? path
->static_rate
- 5 : 0);
5562 if (path
->grh_mlid
& (1 << 7) ||
5563 ah_attr
->type
== RDMA_AH_ATTR_TYPE_ROCE
) {
5564 u32 tc_fl
= be32_to_cpu(path
->tclass_flowlabel
);
5566 rdma_ah_set_grh(ah_attr
, NULL
,
5570 (tc_fl
>> 20) & 0xff);
5571 rdma_ah_set_dgid_raw(ah_attr
, path
->rgid
);
5575 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev
*dev
,
5576 struct mlx5_ib_sq
*sq
,
5581 err
= mlx5_core_query_sq_state(dev
->mdev
, sq
->base
.mqp
.qpn
, sq_state
);
5584 sq
->state
= *sq_state
;
5590 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev
*dev
,
5591 struct mlx5_ib_rq
*rq
,
5599 inlen
= MLX5_ST_SZ_BYTES(query_rq_out
);
5600 out
= kvzalloc(inlen
, GFP_KERNEL
);
5604 err
= mlx5_core_query_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, out
);
5608 rqc
= MLX5_ADDR_OF(query_rq_out
, out
, rq_context
);
5609 *rq_state
= MLX5_GET(rqc
, rqc
, state
);
5610 rq
->state
= *rq_state
;
5617 static int sqrq_state_to_qp_state(u8 sq_state
, u8 rq_state
,
5618 struct mlx5_ib_qp
*qp
, u8
*qp_state
)
5620 static const u8 sqrq_trans
[MLX5_RQ_NUM_STATE
][MLX5_SQ_NUM_STATE
] = {
5621 [MLX5_RQC_STATE_RST
] = {
5622 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
5623 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
5624 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE_BAD
,
5625 [MLX5_SQ_STATE_NA
] = IB_QPS_RESET
,
5627 [MLX5_RQC_STATE_RDY
] = {
5628 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
5629 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
5630 [MLX5_SQC_STATE_ERR
] = IB_QPS_SQE
,
5631 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE
,
5633 [MLX5_RQC_STATE_ERR
] = {
5634 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
5635 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
5636 [MLX5_SQC_STATE_ERR
] = IB_QPS_ERR
,
5637 [MLX5_SQ_STATE_NA
] = IB_QPS_ERR
,
5639 [MLX5_RQ_STATE_NA
] = {
5640 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
5641 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
5642 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE
,
5643 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE_BAD
,
5647 *qp_state
= sqrq_trans
[rq_state
][sq_state
];
5649 if (*qp_state
== MLX5_QP_STATE_BAD
) {
5650 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
5651 qp
->raw_packet_qp
.sq
.base
.mqp
.qpn
, sq_state
,
5652 qp
->raw_packet_qp
.rq
.base
.mqp
.qpn
, rq_state
);
5656 if (*qp_state
== MLX5_QP_STATE
)
5657 *qp_state
= qp
->state
;
5662 static int query_raw_packet_qp_state(struct mlx5_ib_dev
*dev
,
5663 struct mlx5_ib_qp
*qp
,
5664 u8
*raw_packet_qp_state
)
5666 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
5667 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
5668 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
5670 u8 sq_state
= MLX5_SQ_STATE_NA
;
5671 u8 rq_state
= MLX5_RQ_STATE_NA
;
5673 if (qp
->sq
.wqe_cnt
) {
5674 err
= query_raw_packet_qp_sq_state(dev
, sq
, &sq_state
);
5679 if (qp
->rq
.wqe_cnt
) {
5680 err
= query_raw_packet_qp_rq_state(dev
, rq
, &rq_state
);
5685 return sqrq_state_to_qp_state(sq_state
, rq_state
, qp
,
5686 raw_packet_qp_state
);
5689 static int query_qp_attr(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
5690 struct ib_qp_attr
*qp_attr
)
5692 int outlen
= MLX5_ST_SZ_BYTES(query_qp_out
);
5693 struct mlx5_qp_context
*context
;
5698 outb
= kzalloc(outlen
, GFP_KERNEL
);
5702 err
= mlx5_core_qp_query(dev
->mdev
, &qp
->trans_qp
.base
.mqp
, outb
,
5707 /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
5708 context
= (struct mlx5_qp_context
*)MLX5_ADDR_OF(query_qp_out
, outb
, qpc
);
5710 mlx5_state
= be32_to_cpu(context
->flags
) >> 28;
5712 qp
->state
= to_ib_qp_state(mlx5_state
);
5713 qp_attr
->path_mtu
= context
->mtu_msgmax
>> 5;
5714 qp_attr
->path_mig_state
=
5715 to_ib_mig_state((be32_to_cpu(context
->flags
) >> 11) & 0x3);
5716 qp_attr
->qkey
= be32_to_cpu(context
->qkey
);
5717 qp_attr
->rq_psn
= be32_to_cpu(context
->rnr_nextrecvpsn
) & 0xffffff;
5718 qp_attr
->sq_psn
= be32_to_cpu(context
->next_send_psn
) & 0xffffff;
5719 qp_attr
->dest_qp_num
= be32_to_cpu(context
->log_pg_sz_remote_qpn
) & 0xffffff;
5720 qp_attr
->qp_access_flags
=
5721 to_ib_qp_access_flags(be32_to_cpu(context
->params2
));
5723 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
) {
5724 to_rdma_ah_attr(dev
, &qp_attr
->ah_attr
, &context
->pri_path
);
5725 to_rdma_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
->alt_path
);
5726 qp_attr
->alt_pkey_index
=
5727 be16_to_cpu(context
->alt_path
.pkey_index
);
5728 qp_attr
->alt_port_num
=
5729 rdma_ah_get_port_num(&qp_attr
->alt_ah_attr
);
5732 qp_attr
->pkey_index
= be16_to_cpu(context
->pri_path
.pkey_index
);
5733 qp_attr
->port_num
= context
->pri_path
.port
;
5735 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
5736 qp_attr
->sq_draining
= mlx5_state
== MLX5_QP_STATE_SQ_DRAINING
;
5738 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
->params1
) >> 21) & 0x7);
5740 qp_attr
->max_dest_rd_atomic
=
5741 1 << ((be32_to_cpu(context
->params2
) >> 21) & 0x7);
5742 qp_attr
->min_rnr_timer
=
5743 (be32_to_cpu(context
->rnr_nextrecvpsn
) >> 24) & 0x1f;
5744 qp_attr
->timeout
= context
->pri_path
.ackto_lt
>> 3;
5745 qp_attr
->retry_cnt
= (be32_to_cpu(context
->params1
) >> 16) & 0x7;
5746 qp_attr
->rnr_retry
= (be32_to_cpu(context
->params1
) >> 13) & 0x7;
5747 qp_attr
->alt_timeout
= context
->alt_path
.ackto_lt
>> 3;
5754 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*mqp
,
5755 struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
5756 struct ib_qp_init_attr
*qp_init_attr
)
5758 struct mlx5_core_dct
*dct
= &mqp
->dct
.mdct
;
5760 u32 access_flags
= 0;
5761 int outlen
= MLX5_ST_SZ_BYTES(query_dct_out
);
5764 int supported_mask
= IB_QP_STATE
|
5765 IB_QP_ACCESS_FLAGS
|
5767 IB_QP_MIN_RNR_TIMER
|
5772 if (qp_attr_mask
& ~supported_mask
)
5774 if (mqp
->state
!= IB_QPS_RTR
)
5777 out
= kzalloc(outlen
, GFP_KERNEL
);
5781 err
= mlx5_core_dct_query(dev
->mdev
, dct
, out
, outlen
);
5785 dctc
= MLX5_ADDR_OF(query_dct_out
, out
, dct_context_entry
);
5787 if (qp_attr_mask
& IB_QP_STATE
)
5788 qp_attr
->qp_state
= IB_QPS_RTR
;
5790 if (qp_attr_mask
& IB_QP_ACCESS_FLAGS
) {
5791 if (MLX5_GET(dctc
, dctc
, rre
))
5792 access_flags
|= IB_ACCESS_REMOTE_READ
;
5793 if (MLX5_GET(dctc
, dctc
, rwe
))
5794 access_flags
|= IB_ACCESS_REMOTE_WRITE
;
5795 if (MLX5_GET(dctc
, dctc
, rae
))
5796 access_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
5797 qp_attr
->qp_access_flags
= access_flags
;
5800 if (qp_attr_mask
& IB_QP_PORT
)
5801 qp_attr
->port_num
= MLX5_GET(dctc
, dctc
, port
);
5802 if (qp_attr_mask
& IB_QP_MIN_RNR_TIMER
)
5803 qp_attr
->min_rnr_timer
= MLX5_GET(dctc
, dctc
, min_rnr_nak
);
5804 if (qp_attr_mask
& IB_QP_AV
) {
5805 qp_attr
->ah_attr
.grh
.traffic_class
= MLX5_GET(dctc
, dctc
, tclass
);
5806 qp_attr
->ah_attr
.grh
.flow_label
= MLX5_GET(dctc
, dctc
, flow_label
);
5807 qp_attr
->ah_attr
.grh
.sgid_index
= MLX5_GET(dctc
, dctc
, my_addr_index
);
5808 qp_attr
->ah_attr
.grh
.hop_limit
= MLX5_GET(dctc
, dctc
, hop_limit
);
5810 if (qp_attr_mask
& IB_QP_PATH_MTU
)
5811 qp_attr
->path_mtu
= MLX5_GET(dctc
, dctc
, mtu
);
5812 if (qp_attr_mask
& IB_QP_PKEY_INDEX
)
5813 qp_attr
->pkey_index
= MLX5_GET(dctc
, dctc
, pkey_index
);
5819 int mlx5_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
,
5820 int qp_attr_mask
, struct ib_qp_init_attr
*qp_init_attr
)
5822 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5823 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
5825 u8 raw_packet_qp_state
;
5827 if (ibqp
->rwq_ind_tbl
)
5830 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5831 return mlx5_ib_gsi_query_qp(ibqp
, qp_attr
, qp_attr_mask
,
5834 /* Not all of output fields are applicable, make sure to zero them */
5835 memset(qp_init_attr
, 0, sizeof(*qp_init_attr
));
5836 memset(qp_attr
, 0, sizeof(*qp_attr
));
5838 if (unlikely(qp
->qp_sub_type
== MLX5_IB_QPT_DCT
))
5839 return mlx5_ib_dct_query_qp(dev
, qp
, qp_attr
,
5840 qp_attr_mask
, qp_init_attr
);
5842 mutex_lock(&qp
->mutex
);
5844 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
5845 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
5846 err
= query_raw_packet_qp_state(dev
, qp
, &raw_packet_qp_state
);
5849 qp
->state
= raw_packet_qp_state
;
5850 qp_attr
->port_num
= 1;
5852 err
= query_qp_attr(dev
, qp
, qp_attr
);
5857 qp_attr
->qp_state
= qp
->state
;
5858 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
5859 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
5860 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
5862 if (!ibqp
->uobject
) {
5863 qp_attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
5864 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
5865 qp_init_attr
->qp_context
= ibqp
->qp_context
;
5867 qp_attr
->cap
.max_send_wr
= 0;
5868 qp_attr
->cap
.max_send_sge
= 0;
5871 qp_init_attr
->qp_type
= ibqp
->qp_type
;
5872 qp_init_attr
->recv_cq
= ibqp
->recv_cq
;
5873 qp_init_attr
->send_cq
= ibqp
->send_cq
;
5874 qp_init_attr
->srq
= ibqp
->srq
;
5875 qp_attr
->cap
.max_inline_data
= qp
->max_inline_data
;
5877 qp_init_attr
->cap
= qp_attr
->cap
;
5879 qp_init_attr
->create_flags
= 0;
5880 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
5881 qp_init_attr
->create_flags
|= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
;
5883 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
5884 qp_init_attr
->create_flags
|= IB_QP_CREATE_CROSS_CHANNEL
;
5885 if (qp
->flags
& MLX5_IB_QP_MANAGED_SEND
)
5886 qp_init_attr
->create_flags
|= IB_QP_CREATE_MANAGED_SEND
;
5887 if (qp
->flags
& MLX5_IB_QP_MANAGED_RECV
)
5888 qp_init_attr
->create_flags
|= IB_QP_CREATE_MANAGED_RECV
;
5889 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
5890 qp_init_attr
->create_flags
|= MLX5_IB_QP_CREATE_SQPN_QP1
;
5892 qp_init_attr
->sq_sig_type
= qp
->sq_signal_bits
& MLX5_WQE_CTRL_CQ_UPDATE
?
5893 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
5896 mutex_unlock(&qp
->mutex
);
5900 struct ib_xrcd
*mlx5_ib_alloc_xrcd(struct ib_device
*ibdev
,
5901 struct ib_udata
*udata
)
5903 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
5904 struct mlx5_ib_xrcd
*xrcd
;
5907 if (!MLX5_CAP_GEN(dev
->mdev
, xrc
))
5908 return ERR_PTR(-ENOSYS
);
5910 xrcd
= kmalloc(sizeof(*xrcd
), GFP_KERNEL
);
5912 return ERR_PTR(-ENOMEM
);
5914 err
= mlx5_cmd_xrcd_alloc(dev
->mdev
, &xrcd
->xrcdn
, 0);
5917 return ERR_PTR(-ENOMEM
);
5920 return &xrcd
->ibxrcd
;
5923 int mlx5_ib_dealloc_xrcd(struct ib_xrcd
*xrcd
, struct ib_udata
*udata
)
5925 struct mlx5_ib_dev
*dev
= to_mdev(xrcd
->device
);
5926 u32 xrcdn
= to_mxrcd(xrcd
)->xrcdn
;
5929 err
= mlx5_cmd_xrcd_dealloc(dev
->mdev
, xrcdn
, 0);
5931 mlx5_ib_warn(dev
, "failed to dealloc xrcdn 0x%x\n", xrcdn
);
5937 static void mlx5_ib_wq_event(struct mlx5_core_qp
*core_qp
, int type
)
5939 struct mlx5_ib_rwq
*rwq
= to_mibrwq(core_qp
);
5940 struct mlx5_ib_dev
*dev
= to_mdev(rwq
->ibwq
.device
);
5941 struct ib_event event
;
5943 if (rwq
->ibwq
.event_handler
) {
5944 event
.device
= rwq
->ibwq
.device
;
5945 event
.element
.wq
= &rwq
->ibwq
;
5947 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
5948 event
.event
= IB_EVENT_WQ_FATAL
;
5951 mlx5_ib_warn(dev
, "Unexpected event type %d on WQ %06x\n", type
, core_qp
->qpn
);
5955 rwq
->ibwq
.event_handler(&event
, rwq
->ibwq
.wq_context
);
5959 static int set_delay_drop(struct mlx5_ib_dev
*dev
)
5963 mutex_lock(&dev
->delay_drop
.lock
);
5964 if (dev
->delay_drop
.activate
)
5967 err
= mlx5_core_set_delay_drop(dev
->mdev
, dev
->delay_drop
.timeout
);
5971 dev
->delay_drop
.activate
= true;
5973 mutex_unlock(&dev
->delay_drop
.lock
);
5976 atomic_inc(&dev
->delay_drop
.rqs_cnt
);
5980 static int create_rq(struct mlx5_ib_rwq
*rwq
, struct ib_pd
*pd
,
5981 struct ib_wq_init_attr
*init_attr
)
5983 struct mlx5_ib_dev
*dev
;
5984 int has_net_offloads
;
5992 dev
= to_mdev(pd
->device
);
5994 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + sizeof(u64
) * rwq
->rq_num_pas
;
5995 in
= kvzalloc(inlen
, GFP_KERNEL
);
5999 MLX5_SET(create_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
6000 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
6001 MLX5_SET(rqc
, rqc
, mem_rq_type
,
6002 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
6003 MLX5_SET(rqc
, rqc
, user_index
, rwq
->user_index
);
6004 MLX5_SET(rqc
, rqc
, cqn
, to_mcq(init_attr
->cq
)->mcq
.cqn
);
6005 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
6006 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
6007 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
6008 MLX5_SET(wq
, wq
, wq_type
,
6009 rwq
->create_flags
& MLX5_IB_WQ_FLAGS_STRIDING_RQ
?
6010 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ
: MLX5_WQ_TYPE_CYCLIC
);
6011 if (init_attr
->create_flags
& IB_WQ_FLAGS_PCI_WRITE_END_PADDING
) {
6012 if (!MLX5_CAP_GEN(dev
->mdev
, end_pad
)) {
6013 mlx5_ib_dbg(dev
, "Scatter end padding is not supported\n");
6017 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
6020 MLX5_SET(wq
, wq
, log_wq_stride
, rwq
->log_rq_stride
);
6021 if (rwq
->create_flags
& MLX5_IB_WQ_FLAGS_STRIDING_RQ
) {
6023 * In Firmware number of strides in each WQE is:
6024 * "512 * 2^single_wqe_log_num_of_strides"
6025 * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
6026 * accepted as 0 to 9
6028 static const u8 fw_map
[] = { 10, 11, 12, 13, 14, 15, 0, 1,
6029 2, 3, 4, 5, 6, 7, 8, 9 };
6030 MLX5_SET(wq
, wq
, two_byte_shift_en
, rwq
->two_byte_shift_en
);
6031 MLX5_SET(wq
, wq
, log_wqe_stride_size
,
6032 rwq
->single_stride_log_num_of_bytes
-
6033 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
);
6034 MLX5_SET(wq
, wq
, log_wqe_num_of_strides
,
6035 fw_map
[rwq
->log_num_strides
-
6036 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
]);
6038 MLX5_SET(wq
, wq
, log_wq_sz
, rwq
->log_rq_size
);
6039 MLX5_SET(wq
, wq
, pd
, to_mpd(pd
)->pdn
);
6040 MLX5_SET(wq
, wq
, page_offset
, rwq
->rq_page_offset
);
6041 MLX5_SET(wq
, wq
, log_wq_pg_sz
, rwq
->log_page_size
);
6042 MLX5_SET(wq
, wq
, wq_signature
, rwq
->wq_sig
);
6043 MLX5_SET64(wq
, wq
, dbr_addr
, rwq
->db
.dma
);
6044 has_net_offloads
= MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
);
6045 if (init_attr
->create_flags
& IB_WQ_FLAGS_CVLAN_STRIPPING
) {
6046 if (!(has_net_offloads
&& MLX5_CAP_ETH(dev
->mdev
, vlan_cap
))) {
6047 mlx5_ib_dbg(dev
, "VLAN offloads are not supported\n");
6052 MLX5_SET(rqc
, rqc
, vsd
, 1);
6054 if (init_attr
->create_flags
& IB_WQ_FLAGS_SCATTER_FCS
) {
6055 if (!(has_net_offloads
&& MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
))) {
6056 mlx5_ib_dbg(dev
, "Scatter FCS is not supported\n");
6060 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
6062 if (init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
) {
6063 if (!(dev
->ib_dev
.attrs
.raw_packet_caps
&
6064 IB_RAW_PACKET_CAP_DELAY_DROP
)) {
6065 mlx5_ib_dbg(dev
, "Delay drop is not supported\n");
6069 MLX5_SET(rqc
, rqc
, delay_drop_en
, 1);
6071 rq_pas0
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
6072 mlx5_ib_populate_pas(dev
, rwq
->umem
, rwq
->page_shift
, rq_pas0
, 0);
6073 err
= mlx5_core_create_rq_tracked(dev
->mdev
, in
, inlen
, &rwq
->core_qp
);
6074 if (!err
&& init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
) {
6075 err
= set_delay_drop(dev
);
6077 mlx5_ib_warn(dev
, "Failed to enable delay drop err=%d\n",
6079 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rwq
->core_qp
);
6081 rwq
->create_flags
|= MLX5_IB_WQ_FLAGS_DELAY_DROP
;
6089 static int set_user_rq_size(struct mlx5_ib_dev
*dev
,
6090 struct ib_wq_init_attr
*wq_init_attr
,
6091 struct mlx5_ib_create_wq
*ucmd
,
6092 struct mlx5_ib_rwq
*rwq
)
6094 /* Sanity check RQ size before proceeding */
6095 if (wq_init_attr
->max_wr
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_wq_sz
)))
6098 if (!ucmd
->rq_wqe_count
)
6101 rwq
->wqe_count
= ucmd
->rq_wqe_count
;
6102 rwq
->wqe_shift
= ucmd
->rq_wqe_shift
;
6103 if (check_shl_overflow(rwq
->wqe_count
, rwq
->wqe_shift
, &rwq
->buf_size
))
6106 rwq
->log_rq_stride
= rwq
->wqe_shift
;
6107 rwq
->log_rq_size
= ilog2(rwq
->wqe_count
);
6111 static bool log_of_strides_valid(struct mlx5_ib_dev
*dev
, u32 log_num_strides
)
6113 if ((log_num_strides
> MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
) ||
6114 (log_num_strides
< MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
))
6117 if (!MLX5_CAP_GEN(dev
->mdev
, ext_stride_num_range
) &&
6118 (log_num_strides
< MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
))
6124 static int prepare_user_rq(struct ib_pd
*pd
,
6125 struct ib_wq_init_attr
*init_attr
,
6126 struct ib_udata
*udata
,
6127 struct mlx5_ib_rwq
*rwq
)
6129 struct mlx5_ib_dev
*dev
= to_mdev(pd
->device
);
6130 struct mlx5_ib_create_wq ucmd
= {};
6132 size_t required_cmd_sz
;
6134 required_cmd_sz
= offsetof(typeof(ucmd
), single_stride_log_num_of_bytes
)
6135 + sizeof(ucmd
.single_stride_log_num_of_bytes
);
6136 if (udata
->inlen
< required_cmd_sz
) {
6137 mlx5_ib_dbg(dev
, "invalid inlen\n");
6141 if (udata
->inlen
> sizeof(ucmd
) &&
6142 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
6143 udata
->inlen
- sizeof(ucmd
))) {
6144 mlx5_ib_dbg(dev
, "inlen is not supported\n");
6148 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
))) {
6149 mlx5_ib_dbg(dev
, "copy failed\n");
6153 if (ucmd
.comp_mask
& (~MLX5_IB_CREATE_WQ_STRIDING_RQ
)) {
6154 mlx5_ib_dbg(dev
, "invalid comp mask\n");
6156 } else if (ucmd
.comp_mask
& MLX5_IB_CREATE_WQ_STRIDING_RQ
) {
6157 if (!MLX5_CAP_GEN(dev
->mdev
, striding_rq
)) {
6158 mlx5_ib_dbg(dev
, "Striding RQ is not supported\n");
6161 if ((ucmd
.single_stride_log_num_of_bytes
<
6162 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
) ||
6163 (ucmd
.single_stride_log_num_of_bytes
>
6164 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
)) {
6165 mlx5_ib_dbg(dev
, "Invalid log stride size (%u. Range is %u - %u)\n",
6166 ucmd
.single_stride_log_num_of_bytes
,
6167 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
,
6168 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
);
6171 if (!log_of_strides_valid(dev
,
6172 ucmd
.single_wqe_log_num_of_strides
)) {
6175 "Invalid log num strides (%u. Range is %u - %u)\n",
6176 ucmd
.single_wqe_log_num_of_strides
,
6177 MLX5_CAP_GEN(dev
->mdev
, ext_stride_num_range
) ?
6178 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
:
6179 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
,
6180 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
);
6183 rwq
->single_stride_log_num_of_bytes
=
6184 ucmd
.single_stride_log_num_of_bytes
;
6185 rwq
->log_num_strides
= ucmd
.single_wqe_log_num_of_strides
;
6186 rwq
->two_byte_shift_en
= !!ucmd
.two_byte_shift_en
;
6187 rwq
->create_flags
|= MLX5_IB_WQ_FLAGS_STRIDING_RQ
;
6190 err
= set_user_rq_size(dev
, init_attr
, &ucmd
, rwq
);
6192 mlx5_ib_dbg(dev
, "err %d\n", err
);
6196 err
= create_user_rq(dev
, pd
, udata
, rwq
, &ucmd
);
6198 mlx5_ib_dbg(dev
, "err %d\n", err
);
6202 rwq
->user_index
= ucmd
.user_index
;
6206 struct ib_wq
*mlx5_ib_create_wq(struct ib_pd
*pd
,
6207 struct ib_wq_init_attr
*init_attr
,
6208 struct ib_udata
*udata
)
6210 struct mlx5_ib_dev
*dev
;
6211 struct mlx5_ib_rwq
*rwq
;
6212 struct mlx5_ib_create_wq_resp resp
= {};
6213 size_t min_resp_len
;
6217 return ERR_PTR(-ENOSYS
);
6219 min_resp_len
= offsetof(typeof(resp
), reserved
) + sizeof(resp
.reserved
);
6220 if (udata
->outlen
&& udata
->outlen
< min_resp_len
)
6221 return ERR_PTR(-EINVAL
);
6223 if (!capable(CAP_SYS_RAWIO
) &&
6224 init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
)
6225 return ERR_PTR(-EPERM
);
6227 dev
= to_mdev(pd
->device
);
6228 switch (init_attr
->wq_type
) {
6230 rwq
= kzalloc(sizeof(*rwq
), GFP_KERNEL
);
6232 return ERR_PTR(-ENOMEM
);
6233 err
= prepare_user_rq(pd
, init_attr
, udata
, rwq
);
6236 err
= create_rq(rwq
, pd
, init_attr
);
6241 mlx5_ib_dbg(dev
, "unsupported wq type %d\n",
6242 init_attr
->wq_type
);
6243 return ERR_PTR(-EINVAL
);
6246 rwq
->ibwq
.wq_num
= rwq
->core_qp
.qpn
;
6247 rwq
->ibwq
.state
= IB_WQS_RESET
;
6248 if (udata
->outlen
) {
6249 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
6250 sizeof(resp
.response_length
);
6251 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
6256 rwq
->core_qp
.event
= mlx5_ib_wq_event
;
6257 rwq
->ibwq
.event_handler
= init_attr
->event_handler
;
6261 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rwq
->core_qp
);
6263 destroy_user_rq(dev
, pd
, rwq
, udata
);
6266 return ERR_PTR(err
);
6269 void mlx5_ib_destroy_wq(struct ib_wq
*wq
, struct ib_udata
*udata
)
6271 struct mlx5_ib_dev
*dev
= to_mdev(wq
->device
);
6272 struct mlx5_ib_rwq
*rwq
= to_mrwq(wq
);
6274 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rwq
->core_qp
);
6275 destroy_user_rq(dev
, wq
->pd
, rwq
, udata
);
6279 struct ib_rwq_ind_table
*mlx5_ib_create_rwq_ind_table(struct ib_device
*device
,
6280 struct ib_rwq_ind_table_init_attr
*init_attr
,
6281 struct ib_udata
*udata
)
6283 struct mlx5_ib_dev
*dev
= to_mdev(device
);
6284 struct mlx5_ib_rwq_ind_table
*rwq_ind_tbl
;
6285 int sz
= 1 << init_attr
->log_ind_tbl_size
;
6286 struct mlx5_ib_create_rwq_ind_tbl_resp resp
= {};
6287 size_t min_resp_len
;
6294 if (udata
->inlen
> 0 &&
6295 !ib_is_udata_cleared(udata
, 0,
6297 return ERR_PTR(-EOPNOTSUPP
);
6299 if (init_attr
->log_ind_tbl_size
>
6300 MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
)) {
6301 mlx5_ib_dbg(dev
, "log_ind_tbl_size = %d is bigger than supported = %d\n",
6302 init_attr
->log_ind_tbl_size
,
6303 MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
));
6304 return ERR_PTR(-EINVAL
);
6307 min_resp_len
= offsetof(typeof(resp
), reserved
) + sizeof(resp
.reserved
);
6308 if (udata
->outlen
&& udata
->outlen
< min_resp_len
)
6309 return ERR_PTR(-EINVAL
);
6311 rwq_ind_tbl
= kzalloc(sizeof(*rwq_ind_tbl
), GFP_KERNEL
);
6313 return ERR_PTR(-ENOMEM
);
6315 inlen
= MLX5_ST_SZ_BYTES(create_rqt_in
) + sizeof(u32
) * sz
;
6316 in
= kvzalloc(inlen
, GFP_KERNEL
);
6322 rqtc
= MLX5_ADDR_OF(create_rqt_in
, in
, rqt_context
);
6324 MLX5_SET(rqtc
, rqtc
, rqt_actual_size
, sz
);
6325 MLX5_SET(rqtc
, rqtc
, rqt_max_size
, sz
);
6327 for (i
= 0; i
< sz
; i
++)
6328 MLX5_SET(rqtc
, rqtc
, rq_num
[i
], init_attr
->ind_tbl
[i
]->wq_num
);
6330 rwq_ind_tbl
->uid
= to_mpd(init_attr
->ind_tbl
[0]->pd
)->uid
;
6331 MLX5_SET(create_rqt_in
, in
, uid
, rwq_ind_tbl
->uid
);
6333 err
= mlx5_core_create_rqt(dev
->mdev
, in
, inlen
, &rwq_ind_tbl
->rqtn
);
6339 rwq_ind_tbl
->ib_rwq_ind_tbl
.ind_tbl_num
= rwq_ind_tbl
->rqtn
;
6340 if (udata
->outlen
) {
6341 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
6342 sizeof(resp
.response_length
);
6343 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
6348 return &rwq_ind_tbl
->ib_rwq_ind_tbl
;
6351 mlx5_cmd_destroy_rqt(dev
->mdev
, rwq_ind_tbl
->rqtn
, rwq_ind_tbl
->uid
);
6354 return ERR_PTR(err
);
6357 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table
*ib_rwq_ind_tbl
)
6359 struct mlx5_ib_rwq_ind_table
*rwq_ind_tbl
= to_mrwq_ind_table(ib_rwq_ind_tbl
);
6360 struct mlx5_ib_dev
*dev
= to_mdev(ib_rwq_ind_tbl
->device
);
6362 mlx5_cmd_destroy_rqt(dev
->mdev
, rwq_ind_tbl
->rqtn
, rwq_ind_tbl
->uid
);
6368 int mlx5_ib_modify_wq(struct ib_wq
*wq
, struct ib_wq_attr
*wq_attr
,
6369 u32 wq_attr_mask
, struct ib_udata
*udata
)
6371 struct mlx5_ib_dev
*dev
= to_mdev(wq
->device
);
6372 struct mlx5_ib_rwq
*rwq
= to_mrwq(wq
);
6373 struct mlx5_ib_modify_wq ucmd
= {};
6374 size_t required_cmd_sz
;
6382 required_cmd_sz
= offsetof(typeof(ucmd
), reserved
) + sizeof(ucmd
.reserved
);
6383 if (udata
->inlen
< required_cmd_sz
)
6386 if (udata
->inlen
> sizeof(ucmd
) &&
6387 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
6388 udata
->inlen
- sizeof(ucmd
)))
6391 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
)))
6394 if (ucmd
.comp_mask
|| ucmd
.reserved
)
6397 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
6398 in
= kvzalloc(inlen
, GFP_KERNEL
);
6402 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
6404 curr_wq_state
= (wq_attr_mask
& IB_WQ_CUR_STATE
) ?
6405 wq_attr
->curr_wq_state
: wq
->state
;
6406 wq_state
= (wq_attr_mask
& IB_WQ_STATE
) ?
6407 wq_attr
->wq_state
: curr_wq_state
;
6408 if (curr_wq_state
== IB_WQS_ERR
)
6409 curr_wq_state
= MLX5_RQC_STATE_ERR
;
6410 if (wq_state
== IB_WQS_ERR
)
6411 wq_state
= MLX5_RQC_STATE_ERR
;
6412 MLX5_SET(modify_rq_in
, in
, rq_state
, curr_wq_state
);
6413 MLX5_SET(modify_rq_in
, in
, uid
, to_mpd(wq
->pd
)->uid
);
6414 MLX5_SET(rqc
, rqc
, state
, wq_state
);
6416 if (wq_attr_mask
& IB_WQ_FLAGS
) {
6417 if (wq_attr
->flags_mask
& IB_WQ_FLAGS_CVLAN_STRIPPING
) {
6418 if (!(MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
6419 MLX5_CAP_ETH(dev
->mdev
, vlan_cap
))) {
6420 mlx5_ib_dbg(dev
, "VLAN offloads are not "
6425 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
6426 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD
);
6427 MLX5_SET(rqc
, rqc
, vsd
,
6428 (wq_attr
->flags
& IB_WQ_FLAGS_CVLAN_STRIPPING
) ? 0 : 1);
6431 if (wq_attr
->flags_mask
& IB_WQ_FLAGS_PCI_WRITE_END_PADDING
) {
6432 mlx5_ib_dbg(dev
, "Modifying scatter end padding is not supported\n");
6438 if (curr_wq_state
== IB_WQS_RESET
&& wq_state
== IB_WQS_RDY
) {
6441 set_id
= mlx5_ib_get_counters_id(dev
, 0);
6442 if (MLX5_CAP_GEN(dev
->mdev
, modify_rq_counter_set_id
)) {
6443 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
6444 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID
);
6445 MLX5_SET(rqc
, rqc
, counter_set_id
, set_id
);
6449 "Receive WQ counters are not supported on current FW\n");
6452 err
= mlx5_core_modify_rq(dev
->mdev
, rwq
->core_qp
.qpn
, in
, inlen
);
6454 rwq
->ibwq
.state
= (wq_state
== MLX5_RQC_STATE_ERR
) ? IB_WQS_ERR
: wq_state
;
6461 struct mlx5_ib_drain_cqe
{
6463 struct completion done
;
6466 static void mlx5_ib_drain_qp_done(struct ib_cq
*cq
, struct ib_wc
*wc
)
6468 struct mlx5_ib_drain_cqe
*cqe
= container_of(wc
->wr_cqe
,
6469 struct mlx5_ib_drain_cqe
,
6472 complete(&cqe
->done
);
6475 /* This function returns only once the drained WR was completed */
6476 static void handle_drain_completion(struct ib_cq
*cq
,
6477 struct mlx5_ib_drain_cqe
*sdrain
,
6478 struct mlx5_ib_dev
*dev
)
6480 struct mlx5_core_dev
*mdev
= dev
->mdev
;
6482 if (cq
->poll_ctx
== IB_POLL_DIRECT
) {
6483 while (wait_for_completion_timeout(&sdrain
->done
, HZ
/ 10) <= 0)
6484 ib_process_cq_direct(cq
, -1);
6488 if (mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
6489 struct mlx5_ib_cq
*mcq
= to_mcq(cq
);
6490 bool triggered
= false;
6491 unsigned long flags
;
6493 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
6494 /* Make sure that the CQ handler won't run if wasn't run yet */
6495 if (!mcq
->mcq
.reset_notify_added
)
6496 mcq
->mcq
.reset_notify_added
= 1;
6499 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
6502 /* Wait for any scheduled/running task to be ended */
6503 switch (cq
->poll_ctx
) {
6504 case IB_POLL_SOFTIRQ
:
6505 irq_poll_disable(&cq
->iop
);
6506 irq_poll_enable(&cq
->iop
);
6508 case IB_POLL_WORKQUEUE
:
6509 cancel_work_sync(&cq
->work
);
6516 /* Run the CQ handler - this makes sure that the drain WR will
6517 * be processed if wasn't processed yet.
6519 mcq
->mcq
.comp(&mcq
->mcq
, NULL
);
6522 wait_for_completion(&sdrain
->done
);
6525 void mlx5_ib_drain_sq(struct ib_qp
*qp
)
6527 struct ib_cq
*cq
= qp
->send_cq
;
6528 struct ib_qp_attr attr
= { .qp_state
= IB_QPS_ERR
};
6529 struct mlx5_ib_drain_cqe sdrain
;
6530 const struct ib_send_wr
*bad_swr
;
6531 struct ib_rdma_wr swr
= {
6534 { .wr_cqe
= &sdrain
.cqe
, },
6535 .opcode
= IB_WR_RDMA_WRITE
,
6539 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
6540 struct mlx5_core_dev
*mdev
= dev
->mdev
;
6542 ret
= ib_modify_qp(qp
, &attr
, IB_QP_STATE
);
6543 if (ret
&& mdev
->state
!= MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
6544 WARN_ONCE(ret
, "failed to drain send queue: %d\n", ret
);
6548 sdrain
.cqe
.done
= mlx5_ib_drain_qp_done
;
6549 init_completion(&sdrain
.done
);
6551 ret
= _mlx5_ib_post_send(qp
, &swr
.wr
, &bad_swr
, true);
6553 WARN_ONCE(ret
, "failed to drain send queue: %d\n", ret
);
6557 handle_drain_completion(cq
, &sdrain
, dev
);
6560 void mlx5_ib_drain_rq(struct ib_qp
*qp
)
6562 struct ib_cq
*cq
= qp
->recv_cq
;
6563 struct ib_qp_attr attr
= { .qp_state
= IB_QPS_ERR
};
6564 struct mlx5_ib_drain_cqe rdrain
;
6565 struct ib_recv_wr rwr
= {};
6566 const struct ib_recv_wr
*bad_rwr
;
6568 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
6569 struct mlx5_core_dev
*mdev
= dev
->mdev
;
6571 ret
= ib_modify_qp(qp
, &attr
, IB_QP_STATE
);
6572 if (ret
&& mdev
->state
!= MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
6573 WARN_ONCE(ret
, "failed to drain recv queue: %d\n", ret
);
6577 rwr
.wr_cqe
= &rdrain
.cqe
;
6578 rdrain
.cqe
.done
= mlx5_ib_drain_qp_done
;
6579 init_completion(&rdrain
.done
);
6581 ret
= _mlx5_ib_post_recv(qp
, &rwr
, &bad_rwr
, true);
6583 WARN_ONCE(ret
, "failed to drain recv queue: %d\n", ret
);
6587 handle_drain_completion(cq
, &rdrain
, dev
);
6591 * Bind a qp to a counter. If @counter is NULL then bind the qp to
6592 * the default counter
6594 int mlx5_ib_qp_set_counter(struct ib_qp
*qp
, struct rdma_counter
*counter
)
6596 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
6597 struct mlx5_ib_qp
*mqp
= to_mqp(qp
);
6600 mutex_lock(&mqp
->mutex
);
6601 if (mqp
->state
== IB_QPS_RESET
) {
6602 qp
->counter
= counter
;
6606 if (!MLX5_CAP_GEN(dev
->mdev
, rts2rts_qp_counters_set_id
)) {
6611 if (mqp
->state
== IB_QPS_RTS
) {
6612 err
= __mlx5_ib_qp_set_counter(qp
, counter
);
6614 qp
->counter
= counter
;
6619 mqp
->counter_pending
= 1;
6620 qp
->counter
= counter
;
6623 mutex_unlock(&mqp
->mutex
);