/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
			     struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
			       struct mlx4_ib_cq *recv_cq);
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
			      struct ib_udata *udata);
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
enum mlx4_ib_source_type {
	MLX4_IB_QP_SRC	= 0,
	MLX4_IB_RWQ_SRC	= 1,
};

struct mlx4_ib_qp_event_work {
	struct work_struct work;
	struct mlx4_qp *qp;
	enum mlx4_event type;
};

static struct workqueue_struct *mlx4_ib_qp_event_wq;
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp;
	int i;

	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
			    qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	if (proxy_sqp)
		return 1;

	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0;
	int i;

	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
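
/*
 * Work queue entries live in one contiguous buffer: rq.offset/sq.offset
 * select the region and (n << wqe_shift) indexes entry n within it, since
 * every entry of a queue has a fixed power-of-two stride of 2^wqe_shift
 * bytes.  For example, with wqe_shift == 6 the third send WQE starts at
 * sq.offset + (3 << 6) = sq.offset + 192 bytes.
 */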
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with 0xffffffff, except for
 * the very first chunk of the WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	__be32 *wqe;
	int i;
	int s;
	void *buf;
	struct mlx4_wqe_ctrl_seg *ctrl;

	buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
	s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
	for (i = 64; i < s; i += 64) {
		wqe = buf + i;
		*wqe = cpu_to_be32(0xffffffff);
	}
}
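
/*
 * The size stamped above is recovered from the control segment itself:
 * the low bits of fence_size hold the descriptor size in 16-byte units,
 * so (fence_size & 0x3f) << 4 is the WQE length in bytes.  Only the first
 * 64-byte chunk is left intact, so a WQE the HW prefetched before it was
 * fully written is recognized as invalid.
 */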
static void mlx4_ib_handle_qp_event(struct work_struct *_work)
{
	struct mlx4_ib_qp_event_work *qpe_work =
		container_of(_work, struct mlx4_ib_qp_event_work, work);
	struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
	struct ib_event event = {};

	event.device = ibqp->device;
	event.element.qp = ibqp;

	switch (qpe_work->type) {
	case MLX4_EVENT_TYPE_PATH_MIG:
		event.event = IB_EVENT_PATH_MIG;
		break;
	case MLX4_EVENT_TYPE_COMM_EST:
		event.event = IB_EVENT_COMM_EST;
		break;
	case MLX4_EVENT_TYPE_SQ_DRAINED:
		event.event = IB_EVENT_SQ_DRAINED;
		break;
	case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;
	case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		event.event = IB_EVENT_PATH_MIG_ERR;
		break;
	case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	default:
		pr_warn("Unexpected event type %d on QP %06x\n",
			qpe_work->type, qpe_work->qp->qpn);
		goto out;
	}

	ibqp->event_handler(&event, ibqp->qp_context);

out:
	mlx4_put_qp(qpe_work->qp);
	kfree(qpe_work);
}
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct mlx4_ib_qp_event_work *qpe_work;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (!ibqp->event_handler)
		goto out_no_handler;

	qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
	if (!qpe_work)
		goto out_no_handler;

	qpe_work->qp = qp;
	qpe_work->type = type;
	INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
	queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
	return;

out_no_handler:
	mlx4_put_qp(qp);
}
static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
			    type, qp->qpn);
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
		       u32 inl_recv_sz)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr || inl_recv_sz)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
		u32 wqe_size;

		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
				inl_recv_sz > max_inl_recv_sz))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		wqe_size	 = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
		qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
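
/*
 * Both wqe_cnt and max_gs are rounded up to powers of two, e.g. a request
 * for max_recv_wr = 100 and max_recv_sge = 3 yields wqe_cnt = 128 and
 * max_gs = 4, and wqe_shift is then the log2 of the larger of the scatter
 * list size and the requested inline-receive size.
 */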
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	/*
	 * We need to leave 2 KB + 1 WR of headroom in the SQ to
	 * allow HW to prefetch.
	 */
	qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
					    qp->sq_spare_wqes);

	qp->sq.max_gs =
		(min(dev->dev->caps.max_sq_desc_sz,
		     (1 << qp->sq.wqe_shift)) -
		 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		qp->sq.wqe_cnt - qp->sq_spare_wqes;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
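
/*
 * Note that sq.max_post is reported as wqe_cnt - sq_spare_wqes, so the
 * headroom WQEs reserved above for HW prefetch are never visible to the
 * caller as postable work requests.
 */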
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	u32 cnt;

	/* Sanity check SQ size before proceeding */
	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > dev->dev->caps.max_wqes)
		return -EINVAL;
	if (ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
			      GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
			kfree(qp->sqp_proxy_rcv[i].addr);
			goto err;
		}
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}

static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
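
/*
 * Each proxy/tunnel receive entry gets its own kmalloc'ed
 * mlx4_ib_proxy_sqp_hdr buffer, DMA-mapped per RQ WQE; on failure
 * alloc_proxy_bufs() unwinds the mappings it already created, and
 * free_proxy_bufs() releases them all the same way at teardown.
 */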
static bool qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return false;

	return !attr->srq;
}

static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy)
			return !!dev->caps.spec_qps[i].qp0_qkey;
	}
	return 0;
}
static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
				    struct mlx4_ib_qp *qp)
{
	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	mlx4_counter_free(dev->dev, qp->counter_index->index);
	list_del(&qp->counter_index->list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	kfree(qp->counter_index);
	qp->counter_index = NULL;
}
static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
		      struct ib_qp_init_attr *init_attr,
		      struct mlx4_ib_create_qp_rss *ucmd)
{
	rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
		(init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);

	if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
	    (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
		memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		pr_debug("RX Hash function is not supported\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4	|
					       MLX4_IB_RX_HASH_DST_IPV4	|
					       MLX4_IB_RX_HASH_SRC_IPV6	|
					       MLX4_IB_RX_HASH_DST_IPV6	|
					       MLX4_IB_RX_HASH_SRC_PORT_TCP |
					       MLX4_IB_RX_HASH_DST_PORT_TCP |
					       MLX4_IB_RX_HASH_SRC_PORT_UDP |
					       MLX4_IB_RX_HASH_DST_PORT_UDP |
					       MLX4_IB_RX_HASH_INNER)) {
		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
			 ucmd->rx_hash_fields_mask);
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		rss_ctx->flags = MLX4_RSS_IPV4;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		rss_ctx->flags |= MLX4_RSS_IPV6;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
			pr_debug("RX Hash fields_mask for UDP is not supported\n");
			return (-EOPNOTSUPP);
		}

		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
		if (dev->dev->caps.tunnel_offload_mode ==
		    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/*
			 * Hash according to inner headers if exist, otherwise
			 * according to outer headers.
			 */
			rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
		} else {
			pr_debug("RSS Hash for inner headers isn't supported\n");
			return (-EOPNOTSUPP);
		}
	}

	return 0;
}
static int create_qp_rss(struct mlx4_ib_dev *dev,
			 struct ib_qp_init_attr *init_attr,
			 struct mlx4_ib_create_qp_rss *ucmd,
			 struct mlx4_ib_qp *qp)
{
	int qpn;
	int err;

	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;

	err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
	if (err)
		return err;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
	qp->state = IB_QPS_RESET;

	/* Set dummy send resources to be compatible with HV and PRM */
	qp->sq_no_prefetch = 1;
	qp->sq.wqe_cnt = 1;
	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
	qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
	qp->mtt = (to_mqp(
		   (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;

	qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
	if (!qp->rss_ctx) {
		err = -ENOMEM;
		goto err_qp_alloc;
	}

	err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
	if (err)
		goto err;

	return 0;

err:
	kfree(qp->rss_ctx);

err_qp_alloc:
	mlx4_qp_remove(dev->dev, &qp->mqp);
	mlx4_qp_free(dev->dev, &qp->mqp);

err_qpn:
	mlx4_qp_release_range(dev->dev, qpn, 1);
	return err;
}
static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
				  struct ib_qp_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;
	int err;

	if (!udata) {
		pr_debug("RSS QP with NULL udata\n");
		return -EINVAL;
	}

	required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
					sizeof(ucmd.reserved1);
	if (udata->inlen < required_cmd_sz) {
		pr_debug("invalid inlen\n");
		return -EINVAL;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		pr_debug("copy failed\n");
		return -EFAULT;
	}

	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
		return -EOPNOTSUPP;

	if (ucmd.comp_mask || ucmd.reserved1)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
		pr_debug("RSS QP with unsupported QP type %d\n",
			 init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		pr_debug("RSS QP doesn't support create flags\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->send_cq || init_attr->cap.max_send_wr) {
		pr_debug("RSS QP with unsupported send attributes\n");
		return -EOPNOTSUPP;
	}

	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
	if (err)
		return err;

	qp->ibqp.qp_num = qp->mqp.qpn;
	return 0;
}
/*
 * This function allocates a WQN from a range which is consecutive and aligned
 * to its size. In case the range is full, then it creates a new range and
 * allocates WQN from it. The new range will be used for following allocations.
 */
static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
			     struct mlx4_ib_qp *qp, int range_size, int *wqn)
{
	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
	struct mlx4_wqn_range *range;
	int err = 0;

	mutex_lock(&context->wqn_ranges_mutex);

	range = list_first_entry_or_null(&context->wqn_ranges_list,
					 struct mlx4_wqn_range, list);

	if (!range || (range->refcount == range->size) || range->dirty) {
		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			err = -ENOMEM;
			goto out;
		}

		err = mlx4_qp_reserve_range(dev->dev, range_size,
					    range_size, &range->base_wqn, 0,
					    qp->mqp.usage);
		if (err) {
			kfree(range);
			goto out;
		}

		range->size = range_size;
		list_add(&range->list, &context->wqn_ranges_list);
	} else if (range_size != 1) {
		/*
		 * Requesting a new range (>1) when last range is still open, is
		 * not valid.
		 */
		err = -EINVAL;
		goto out;
	}

	qp->wqn_range = range;

	*wqn = range->base_wqn + range->refcount;

	range->refcount++;

out:
	mutex_unlock(&context->wqn_ranges_mutex);

	return err;
}
static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
				struct mlx4_ib_qp *qp, bool dirty_release)
{
	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
	struct mlx4_wqn_range *range;

	mutex_lock(&context->wqn_ranges_mutex);

	range = qp->wqn_range;

	range->refcount--;
	if (!range->refcount) {
		mlx4_qp_release_range(dev->dev, range->base_wqn,
				      range->size);
		list_del(&range->list);
		kfree(range);
	} else if (dirty_release) {
	/*
	 * A range which one of its WQNs is destroyed, won't be able to be
	 * reused for further WQN allocations.
	 * The next created WQ will allocate a new range.
	 */
		range->dirty = true;
	}

	mutex_unlock(&context->wqn_ranges_mutex);
}
static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
		     struct ib_udata *udata, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	int qpn;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_cq *mcq;
	unsigned long flags;
	int range_size;
	struct mlx4_ib_create_wq wq;
	size_t copy_len;
	int shift;
	int n;

	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state = IB_QPS_RESET;

	copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);

	if (ib_copy_from_udata(&wq, udata, copy_len)) {
		err = -EFAULT;
		goto err;
	}

	if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
	    wq.reserved[2]) {
		pr_debug("user command isn't supported\n");
		err = -EOPNOTSUPP;
		goto err;
	}

	if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
		pr_debug("WQN range size must be equal or smaller than %d\n",
			 dev->dev->caps.max_rss_tbl_sz);
		err = -EOPNOTSUPP;
		goto err;
	}
	range_size = 1 << wq.log_range_size;

	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
		qp->flags |= MLX4_IB_QP_SCATTER_FCS;

	err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
	if (err)
		goto err;

	qp->sq_no_prefetch = 1;
	qp->sq.wqe_cnt = 1;
	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << qp->sq.wqe_shift);

	qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
	if (IS_ERR(qp->umem)) {
		err = PTR_ERR(qp->umem);
		goto err;
	}

	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
	if (err)
		goto err_mtt;

	err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
	if (err)
		goto err_mtt;
	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;

	err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
	if (err)
		goto err_wrid;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_wq_event;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Maintain device to QPs access, needed for further handling
	 * via reset flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling
	 * via reset flow
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
	return 0;

err_qpn:
	mlx4_ib_release_wqn(context, qp, 0);
err_wrid:
	mlx4_ib_db_unmap_user(context, &qp->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
err_buf:
	ib_umem_release(qp->umem);
err:
	return err;
}
static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn,
			    struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	int qpn;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
	struct mlx4_ib_cq *mcq;
	unsigned long flags;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else {
				if (mlx4_is_master(dev->dev) ||
				    qp0_enabled_vf(dev->dev, sqpn))
					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
				else
					qp_type = MLX4_IB_QPT_PROXY_SMI;
			}
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
					     tnl_init->port))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (init_attr->qp_type == IB_QPT_SMI ||
	    init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
	    qp_type == MLX4_IB_QPT_GSI ||
	    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
			MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
		qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
		if (!qp->sqp)
			return -ENOMEM;
	}

	qp->mlx4_ib_qp_type = qp_type;

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (udata) {
		struct mlx4_ib_create_qp ucmd;
		size_t copy_len;
		int shift;
		int n;

		copy_len = sizeof(struct mlx4_ib_create_qp);

		if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
			err = -EFAULT;
			goto err;
		}

		qp->inl_recv_sz = ucmd.inl_recv_sz;

		if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
			if (!(dev->dev->caps.flags &
			      MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
				pr_debug("scatter FCS is unsupported\n");
				err = -EOPNOTSUPP;
				goto err;
			}

			qp->flags |= MLX4_IB_QP_SCATTER_FCS;
		}

		err = set_rq_size(dev, &init_attr->cap, udata,
				  qp_has_rq(init_attr), qp, qp->inl_recv_sz);
		if (err)
			goto err;

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem =
			ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
		qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = set_rq_size(dev, &init_attr->cap, udata,
				  qp_has_rq(init_attr), qp, 0);
		if (err)
			goto err;

		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
			if (dev->steering_support ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED)
				qp->flags |= MLX4_IB_QP_NETIF;
			else {
				err = -EINVAL;
				goto err;
			}
		}

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
				   &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
					     sizeof(u64), GFP_KERNEL);
		qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
					     sizeof(u64), GFP_KERNEL);
		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
		qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
		 * otherwise, the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
						    (init_attr->cap.max_send_wr ?
						     MLX4_RESERVE_ETH_BF_QP : 0) |
						    (init_attr->cap.max_recv_wr ?
						     MLX4_RESERVE_A0_QP : 0),
						    qp->mqp.usage);
		else
			if (qp->flags & MLX4_IB_QP_NETIF)
				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
			else
				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
							    &qpn, 0, qp->mqp.usage);
		if (err)
			goto err_proxy;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Maintain device to QPs access, needed for further handling
	 * via reset flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling
	 * via reset flow
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
	return 0;

err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (udata) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(context, &qp->db);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (!qp->umem)
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
	ib_umem_release(qp->umem);

err_db:
	if (!udata && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	kfree(qp->sqp);
	return err;
}
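
/*
 * The error labels above unwind strictly in reverse order of setup:
 * QPN reservation/steering, proxy buffers, wrid arrays or the user
 * doorbell mapping, the MTT, the buffer or umem, and finally the kernel
 * doorbell record.
 */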
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}
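
/*
 * When two distinct CQs are involved they are always locked in ascending
 * mcq.cqn order (and released in the opposite order), so two threads
 * locking the same pair with swapped send/recv roles cannot deadlock.
 */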
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
						     to_mcq(qp->ibwq.cq);
		*send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
						     *recv_cq;
		break;
	}
}
static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (qp->state != IB_QPS_RESET) {
		int i;

		for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size);
		     i++) {
			struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i];
			struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

			mutex_lock(&wq->mutex);

			wq->rss_usecnt--;

			mutex_unlock(&wq->mutex);
		}

		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);
	mlx4_qp_free(dev->dev, &qp->mqp);
	mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
	del_gid_entries(qp);
	kfree(qp->rss_ctx);
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      enum mlx4_ib_source_type src,
			      struct ib_udata *udata)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;
	unsigned long flags;

	if (qp->state != IB_QPS_RESET) {
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
			qp->pri.smac_port = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}

	get_cqs(qp, src, &send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(send_cq, recv_cq);

	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	list_del(&qp->cq_send_list);
	list_del(&qp->cq_recv_list);
	if (!udata) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
		else if (src == MLX4_IB_RWQ_SRC)
			mlx4_ib_release_wqn(
				rdma_udata_to_drv_context(
					udata,
					struct mlx4_ib_ucontext,
					ibucontext),
				qp, 1);
		else
			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
	}

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (udata) {
		if (qp->rq.wqe_cnt) {
			struct mlx4_ib_ucontext *mcontext =
				rdma_udata_to_drv_context(
					udata,
					struct mlx4_ib_ucontext,
					ibucontext);

			mlx4_ib_db_unmap_user(mcontext, &qp->db);
		}
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}
	ib_umem_release(qp->umem);

	del_gid_entries(qp);
}
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return  dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy;
	else
		return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
}
static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	int err;
	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
	u16 xrcdn = 0;

	if (init_attr->rwq_ind_tbl)
		return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);

	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP |
					MLX4_IB_SRIOV_SQP |
					MLX4_IB_QP_NETIF |
					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
		return -EINVAL;

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return -EINVAL;
	}

	if (init_attr->create_flags) {
		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
			return -EINVAL;

		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
		     init_attr->qp_type != IB_QPT_UD) ||
		    (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
		     init_attr->qp_type > IB_QPT_GSI) ||
		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
		     init_attr->qp_type != IB_QPT_GSI))
			return -EINVAL;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		fallthrough;
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return -ENOSYS;
		init_attr->recv_cq = init_attr->send_cq;
		fallthrough;
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
	case IB_QPT_UD:
		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		err = create_qp_common(pd, init_attr, udata, 0, qp);
		if (err)
			return err;

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		int sqpn;

		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
							1, 1, &sqpn, 0,
							MLX4_RES_USAGE_DRIVER);

			if (res)
				return res;
		} else {
			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
		}

		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		err = create_qp_common(pd, init_attr, udata, sqpn, qp);
		if (err)
			return err;

		if (init_attr->create_flags &
		    (MLX4_IB_SRIOV_SQP | MLX4_IB_SRIOV_TUNNEL_QP))
			/* Internal QP created with ib_create_qp */
			rdma_restrack_no_track(&qp->ibqp.res);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
			init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
		break;
	}
	default:
		/* Don't support raw QPs */
		return -EOPNOTSUPP;
	}
	return 0;
}
int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_device *device = ibqp->device;
	struct mlx4_ib_dev *dev = to_mdev(device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct ib_pd *pd = ibqp->pd;
	int ret;

	mutex_init(&qp->mutex);
	ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
	if (ret)
		return ret;

	if (init_attr->qp_type == IB_QPT_GSI &&
	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
		struct mlx4_ib_sqp *sqp = qp->sqp;
		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);

		if (is_eth &&
		    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
			init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
			sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);

			if (IS_ERR(sqp->roce_v2_gsi)) {
				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
				sqp->roce_v2_gsi = NULL;
			} else {
				to_mqp(sqp->roce_v2_gsi)->flags |=
					MLX4_IB_ROCE_V2_GSI_QP;
			}

			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
		}
	}
	return 0;
}
static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
	    dev->qp1_proxy[mqp->port - 1] == mqp) {
		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
		dev->qp1_proxy[mqp->port - 1] = NULL;
		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
	}

	if (mqp->counter_index)
		mlx4_ib_free_qp_counter(dev, mqp);

	if (qp->rwq_ind_tbl) {
		destroy_qp_rss(dev, mqp);
	} else {
		destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
	}

	kfree(mqp->sqp);
	return 0;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = mqp->sqp;

		if (sqp->roce_v2_gsi)
			ib_destroy_qp(sqp->roce_v2_gsi);
	}

	return _mlx4_ib_destroy_qp(qp, udata);
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
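
/*
 * When the destination RD atomic depth is zero, access_flags is masked
 * down to IB_ACCESS_REMOTE_WRITE, so only MLX4_QP_BIT_RWE can end up set
 * and remote reads/atomics are disabled in the QP context.
 */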
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
static int _mlx4_set_path(struct mlx4_ib_dev *dev,
			  const struct rdma_ah_attr *ah,
			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
	int vidx;
	int smac_index;
	int err;

	path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
	path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
	if (rdma_ah_get_static_rate(ah)) {
		path->static_rate = rdma_ah_get_static_rate(ah) +
				    MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah);
		int real_sgid_index =
			mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);

		if (real_sgid_index < 0)
			return real_sgid_index;
		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = real_sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
			return -1;

		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs.  unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
				} else {
					path->vlan_index = smac_info->vlan_index;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
			}
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		/* get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		*/
		if ((!smac_info->smac && !smac_info->smac_port) ||
		    smac_info->smac != smac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, smac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = smac;
				smac_info->candidate_smac_port = port;
			} else {
				return -EINVAL;
			}
		} else {
			smac_index = smac_info->smac_index;
		}
		memcpy(path->dmac, ah->roce.dmac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;
	} else {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
	}

	return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
			 enum ib_qp_attr_mask qp_attr_mask,
			 struct mlx4_ib_qp *mqp,
			 struct mlx4_qp_path *path, u8 port,
			 u16 vlan_id, u8 *smac)
{
	return _mlx4_set_path(dev, &qp->ah_attr,
			      ether_addr_to_u64(smac),
			      vlan_id,
			      path, &mqp->pri, port);
}

static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
			     const struct ib_qp_attr *qp,
			     enum ib_qp_attr_mask qp_attr_mask,
			     struct mlx4_ib_qp *mqp,
			     struct mlx4_qp_path *path, u8 port)
{
	return _mlx4_set_path(dev, &qp->alt_ah_attr,
			      0,
			      0xffff,
			      path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
*dev
,
1927 struct mlx4_ib_qp
*qp
,
1928 struct mlx4_qp_context
*context
)
1933 u64_mac
= atomic64_read(&dev
->iboe
.mac
[qp
->port
- 1]);
1935 context
->pri_path
.sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
| ((qp
->port
- 1) << 6);
1936 if (!qp
->pri
.smac
&& !qp
->pri
.smac_port
) {
1937 smac_index
= mlx4_register_mac(dev
->dev
, qp
->port
, u64_mac
);
1938 if (smac_index
>= 0) {
1939 qp
->pri
.candidate_smac_index
= smac_index
;
1940 qp
->pri
.candidate_smac
= u64_mac
;
1941 qp
->pri
.candidate_smac_port
= qp
->port
;
1942 context
->pri_path
.grh_mylmc
= 0x80 | (u8
) smac_index
;
static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct counter_index *new_counter_index;
	int err;
	u32 tmp_idx;

	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
	    IB_LINK_LAYER_ETHERNET ||
	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
		return 0;

	err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
	if (err)
		return err;

	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
	if (!new_counter_index) {
		mlx4_counter_free(dev->dev, tmp_idx);
		return -ENOMEM;
	}

	new_counter_index->index = tmp_idx;
	new_counter_index->allocated = 1;
	qp->counter_index = new_counter_index;

	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
	list_add_tail(&new_counter_index->list,
		      &dev->counters_table[qp->port - 1].counters_list);
	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

	return 0;
}
enum {
	MLX4_QPC_ROCE_MODE_1 = 0,
	MLX4_QPC_ROCE_MODE_2 = 2,
	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
};

static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
{
	switch (gid_type) {
	case IB_GID_TYPE_ROCE:
		return MLX4_QPC_ROCE_MODE_1;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		return MLX4_QPC_ROCE_MODE_2;
	default:
		return MLX4_QPC_ROCE_MODE_UNDEFINED;
	}
}
/*
 * Go over all RSS QP's childes (WQs) and apply their HW state according to
 * their logic state if the RSS QP is the first RSS QP associated for the WQ.
 */
static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
			    struct ib_udata *udata)
{
	int err = 0;
	int i;

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

		mutex_lock(&wq->mutex);

		/* Mlx4_ib restrictions:
		 * WQ's is associated to a port according to the RSS QP it is
		 * associates to.
		 * In case the WQ is associated to a different port by another
		 * RSS QP, return a failure.
		 */
		if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
			err = -EINVAL;
			mutex_unlock(&wq->mutex);
			break;
		}
		wq->port = port_num;
		if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
			err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
			if (err) {
				mutex_unlock(&wq->mutex);
				break;
			}
		}
		wq->rss_usecnt++;

		mutex_unlock(&wq->mutex);
	}

	if (i && err) {
		int j;

		for (j = (i - 1); j >= 0; j--) {
			struct ib_wq *ibwq = ind_tbl->ind_tbl[j];
			struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

			mutex_lock(&wq->mutex);

			if ((wq->rss_usecnt == 1) &&
			    (ibwq->state == IB_WQS_RDY))
				if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
						       udata))
					pr_warn("failed to reverse WQN=0x%06x\n",
						ibwq->wq_num);
			wq->rss_usecnt--;

			mutex_unlock(&wq->mutex);
		}
	}

	return err;
}
static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
				struct ib_udata *udata)
{
	int i;

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

		mutex_lock(&wq->mutex);

		if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
			if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
				pr_warn("failed to reverse WQN=%x\n",
					ibwq->wq_num);
		wq->rss_usecnt--;

		mutex_unlock(&wq->mutex);
	}
}
static void fill_qp_rss_context(struct mlx4_qp_context *context,
				struct mlx4_ib_qp *qp)
{
	struct mlx4_rss_context *rss_context;

	rss_context = (void *)context + offsetof(struct mlx4_qp_context,
			pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;

	rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
	rss_context->default_qpn =
		cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
	if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
		rss_context->base_qpn_udp = rss_context->default_qpn;
	rss_context->flags = qp->rss_ctx->flags;
	/* Currently support just toeplitz */
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;

	memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
	       MLX4_EN_RSS_KEY_SIZE);
}
static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state,
			       struct ib_udata *udata)
{
	struct ib_srq *ibsrq;
	const struct ib_gid_attr *gid_attr = NULL;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	enum ib_qp_type qp_type;
	struct mlx4_ib_dev *dev;
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int err = -EINVAL;
	int counter_index;

	if (src_type == MLX4_IB_RWQ_SRC) {
		struct ib_wq *ibwq;

		ibwq	    = (struct ib_wq *)src;
		ibsrq	    = NULL;
		rwq_ind_tbl = NULL;
		qp_type     = IB_QPT_RAW_PACKET;
		qp	    = to_mqp((struct ib_qp *)ibwq);
		dev	    = to_mdev(ibwq->device);
		pd	    = to_mpd(ibwq->pd);
	} else {
		struct ib_qp *ibqp;

		ibqp	    = (struct ib_qp *)src;
		ibsrq	    = ibqp->srq;
		rwq_ind_tbl = ibqp->rwq_ind_tbl;
		qp_type     = ibqp->qp_type;
		qp	    = to_mqp(ibqp);
		dev	    = to_mdev(ibqp->device);
		pd	    = get_pd(qp);
	}

	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (qp->inl_recv_sz)
		context->param3 |= cpu_to_be32(1 << 25);

	if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
		context->param3 |= cpu_to_be32(1 << 29);

	if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */
		if (qp->rq.wqe_cnt)
			context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
		context->rq_size_stride |= qp->rq.wqe_shift - 4;
	}

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (new_state == IB_QPS_RESET && qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

	if (ucontext)
		context->usr_page = cpu_to_be32(
			mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
	else
		context->usr_page = cpu_to_be32(
			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		err = create_qp_lb_counter(dev, qp);
		if (err)
			goto out;

		counter_index =
			dev->counters_table[qp->port - 1].default_counter;
		if (qp->counter_index)
			counter_index = qp->counter_index->index;

		if (counter_index != -1) {
			context->pri_path.counter_index = counter_index;
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
			if (qp->counter_index) {
				context->pri_path.fl |=
					MLX4_FL_ETH_SRC_CHECK_MC_LB;
				context->pri_path.vlan_control |=
					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
			}
		} else
			context->pri_path.counter_index =
				MLX4_SINK_COUNTER_INDEX(dev->dev);
->dev
);
2270 if (qp
->flags
& MLX4_IB_QP_NETIF
) {
2271 mlx4_ib_steer_qp_reg(dev
, qp
, 1);
2275 if (qp_type
== IB_QPT_GSI
) {
2276 enum ib_gid_type gid_type
= qp
->flags
& MLX4_IB_ROCE_V2_GSI_QP
?
2277 IB_GID_TYPE_ROCE_UDP_ENCAP
: IB_GID_TYPE_ROCE
;
2278 u8 qpc_roce_mode
= gid_type_to_qpc(gid_type
);
2280 context
->rlkey_roce_mode
|= (qpc_roce_mode
<< 6);
2284 if (attr_mask
& IB_QP_PKEY_INDEX
) {
2285 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
2286 context
->pri_path
.disable_pkey_check
= 0x40;
2287 context
->pri_path
.pkey_index
= attr
->pkey_index
;
2288 optpar
|= MLX4_QP_OPTPAR_PKEY_INDEX
;
2291 if (attr_mask
& IB_QP_AV
) {
2292 u8 port_num
= mlx4_is_bonded(dev
->dev
) ? 1 :
2293 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2297 rdma_cap_eth_ah(&dev
->ib_dev
, port_num
) &&
2298 rdma_ah_get_ah_flags(&attr
->ah_attr
) & IB_AH_GRH
;
2301 gid_attr
= attr
->ah_attr
.grh
.sgid_attr
;
2302 err
= rdma_read_gid_l2_fields(gid_attr
, &vlan
,
2308 if (mlx4_set_path(dev
, attr
, attr_mask
, qp
, &context
->pri_path
,
2309 port_num
, vlan
, smac
))
2312 optpar
|= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
|
2313 MLX4_QP_OPTPAR_SCHED_QUEUE
);
2316 (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
)) {
2317 u8 qpc_roce_mode
= gid_type_to_qpc(gid_attr
->gid_type
);
2319 if (qpc_roce_mode
== MLX4_QPC_ROCE_MODE_UNDEFINED
) {
2323 context
->rlkey_roce_mode
|= (qpc_roce_mode
<< 6);
2328 if (attr_mask
& IB_QP_TIMEOUT
) {
2329 context
->pri_path
.ackto
|= attr
->timeout
<< 3;
2330 optpar
|= MLX4_QP_OPTPAR_ACK_TIMEOUT
;
2333 if (attr_mask
& IB_QP_ALT_PATH
) {
2334 if (attr
->alt_port_num
== 0 ||
2335 attr
->alt_port_num
> dev
->dev
->caps
.num_ports
)
2338 if (attr
->alt_pkey_index
>=
2339 dev
->dev
->caps
.pkey_table_len
[attr
->alt_port_num
])
2342 if (mlx4_set_alt_path(dev
, attr
, attr_mask
, qp
,
2344 attr
->alt_port_num
))
2347 context
->alt_path
.pkey_index
= attr
->alt_pkey_index
;
2348 context
->alt_path
.ackto
= attr
->alt_timeout
<< 3;
2349 optpar
|= MLX4_QP_OPTPAR_ALT_ADDR_PATH
;
2352 context
->pd
= cpu_to_be32(pd
->pdn
);
2355 context
->params1
= cpu_to_be32(MLX4_IB_ACK_REQ_FREQ
<< 28);
2356 get_cqs(qp
, src_type
, &send_cq
, &recv_cq
);
2357 } else { /* Set dummy CQs to be compatible with HV and PRM */
2358 send_cq
= to_mcq(rwq_ind_tbl
->ind_tbl
[0]->cq
);
2361 context
->cqn_send
= cpu_to_be32(send_cq
->mcq
.cqn
);
2362 context
->cqn_recv
= cpu_to_be32(recv_cq
->mcq
.cqn
);
2364 /* Set "fast registration enabled" for all kernel QPs */
2366 context
->params1
|= cpu_to_be32(1 << 11);
2368 if (attr_mask
& IB_QP_RNR_RETRY
) {
2369 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
2370 optpar
|= MLX4_QP_OPTPAR_RNR_RETRY
;
2373 if (attr_mask
& IB_QP_RETRY_CNT
) {
2374 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
2375 optpar
|= MLX4_QP_OPTPAR_RETRY_COUNT
;
2378 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
2379 if (attr
->max_rd_atomic
)
2381 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
2382 optpar
|= MLX4_QP_OPTPAR_SRA_MAX
;
2385 if (attr_mask
& IB_QP_SQ_PSN
)
2386 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
2388 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
2389 if (attr
->max_dest_rd_atomic
)
2391 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
2392 optpar
|= MLX4_QP_OPTPAR_RRA_MAX
;
2395 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
2396 context
->params2
|= to_mlx4_access_flags(qp
, attr
, attr_mask
);
2397 optpar
|= MLX4_QP_OPTPAR_RWE
| MLX4_QP_OPTPAR_RRE
| MLX4_QP_OPTPAR_RAE
;
2401 context
->params2
|= cpu_to_be32(MLX4_QP_BIT_RIC
);
2403 if (attr_mask
& IB_QP_MIN_RNR_TIMER
) {
2404 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
2405 optpar
|= MLX4_QP_OPTPAR_RNR_TIMEOUT
;
2407 if (attr_mask
& IB_QP_RQ_PSN
)
2408 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
2410 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
2411 if (attr_mask
& IB_QP_QKEY
) {
2412 if (qp
->mlx4_ib_qp_type
&
2413 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
))
2414 context
->qkey
= cpu_to_be32(IB_QP_SET_QKEY
);
2416 if (mlx4_is_mfunc(dev
->dev
) &&
2417 !(qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
) &&
2418 (attr
->qkey
& MLX4_RESERVED_QKEY_MASK
) ==
2419 MLX4_RESERVED_QKEY_BASE
) {
2420 pr_err("Cannot use reserved QKEY"
2421 " 0x%x (range 0xffff0000..0xffffffff"
2422 " is reserved)\n", attr
->qkey
);
2426 context
->qkey
= cpu_to_be32(attr
->qkey
);
2428 optpar
|= MLX4_QP_OPTPAR_Q_KEY
;
2432 context
->srqn
= cpu_to_be32(1 << 24 |
2433 to_msrq(ibsrq
)->msrq
.srqn
);
2435 if (qp
->rq
.wqe_cnt
&&
2436 cur_state
== IB_QPS_RESET
&&
2437 new_state
== IB_QPS_INIT
)
2438 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
2440 if (cur_state
== IB_QPS_INIT
&&
2441 new_state
== IB_QPS_RTR
&&
2442 (qp_type
== IB_QPT_GSI
|| qp_type
== IB_QPT_SMI
||
2443 qp_type
== IB_QPT_UD
|| qp_type
== IB_QPT_RAW_PACKET
)) {
2444 context
->pri_path
.sched_queue
= (qp
->port
- 1) << 6;
2445 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_SMI
||
2446 qp
->mlx4_ib_qp_type
&
2447 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
)) {
2448 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE
;
2449 if (qp
->mlx4_ib_qp_type
!= MLX4_IB_QPT_SMI
)
2450 context
->pri_path
.fl
= 0x80;
2452 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
2453 context
->pri_path
.fl
= 0x80;
2454 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_SCHED_QUEUE
;
2456 if (rdma_port_get_link_layer(&dev
->ib_dev
, qp
->port
) ==
2457 IB_LINK_LAYER_ETHERNET
) {
2458 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
||
2459 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
)
2460 context
->pri_path
.feup
= 1 << 7; /* don't fsm */
2461 /* handle smac_index */
2462 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_UD
||
2463 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
||
2464 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
) {
2465 err
= handle_eth_ud_smac_index(dev
, qp
, context
);
2470 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
)
2471 dev
->qp1_proxy
[qp
->port
- 1] = qp
;
2476 if (qp_type
== IB_QPT_RAW_PACKET
) {
2477 context
->pri_path
.ackto
= (context
->pri_path
.ackto
& 0xf8) |
2478 MLX4_IB_LINK_TYPE_ETH
;
2479 if (dev
->dev
->caps
.tunnel_offload_mode
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
) {
2480 /* set QP to receive both tunneled & non-tunneled packets */
2482 context
->srqn
= cpu_to_be32(7 << 28);
2486 if (qp_type
== IB_QPT_UD
&& (new_state
== IB_QPS_RTR
)) {
2487 int is_eth
= rdma_port_get_link_layer(
2488 &dev
->ib_dev
, qp
->port
) ==
2489 IB_LINK_LAYER_ETHERNET
;
2491 context
->pri_path
.ackto
= MLX4_IB_LINK_TYPE_ETH
;
2492 optpar
|= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
;
2496 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
2497 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&& attr
->en_sqd_async_notify
)
2503 cur_state
== IB_QPS_RESET
&&
2504 new_state
== IB_QPS_INIT
)
2505 context
->rlkey_roce_mode
|= (1 << 4);
2508 * Before passing a kernel QP to the HW, make sure that the
2509 * ownership bits of the send queue are set and the SQ
2510 * headroom is stamped so that the hardware doesn't start
2511 * processing stale work requests.
2514 cur_state
== IB_QPS_RESET
&&
2515 new_state
== IB_QPS_INIT
) {
2516 struct mlx4_wqe_ctrl_seg
*ctrl
;
2519 for (i
= 0; i
< qp
->sq
.wqe_cnt
; ++i
) {
2520 ctrl
= get_send_wqe(qp
, i
);
2521 ctrl
->owner_opcode
= cpu_to_be32(1 << 31);
2522 ctrl
->qpn_vlan
.fence_size
=
2523 1 << (qp
->sq
.wqe_shift
- 4);
2524 stamp_send_wqe(qp
, i
);
2529 cur_state
== IB_QPS_RESET
&&
2530 new_state
== IB_QPS_INIT
) {
2531 fill_qp_rss_context(context
, qp
);
2532 context
->flags
|= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET
);
2535 err
= mlx4_qp_modify(dev
->dev
, &qp
->mtt
, to_mlx4_state(cur_state
),
2536 to_mlx4_state(new_state
), context
, optpar
,
2537 sqd_event
, &qp
->mqp
);
2541 qp
->state
= new_state
;
2543 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
2544 qp
->atomic_rd_en
= attr
->qp_access_flags
;
2545 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
2546 qp
->resp_depth
= attr
->max_dest_rd_atomic
;
2547 if (attr_mask
& IB_QP_PORT
) {
2548 qp
->port
= attr
->port_num
;
2549 update_mcg_macs(dev
, qp
);
2551 if (attr_mask
& IB_QP_ALT_PATH
)
2552 qp
->alt_port
= attr
->alt_port_num
;
2554 if (is_sqp(dev
, qp
))
2555 store_sqp_attrs(qp
->sqp
, attr
, attr_mask
);
2558 * If we moved QP0 to RTR, bring the IB link up; if we moved
2559 * QP0 to RESET or ERROR, bring the link back down.
2561 if (is_qp0(dev
, qp
)) {
2562 if (cur_state
!= IB_QPS_RTR
&& new_state
== IB_QPS_RTR
)
2563 if (mlx4_INIT_PORT(dev
->dev
, qp
->port
))
2564 pr_warn("INIT_PORT failed for port %d\n",
2567 if (cur_state
!= IB_QPS_RESET
&& cur_state
!= IB_QPS_ERR
&&
2568 (new_state
== IB_QPS_RESET
|| new_state
== IB_QPS_ERR
))
2569 mlx4_CLOSE_PORT(dev
->dev
, qp
->port
);
2573 * If we moved a kernel QP to RESET, clean up all old CQ
2574 * entries and reinitialize the QP.
2576 if (new_state
== IB_QPS_RESET
) {
2578 mlx4_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
2579 ibsrq
? to_msrq(ibsrq
) : NULL
);
2580 if (send_cq
!= recv_cq
)
2581 mlx4_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
2587 qp
->sq_next_wqe
= 0;
2591 if (qp
->flags
& MLX4_IB_QP_NETIF
)
2592 mlx4_ib_steer_qp_reg(dev
, qp
, 0);
2594 if (qp
->pri
.smac
|| (!qp
->pri
.smac
&& qp
->pri
.smac_port
)) {
2595 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
2597 qp
->pri
.smac_port
= 0;
2600 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
2603 if (qp
->pri
.vid
< 0x1000) {
2604 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
, qp
->pri
.vid
);
2605 qp
->pri
.vid
= 0xFFFF;
2606 qp
->pri
.candidate_vid
= 0xFFFF;
2607 qp
->pri
.update_vid
= 0;
2610 if (qp
->alt
.vid
< 0x1000) {
2611 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
, qp
->alt
.vid
);
2612 qp
->alt
.vid
= 0xFFFF;
2613 qp
->alt
.candidate_vid
= 0xFFFF;
2614 qp
->alt
.update_vid
= 0;
2618 if (err
&& qp
->counter_index
)
2619 mlx4_ib_free_qp_counter(dev
, qp
);
2620 if (err
&& steer_qp
)
2621 mlx4_ib_steer_qp_reg(dev
, qp
, 0);
2623 if (qp
->pri
.candidate_smac
||
2624 (!qp
->pri
.candidate_smac
&& qp
->pri
.candidate_smac_port
)) {
2626 mlx4_unregister_mac(dev
->dev
, qp
->pri
.candidate_smac_port
, qp
->pri
.candidate_smac
);
2628 if (qp
->pri
.smac
|| (!qp
->pri
.smac
&& qp
->pri
.smac_port
))
2629 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
2630 qp
->pri
.smac
= qp
->pri
.candidate_smac
;
2631 qp
->pri
.smac_index
= qp
->pri
.candidate_smac_index
;
2632 qp
->pri
.smac_port
= qp
->pri
.candidate_smac_port
;
2634 qp
->pri
.candidate_smac
= 0;
2635 qp
->pri
.candidate_smac_index
= 0;
2636 qp
->pri
.candidate_smac_port
= 0;
2638 if (qp
->alt
.candidate_smac
) {
2640 mlx4_unregister_mac(dev
->dev
, qp
->alt
.candidate_smac_port
, qp
->alt
.candidate_smac
);
2643 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
2644 qp
->alt
.smac
= qp
->alt
.candidate_smac
;
2645 qp
->alt
.smac_index
= qp
->alt
.candidate_smac_index
;
2646 qp
->alt
.smac_port
= qp
->alt
.candidate_smac_port
;
2648 qp
->alt
.candidate_smac
= 0;
2649 qp
->alt
.candidate_smac_index
= 0;
2650 qp
->alt
.candidate_smac_port
= 0;
2653 if (qp
->pri
.update_vid
) {
2655 if (qp
->pri
.candidate_vid
< 0x1000)
2656 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.candidate_vlan_port
,
2657 qp
->pri
.candidate_vid
);
2659 if (qp
->pri
.vid
< 0x1000)
2660 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
,
2662 qp
->pri
.vid
= qp
->pri
.candidate_vid
;
2663 qp
->pri
.vlan_port
= qp
->pri
.candidate_vlan_port
;
2664 qp
->pri
.vlan_index
= qp
->pri
.candidate_vlan_index
;
2666 qp
->pri
.candidate_vid
= 0xFFFF;
2667 qp
->pri
.update_vid
= 0;
2670 if (qp
->alt
.update_vid
) {
2672 if (qp
->alt
.candidate_vid
< 0x1000)
2673 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.candidate_vlan_port
,
2674 qp
->alt
.candidate_vid
);
2676 if (qp
->alt
.vid
< 0x1000)
2677 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
,
2679 qp
->alt
.vid
= qp
->alt
.candidate_vid
;
2680 qp
->alt
.vlan_port
= qp
->alt
.candidate_vlan_port
;
2681 qp
->alt
.vlan_index
= qp
->alt
.candidate_vlan_index
;
2683 qp
->alt
.candidate_vid
= 0xFFFF;
2684 qp
->alt
.update_vid
= 0;
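
/*
 * __mlx4_ib_modify_qp() above follows one pattern throughout: every field it
 * touches in the mlx4_qp_context is mirrored by a bit in "optpar", so the
 * firmware only applies the parameters that are valid for this particular
 * transition.  Side effects taken along the way (registered SMACs and VLANs,
 * steering rules, the loopback counter) have to be rolled back on failure,
 * which is why the candidate_* fields are only committed to pri/alt once the
 * command has succeeded.
 */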
enum {
	MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE |
					      IB_QP_PORT),
};
2695 static int _mlx4_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
2696 int attr_mask
, struct ib_udata
*udata
)
2698 struct mlx4_ib_dev
*dev
= to_mdev(ibqp
->device
);
2699 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
2700 enum ib_qp_state cur_state
, new_state
;
2702 mutex_lock(&qp
->mutex
);
2704 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
2705 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
2707 if (!ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
,
2709 pr_debug("qpn 0x%x: invalid attribute mask specified "
2710 "for transition %d to %d. qp_type %d,"
2711 " attr_mask 0x%x\n",
2712 ibqp
->qp_num
, cur_state
, new_state
,
2713 ibqp
->qp_type
, attr_mask
);
2717 if (ibqp
->rwq_ind_tbl
) {
2718 if (!(((cur_state
== IB_QPS_RESET
) &&
2719 (new_state
== IB_QPS_INIT
)) ||
2720 ((cur_state
== IB_QPS_INIT
) &&
2721 (new_state
== IB_QPS_RTR
)))) {
2722 pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
2723 ibqp
->qp_num
, cur_state
, new_state
);
2729 if (attr_mask
& ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK
) {
2730 pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
2731 ibqp
->qp_num
, attr_mask
, cur_state
, new_state
);
2738 if (mlx4_is_bonded(dev
->dev
) && (attr_mask
& IB_QP_PORT
)) {
2739 if ((cur_state
== IB_QPS_RESET
) && (new_state
== IB_QPS_INIT
)) {
2740 if ((ibqp
->qp_type
== IB_QPT_RC
) ||
2741 (ibqp
->qp_type
== IB_QPT_UD
) ||
2742 (ibqp
->qp_type
== IB_QPT_UC
) ||
2743 (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) ||
2744 (ibqp
->qp_type
== IB_QPT_XRC_INI
)) {
2745 attr
->port_num
= mlx4_ib_bond_next_port(dev
);
2748 /* no sense in changing port_num
2749 * when ports are bonded */
2750 attr_mask
&= ~IB_QP_PORT
;
2754 if ((attr_mask
& IB_QP_PORT
) &&
2755 (attr
->port_num
== 0 || attr
->port_num
> dev
->num_ports
)) {
2756 pr_debug("qpn 0x%x: invalid port number (%d) specified "
2757 "for transition %d to %d. qp_type %d\n",
2758 ibqp
->qp_num
, attr
->port_num
, cur_state
,
2759 new_state
, ibqp
->qp_type
);
2763 if ((attr_mask
& IB_QP_PORT
) && (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) &&
2764 (rdma_port_get_link_layer(&dev
->ib_dev
, attr
->port_num
) !=
2765 IB_LINK_LAYER_ETHERNET
))
2768 if (attr_mask
& IB_QP_PKEY_INDEX
) {
2769 int p
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2770 if (attr
->pkey_index
>= dev
->dev
->caps
.pkey_table_len
[p
]) {
2771 pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
2772 "for transition %d to %d. qp_type %d\n",
2773 ibqp
->qp_num
, attr
->pkey_index
, cur_state
,
2774 new_state
, ibqp
->qp_type
);
2779 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
2780 attr
->max_rd_atomic
> dev
->dev
->caps
.max_qp_init_rdma
) {
2781 pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
2782 "Transition %d to %d. qp_type %d\n",
2783 ibqp
->qp_num
, attr
->max_rd_atomic
, cur_state
,
2784 new_state
, ibqp
->qp_type
);
2788 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
2789 attr
->max_dest_rd_atomic
> dev
->dev
->caps
.max_qp_dest_rdma
) {
2790 pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
2791 "Transition %d to %d. qp_type %d\n",
2792 ibqp
->qp_num
, attr
->max_dest_rd_atomic
, cur_state
,
2793 new_state
, ibqp
->qp_type
);
2797 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
2802 if (ibqp
->rwq_ind_tbl
&& (new_state
== IB_QPS_INIT
)) {
2803 err
= bringup_rss_rwqs(ibqp
->rwq_ind_tbl
, attr
->port_num
,
2809 err
= __mlx4_ib_modify_qp(ibqp
, MLX4_IB_QP_SRC
, attr
, attr_mask
,
2810 cur_state
, new_state
, udata
);
2812 if (ibqp
->rwq_ind_tbl
&& err
)
2813 bring_down_rss_rwqs(ibqp
->rwq_ind_tbl
, udata
);
2815 if (mlx4_is_bonded(dev
->dev
) && (attr_mask
& IB_QP_PORT
))
2819 mutex_unlock(&qp
->mutex
);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	int ret;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = mqp->sqp;
		int err = 0;

		if (sqp->roce_v2_gsi)
			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
		if (err)
			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
			       err);
	}
	return ret;
}
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
	int i;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
		    qpn == dev->caps.spec_qps[i].qp0_tunnel) {
			*qkey = dev->caps.spec_qps[i].qp0_qkey;
			return 0;
		}
	}
	return -EINVAL;
}
2860 static int build_sriov_qp0_header(struct mlx4_ib_qp
*qp
,
2861 const struct ib_ud_wr
*wr
,
2862 void *wqe
, unsigned *mlx_seg_len
)
2864 struct mlx4_ib_dev
*mdev
= to_mdev(qp
->ibqp
.device
);
2865 struct mlx4_ib_sqp
*sqp
= qp
->sqp
;
2866 struct ib_device
*ib_dev
= qp
->ibqp
.device
;
2867 struct mlx4_wqe_mlx_seg
*mlx
= wqe
;
2868 struct mlx4_wqe_inline_seg
*inl
= wqe
+ sizeof *mlx
;
2869 struct mlx4_ib_ah
*ah
= to_mah(wr
->ah
);
2878 if (wr
->wr
.opcode
!= IB_WR_SEND
)
2883 for (i
= 0; i
< wr
->wr
.num_sge
; ++i
)
2884 send_size
+= wr
->wr
.sg_list
[i
].length
;
2886 /* for proxy-qp0 sends, need to add in size of tunnel header */
2887 /* for tunnel-qp0 sends, tunnel header is already in s/g list */
2888 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_SMI_OWNER
)
2889 send_size
+= sizeof (struct mlx4_ib_tunnel_header
);
2891 ib_ud_header_init(send_size
, 1, 0, 0, 0, 0, 0, 0, &sqp
->ud_header
);
2893 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_SMI_OWNER
) {
2894 sqp
->ud_header
.lrh
.service_level
=
2895 be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 28;
2896 sqp
->ud_header
.lrh
.destination_lid
=
2897 cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
2898 sqp
->ud_header
.lrh
.source_lid
=
2899 cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
2902 mlx
->flags
&= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
2904 /* force loopback */
2905 mlx
->flags
|= cpu_to_be32(MLX4_WQE_MLX_VL15
| 0x1 | MLX4_WQE_MLX_SLR
);
2906 mlx
->rlid
= sqp
->ud_header
.lrh
.destination_lid
;
2908 sqp
->ud_header
.lrh
.virtual_lane
= 0;
2909 sqp
->ud_header
.bth
.solicited_event
= !!(wr
->wr
.send_flags
& IB_SEND_SOLICITED
);
2910 err
= ib_get_cached_pkey(ib_dev
, qp
->port
, 0, &pkey
);
2913 sqp
->ud_header
.bth
.pkey
= cpu_to_be16(pkey
);
2914 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_SMI_OWNER
)
2915 sqp
->ud_header
.bth
.destination_qpn
= cpu_to_be32(wr
->remote_qpn
);
2917 sqp
->ud_header
.bth
.destination_qpn
=
2918 cpu_to_be32(mdev
->dev
->caps
.spec_qps
[qp
->port
- 1].qp0_tunnel
);
2920 sqp
->ud_header
.bth
.psn
= cpu_to_be32((sqp
->send_psn
++) & ((1 << 24) - 1));
2921 if (mlx4_is_master(mdev
->dev
)) {
2922 if (mlx4_get_parav_qkey(mdev
->dev
, qp
->mqp
.qpn
, &qkey
))
2925 if (vf_get_qp0_qkey(mdev
->dev
, qp
->mqp
.qpn
, &qkey
))
2928 sqp
->ud_header
.deth
.qkey
= cpu_to_be32(qkey
);
2929 sqp
->ud_header
.deth
.source_qpn
= cpu_to_be32(qp
->mqp
.qpn
);
2931 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
2932 sqp
->ud_header
.immediate_present
= 0;
2934 header_size
= ib_ud_header_pack(&sqp
->ud_header
, sqp
->header_buf
);
2937 * Inline data segments may not cross a 64 byte boundary. If
2938 * our UD header is bigger than the space available up to the
2939 * next 64 byte boundary in the WQE, use two inline data
2940 * segments to hold the UD header.
2942 spc
= MLX4_INLINE_ALIGN
-
2943 ((unsigned long) (inl
+ 1) & (MLX4_INLINE_ALIGN
- 1));
2944 if (header_size
<= spc
) {
2945 inl
->byte_count
= cpu_to_be32(1 << 31 | header_size
);
2946 memcpy(inl
+ 1, sqp
->header_buf
, header_size
);
2949 inl
->byte_count
= cpu_to_be32(1 << 31 | spc
);
2950 memcpy(inl
+ 1, sqp
->header_buf
, spc
);
2952 inl
= (void *) (inl
+ 1) + spc
;
2953 memcpy(inl
+ 1, sqp
->header_buf
+ spc
, header_size
- spc
);
2955 * Need a barrier here to make sure all the data is
2956 * visible before the byte_count field is set.
2957 * Otherwise the HCA prefetcher could grab the 64-byte
2958 * chunk with this inline segment and get a valid (!=
2959 * 0xffffffff) byte count but stale data, and end up
2960 * generating a packet with bad headers.
2962 * The first inline segment's byte_count field doesn't
2963 * need a barrier, because it comes after a
2964 * control/MLX segment and therefore is at an offset
2968 inl
->byte_count
= cpu_to_be32(1 << 31 | (header_size
- spc
));
2973 ALIGN(i
* sizeof (struct mlx4_wqe_inline_seg
) + header_size
, 16);
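
/*
 * Worked example for the inline split above (numbers are illustrative):
 * inline data segments may not cross a 64-byte (MLX4_INLINE_ALIGN) boundary.
 * If the packed UD header is 72 bytes and the first segment's payload starts
 * 16 bytes before the next boundary, then spc = 16 and the header is written
 * as two inline segments of 16 and 56 bytes; the second segment's byte_count
 * is set only after the barrier, once its payload is visible, so the HCA
 * prefetcher can never see a valid count together with stale data.
 */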
static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
{
	union sl2vl_tbl_to_u64 tmp_vltab;
	u8 vl;

	if (sl > 15)
		return 0xf;
	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
	vl = tmp_vltab.sl8[sl >> 1];
	if (sl & 1)
		vl &= 0x0f;
	else
		vl >>= 4;
	return vl;
}
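
/*
 * The SL-to-VL table read above is cached per port as a single 64-bit value
 * so it can be fetched atomically here and replaced atomically wherever the
 * port's SL2VL mapping is updated.  Each byte of the table holds two 4-bit
 * VL entries, which is why sl8[sl >> 1] selects the byte and sl & 1 selects
 * the nibble.
 */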
static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
				int index, union ib_gid *gid,
				enum ib_gid_type *gid_type)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	unsigned long flags;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_irqsave(&iboe->lock, flags);
	memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
	*gid_type = port_gid_table->gids[index].gid_type;
	spin_unlock_irqrestore(&iboe->lock, flags);
	if (rdma_is_zero_gid(gid))
		return -ENOENT;

	return 0;
}

#define MLX4_ROCEV2_QP1_SPORT 0xC000
3013 static int build_mlx_header(struct mlx4_ib_qp
*qp
, const struct ib_ud_wr
*wr
,
3014 void *wqe
, unsigned *mlx_seg_len
)
3016 struct mlx4_ib_sqp
*sqp
= qp
->sqp
;
3017 struct ib_device
*ib_dev
= qp
->ibqp
.device
;
3018 struct mlx4_ib_dev
*ibdev
= to_mdev(ib_dev
);
3019 struct mlx4_wqe_mlx_seg
*mlx
= wqe
;
3020 struct mlx4_wqe_ctrl_seg
*ctrl
= wqe
;
3021 struct mlx4_wqe_inline_seg
*inl
= wqe
+ sizeof *mlx
;
3022 struct mlx4_ib_ah
*ah
= to_mah(wr
->ah
);
3032 bool is_vlan
= false;
3034 bool is_udp
= false;
3038 for (i
= 0; i
< wr
->wr
.num_sge
; ++i
)
3039 send_size
+= wr
->wr
.sg_list
[i
].length
;
3041 is_eth
= rdma_port_get_link_layer(qp
->ibqp
.device
, qp
->port
) == IB_LINK_LAYER_ETHERNET
;
3042 is_grh
= mlx4_ib_ah_grh_present(ah
);
3044 enum ib_gid_type gid_type
;
3045 if (mlx4_is_mfunc(to_mdev(ib_dev
)->dev
)) {
3046 /* When multi-function is enabled, the ib_core gid
3047 * indexes don't necessarily match the hw ones, so
3048 * we must use our own cache */
3049 err
= mlx4_get_roce_gid_from_slave(to_mdev(ib_dev
)->dev
,
3050 be32_to_cpu(ah
->av
.ib
.port_pd
) >> 24,
3051 ah
->av
.ib
.gid_index
, &sgid
.raw
[0]);
3055 err
= fill_gid_by_hw_index(ibdev
, qp
->port
,
3056 ah
->av
.ib
.gid_index
, &sgid
,
3059 is_udp
= gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
;
3061 if (ipv6_addr_v4mapped((struct in6_addr
*)&sgid
))
3071 if (ah
->av
.eth
.vlan
!= cpu_to_be16(0xffff)) {
3072 vlan
= be16_to_cpu(ah
->av
.eth
.vlan
) & 0x0fff;
3076 err
= ib_ud_header_init(send_size
, !is_eth
, is_eth
, is_vlan
, is_grh
,
3077 ip_version
, is_udp
, 0, &sqp
->ud_header
);
3082 sqp
->ud_header
.lrh
.service_level
=
3083 be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 28;
3084 sqp
->ud_header
.lrh
.destination_lid
= ah
->av
.ib
.dlid
;
3085 sqp
->ud_header
.lrh
.source_lid
= cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
3088 if (is_grh
|| (ip_version
== 6)) {
3089 sqp
->ud_header
.grh
.traffic_class
=
3090 (be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 20) & 0xff;
3091 sqp
->ud_header
.grh
.flow_label
=
3092 ah
->av
.ib
.sl_tclass_flowlabel
& cpu_to_be32(0xfffff);
3093 sqp
->ud_header
.grh
.hop_limit
= ah
->av
.ib
.hop_limit
;
3095 memcpy(sqp
->ud_header
.grh
.source_gid
.raw
, sgid
.raw
, 16);
3097 if (mlx4_is_mfunc(to_mdev(ib_dev
)->dev
)) {
3098 /* When multi-function is enabled, the ib_core gid
3099 * indexes don't necessarily match the hw ones, so
3100 * we must use our own cache
3102 sqp
->ud_header
.grh
.source_gid
.global
3104 cpu_to_be64(atomic64_read(
3107 .demux
[qp
->port
- 1]
3109 sqp
->ud_header
.grh
.source_gid
.global
3112 ->sriov
.demux
[qp
->port
- 1]
3113 .guid_cache
[ah
->av
.ib
.gid_index
];
3115 sqp
->ud_header
.grh
.source_gid
=
3116 ah
->ibah
.sgid_attr
->gid
;
3119 memcpy(sqp
->ud_header
.grh
.destination_gid
.raw
,
3120 ah
->av
.ib
.dgid
, 16);
3123 if (ip_version
== 4) {
3124 sqp
->ud_header
.ip4
.tos
=
3125 (be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 20) & 0xff;
3126 sqp
->ud_header
.ip4
.id
= 0;
3127 sqp
->ud_header
.ip4
.frag_off
= htons(IP_DF
);
3128 sqp
->ud_header
.ip4
.ttl
= ah
->av
.eth
.hop_limit
;
3130 memcpy(&sqp
->ud_header
.ip4
.saddr
,
3132 memcpy(&sqp
->ud_header
.ip4
.daddr
, ah
->av
.ib
.dgid
+ 12, 4);
3133 sqp
->ud_header
.ip4
.check
= ib_ud_ip4_csum(&sqp
->ud_header
);
3137 sqp
->ud_header
.udp
.dport
= htons(ROCE_V2_UDP_DPORT
);
3138 sqp
->ud_header
.udp
.sport
= htons(MLX4_ROCEV2_QP1_SPORT
);
3139 sqp
->ud_header
.udp
.csum
= 0;
3142 mlx
->flags
&= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
3146 cpu_to_be32((!qp
->ibqp
.qp_num
? MLX4_WQE_MLX_VL15
: 0) |
3147 (sqp
->ud_header
.lrh
.destination_lid
==
3151 (sqp
->ud_header
.lrh
.service_level
<< 8));
3152 if (ah
->av
.ib
.port_pd
& cpu_to_be32(0x80000000))
3153 mlx
->flags
|= cpu_to_be32(0x1); /* force loopback */
3154 mlx
->rlid
= sqp
->ud_header
.lrh
.destination_lid
;
3157 switch (wr
->wr
.opcode
) {
3159 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
3160 sqp
->ud_header
.immediate_present
= 0;
3162 case IB_WR_SEND_WITH_IMM
:
3163 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE
;
3164 sqp
->ud_header
.immediate_present
= 1;
3165 sqp
->ud_header
.immediate_data
= wr
->wr
.ex
.imm_data
;
3173 u16 pcp
= (be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 29) << 13;
3175 ether_type
= (!is_udp
) ? ETH_P_IBOE
:
3176 (ip_version
== 4 ? ETH_P_IP
: ETH_P_IPV6
);
3178 mlx
->sched_prio
= cpu_to_be16(pcp
);
3180 ether_addr_copy(sqp
->ud_header
.eth
.smac_h
, ah
->av
.eth
.s_mac
);
3181 ether_addr_copy(sqp
->ud_header
.eth
.dmac_h
, ah
->av
.eth
.mac
);
3182 memcpy(&ctrl
->srcrb_flags16
[0], ah
->av
.eth
.mac
, 2);
3183 memcpy(&ctrl
->imm
, ah
->av
.eth
.mac
+ 2, 4);
3185 if (!memcmp(sqp
->ud_header
.eth
.smac_h
, sqp
->ud_header
.eth
.dmac_h
, 6))
3186 mlx
->flags
|= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK
);
3188 sqp
->ud_header
.eth
.type
= cpu_to_be16(ether_type
);
3190 sqp
->ud_header
.vlan
.type
= cpu_to_be16(ether_type
);
3191 sqp
->ud_header
.vlan
.tag
= cpu_to_be16(vlan
| pcp
);
3194 sqp
->ud_header
.lrh
.virtual_lane
=
3197 sl_to_vl(to_mdev(ib_dev
),
3198 sqp
->ud_header
.lrh
.service_level
,
3200 if (qp
->ibqp
.qp_num
&& sqp
->ud_header
.lrh
.virtual_lane
== 15)
3202 if (sqp
->ud_header
.lrh
.destination_lid
== IB_LID_PERMISSIVE
)
3203 sqp
->ud_header
.lrh
.source_lid
= IB_LID_PERMISSIVE
;
3205 sqp
->ud_header
.bth
.solicited_event
= !!(wr
->wr
.send_flags
& IB_SEND_SOLICITED
);
3206 if (!qp
->ibqp
.qp_num
)
3207 err
= ib_get_cached_pkey(ib_dev
, qp
->port
, sqp
->pkey_index
,
3210 err
= ib_get_cached_pkey(ib_dev
, qp
->port
, wr
->pkey_index
,
3215 sqp
->ud_header
.bth
.pkey
= cpu_to_be16(pkey
);
3216 sqp
->ud_header
.bth
.destination_qpn
= cpu_to_be32(wr
->remote_qpn
);
3217 sqp
->ud_header
.bth
.psn
= cpu_to_be32((sqp
->send_psn
++) & ((1 << 24) - 1));
3218 sqp
->ud_header
.deth
.qkey
= cpu_to_be32(wr
->remote_qkey
& 0x80000000 ?
3219 sqp
->qkey
: wr
->remote_qkey
);
3220 sqp
->ud_header
.deth
.source_qpn
= cpu_to_be32(qp
->ibqp
.qp_num
);
3222 header_size
= ib_ud_header_pack(&sqp
->ud_header
, sqp
->header_buf
);
3225 pr_err("built UD header of size %d:\n", header_size
);
3226 for (i
= 0; i
< header_size
/ 4; ++i
) {
3228 pr_err(" [%02x] ", i
* 4);
3230 be32_to_cpu(((__be32
*) sqp
->header_buf
)[i
]));
3231 if ((i
+ 1) % 8 == 0)
3238 * Inline data segments may not cross a 64 byte boundary. If
3239 * our UD header is bigger than the space available up to the
3240 * next 64 byte boundary in the WQE, use two inline data
3241 * segments to hold the UD header.
3243 spc
= MLX4_INLINE_ALIGN
-
3244 ((unsigned long) (inl
+ 1) & (MLX4_INLINE_ALIGN
- 1));
3245 if (header_size
<= spc
) {
3246 inl
->byte_count
= cpu_to_be32(1 << 31 | header_size
);
3247 memcpy(inl
+ 1, sqp
->header_buf
, header_size
);
3250 inl
->byte_count
= cpu_to_be32(1 << 31 | spc
);
3251 memcpy(inl
+ 1, sqp
->header_buf
, spc
);
3253 inl
= (void *) (inl
+ 1) + spc
;
3254 memcpy(inl
+ 1, sqp
->header_buf
+ spc
, header_size
- spc
);
3256 * Need a barrier here to make sure all the data is
3257 * visible before the byte_count field is set.
3258 * Otherwise the HCA prefetcher could grab the 64-byte
3259 * chunk with this inline segment and get a valid (!=
3260 * 0xffffffff) byte count but stale data, and end up
3261 * generating a packet with bad headers.
3263 * The first inline segment's byte_count field doesn't
3264 * need a barrier, because it comes after a
3265 * control/MLX segment and therefore is at an offset
3269 inl
->byte_count
= cpu_to_be32(1 << 31 | (header_size
- spc
));
3274 ALIGN(i
* sizeof (struct mlx4_wqe_inline_seg
) + header_size
, 16);
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
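
/*
 * wq->head and wq->tail above are free-running unsigned counters, so
 * "head - tail" is the number of outstanding WQEs even across wraparound
 * (for example head == 3 after wrapping with tail == 0xfffffffe still gives
 * 5).  The CQ lock is only taken on the slow path to re-read a tail that is
 * stable with respect to completion processing before declaring overflow.
 */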
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ?
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)           : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
			const struct ib_reg_wr *wr)
{
	struct mlx4_ib_mr *mr = to_mmr(wr->mr);

	fseg->flags		= convert_access(wr->access);
	fseg->mem_key		= cpu_to_be32(wr->key);
	fseg->buf_list		= cpu_to_be64(mr->page_map);
	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
	fseg->reg_len		= cpu_to_be64(mr->ibmr.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(ilog2(mr->ibmr.page_size));
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
			   const struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->swap);
		aseg->compare  = cpu_to_be64(wr->compare_add);
	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare  = cpu_to_be64(wr->compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare  = 0;
	}
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  const struct ib_atomic_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->swap_mask);
	aseg->compare		= cpu_to_be64(wr->compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->compare_add_mask);
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     const struct ib_ud_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
	dseg->qkey = cpu_to_be32(wr->remote_qkey);
	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
}
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    const struct ib_ud_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	if (qpt == MLX4_IB_QPT_PROXY_GSI)
		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
	else
		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
				unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
	hdr.qkey = cpu_to_be32(wr->remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
			 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
			 unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->header, wr->hlen);

	*lso_hdr_sz  = cpu_to_be32(wr->mss << 16 | wr->hlen);
	*lso_seg_len = halign;
	return 0;
}
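
/*
 * Notes on build_lso_seg() above: halign rounds the LSO header up to a
 * multiple of 16 bytes so the WQE size bookkeeping (counted in 16-byte
 * chunks) stays exact, and the (1 << 6) bit written through *blh appears to
 * flag a large LSO header that spills past one 64-byte cache line.  On QPs
 * that were not created with MLX4_IB_QP_LSO, the max_gs check effectively
 * charges those header chunks against the scatter/gather budget.
 */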
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;

	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
3522 static int _mlx4_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
3523 const struct ib_send_wr
**bad_wr
, bool drain
)
3525 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
3527 struct mlx4_wqe_ctrl_seg
*ctrl
;
3528 struct mlx4_wqe_data_seg
*dseg
;
3529 unsigned long flags
;
3540 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
3542 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
) {
3543 struct mlx4_ib_sqp
*sqp
= qp
->sqp
;
3545 if (sqp
->roce_v2_gsi
) {
3546 struct mlx4_ib_ah
*ah
= to_mah(ud_wr(wr
)->ah
);
3547 enum ib_gid_type gid_type
;
3550 if (!fill_gid_by_hw_index(mdev
, qp
->port
,
3551 ah
->av
.ib
.gid_index
,
3553 qp
= (gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
) ?
3554 to_mqp(sqp
->roce_v2_gsi
) : qp
;
3556 pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
3557 ah
->av
.ib
.gid_index
);
3561 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
3562 if (mdev
->dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
&&
3570 ind
= qp
->sq_next_wqe
;
3572 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
3576 if (mlx4_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)) {
3582 if (unlikely(wr
->num_sge
> qp
->sq
.max_gs
)) {
3588 ctrl
= wqe
= get_send_wqe(qp
, ind
& (qp
->sq
.wqe_cnt
- 1));
3589 qp
->sq
.wrid
[(qp
->sq
.head
+ nreq
) & (qp
->sq
.wqe_cnt
- 1)] = wr
->wr_id
;
3592 (wr
->send_flags
& IB_SEND_SIGNALED
?
3593 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
) : 0) |
3594 (wr
->send_flags
& IB_SEND_SOLICITED
?
3595 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED
) : 0) |
3596 ((wr
->send_flags
& IB_SEND_IP_CSUM
) ?
3597 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM
|
3598 MLX4_WQE_CTRL_TCP_UDP_CSUM
) : 0) |
3601 ctrl
->imm
= send_ieth(wr
);
3603 wqe
+= sizeof *ctrl
;
3604 size
= sizeof *ctrl
/ 16;
3606 switch (qp
->mlx4_ib_qp_type
) {
3607 case MLX4_IB_QPT_RC
:
3608 case MLX4_IB_QPT_UC
:
3609 switch (wr
->opcode
) {
3610 case IB_WR_ATOMIC_CMP_AND_SWP
:
3611 case IB_WR_ATOMIC_FETCH_AND_ADD
:
3612 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD
:
3613 set_raddr_seg(wqe
, atomic_wr(wr
)->remote_addr
,
3614 atomic_wr(wr
)->rkey
);
3615 wqe
+= sizeof (struct mlx4_wqe_raddr_seg
);
3617 set_atomic_seg(wqe
, atomic_wr(wr
));
3618 wqe
+= sizeof (struct mlx4_wqe_atomic_seg
);
3620 size
+= (sizeof (struct mlx4_wqe_raddr_seg
) +
3621 sizeof (struct mlx4_wqe_atomic_seg
)) / 16;
3625 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
3626 set_raddr_seg(wqe
, atomic_wr(wr
)->remote_addr
,
3627 atomic_wr(wr
)->rkey
);
3628 wqe
+= sizeof (struct mlx4_wqe_raddr_seg
);
3630 set_masked_atomic_seg(wqe
, atomic_wr(wr
));
3631 wqe
+= sizeof (struct mlx4_wqe_masked_atomic_seg
);
3633 size
+= (sizeof (struct mlx4_wqe_raddr_seg
) +
3634 sizeof (struct mlx4_wqe_masked_atomic_seg
)) / 16;
3638 case IB_WR_RDMA_READ
:
3639 case IB_WR_RDMA_WRITE
:
3640 case IB_WR_RDMA_WRITE_WITH_IMM
:
3641 set_raddr_seg(wqe
, rdma_wr(wr
)->remote_addr
,
3643 wqe
+= sizeof (struct mlx4_wqe_raddr_seg
);
3644 size
+= sizeof (struct mlx4_wqe_raddr_seg
) / 16;
3647 case IB_WR_LOCAL_INV
:
3648 ctrl
->srcrb_flags
|=
3649 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER
);
3650 set_local_inv_seg(wqe
, wr
->ex
.invalidate_rkey
);
3651 wqe
+= sizeof (struct mlx4_wqe_local_inval_seg
);
3652 size
+= sizeof (struct mlx4_wqe_local_inval_seg
) / 16;
3656 ctrl
->srcrb_flags
|=
3657 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER
);
3658 set_reg_seg(wqe
, reg_wr(wr
));
3659 wqe
+= sizeof(struct mlx4_wqe_fmr_seg
);
3660 size
+= sizeof(struct mlx4_wqe_fmr_seg
) / 16;
3664 /* No extra segments required for sends */
3669 case MLX4_IB_QPT_TUN_SMI_OWNER
:
3670 err
= build_sriov_qp0_header(qp
, ud_wr(wr
), ctrl
,
3672 if (unlikely(err
)) {
3677 size
+= seglen
/ 16;
3679 case MLX4_IB_QPT_TUN_SMI
:
3680 case MLX4_IB_QPT_TUN_GSI
:
3681 /* this is a UD qp used in MAD responses to slaves. */
3682 set_datagram_seg(wqe
, ud_wr(wr
));
3683 /* set the forced-loopback bit in the data seg av */
3684 *(__be32
*) wqe
|= cpu_to_be32(0x80000000);
3685 wqe
+= sizeof (struct mlx4_wqe_datagram_seg
);
3686 size
+= sizeof (struct mlx4_wqe_datagram_seg
) / 16;
3688 case MLX4_IB_QPT_UD
:
3689 set_datagram_seg(wqe
, ud_wr(wr
));
3690 wqe
+= sizeof (struct mlx4_wqe_datagram_seg
);
3691 size
+= sizeof (struct mlx4_wqe_datagram_seg
) / 16;
3693 if (wr
->opcode
== IB_WR_LSO
) {
3694 err
= build_lso_seg(wqe
, ud_wr(wr
), qp
, &seglen
,
3696 if (unlikely(err
)) {
3700 lso_wqe
= (__be32
*) wqe
;
3702 size
+= seglen
/ 16;
3706 case MLX4_IB_QPT_PROXY_SMI_OWNER
:
3707 err
= build_sriov_qp0_header(qp
, ud_wr(wr
), ctrl
,
3709 if (unlikely(err
)) {
3714 size
+= seglen
/ 16;
3715 /* to start tunnel header on a cache-line boundary */
3716 add_zero_len_inline(wqe
);
3719 build_tunnel_header(ud_wr(wr
), wqe
, &seglen
);
3721 size
+= seglen
/ 16;
3723 case MLX4_IB_QPT_PROXY_SMI
:
3724 case MLX4_IB_QPT_PROXY_GSI
:
3725 /* If we are tunneling special qps, this is a UD qp.
3726 * In this case we first add a UD segment targeting
3727 * the tunnel qp, and then add a header with address
3729 set_tunnel_datagram_seg(to_mdev(ibqp
->device
), wqe
,
3731 qp
->mlx4_ib_qp_type
);
3732 wqe
+= sizeof (struct mlx4_wqe_datagram_seg
);
3733 size
+= sizeof (struct mlx4_wqe_datagram_seg
) / 16;
3734 build_tunnel_header(ud_wr(wr
), wqe
, &seglen
);
3736 size
+= seglen
/ 16;
3739 case MLX4_IB_QPT_SMI
:
3740 case MLX4_IB_QPT_GSI
:
3741 err
= build_mlx_header(qp
, ud_wr(wr
), ctrl
, &seglen
);
3742 if (unlikely(err
)) {
3747 size
+= seglen
/ 16;
3755 * Write data segments in reverse order, so as to
3756 * overwrite cacheline stamp last within each
3757 * cacheline. This avoids issues with WQE
3762 dseg
+= wr
->num_sge
- 1;
3763 size
+= wr
->num_sge
* (sizeof (struct mlx4_wqe_data_seg
) / 16);
3765 /* Add one more inline data segment for ICRC for MLX sends */
3766 if (unlikely(qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_SMI
||
3767 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
||
3768 qp
->mlx4_ib_qp_type
&
3769 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
))) {
3770 set_mlx_icrc_seg(dseg
+ 1);
3771 size
+= sizeof (struct mlx4_wqe_data_seg
) / 16;
3774 for (i
= wr
->num_sge
- 1; i
>= 0; --i
, --dseg
)
3775 set_data_seg(dseg
, wr
->sg_list
+ i
);
3778 * Possibly overwrite stamping in cacheline with LSO
3779 * segment only after making sure all data segments
3783 *lso_wqe
= lso_hdr_sz
;
3785 ctrl
->qpn_vlan
.fence_size
= (wr
->send_flags
& IB_SEND_FENCE
?
3786 MLX4_WQE_CTRL_FENCE
: 0) | size
;
3789 * Make sure descriptor is fully written before
3790 * setting ownership bit (because HW can start
3791 * executing as soon as we do).
3795 if (wr
->opcode
< 0 || wr
->opcode
>= ARRAY_SIZE(mlx4_ib_opcode
)) {
3801 ctrl
->owner_opcode
= mlx4_ib_opcode
[wr
->opcode
] |
3802 (ind
& qp
->sq
.wqe_cnt
? cpu_to_be32(1 << 31) : 0) | blh
;
3805 * We can improve latency by not stamping the last
3806 * send queue WQE until after ringing the doorbell, so
3807 * only stamp here if there are still more WQEs to post.
3810 stamp_send_wqe(qp
, ind
+ qp
->sq_spare_wqes
);
3816 qp
->sq
.head
+= nreq
;
3819 * Make sure that descriptors are written before
3824 writel_relaxed(qp
->doorbell_qpn
,
3825 to_mdev(ibqp
->device
)->uar_map
+ MLX4_SEND_DOORBELL
);
3827 stamp_send_wqe(qp
, ind
+ qp
->sq_spare_wqes
- 1);
3829 qp
->sq_next_wqe
= ind
;
3832 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
}
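
/*
 * Caller-side sketch (illustrative only, all values below are hypothetical):
 * an ULP fills the core verbs structures and calls ib_post_send(), which
 * ends up in mlx4_ib_post_send() for mlx4 QPs.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,	// DMA address of the payload
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.wr_id      = cookie,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = raddr,
 *		.rkey        = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr.wr, &bad_wr);
 */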
3843 static int _mlx4_ib_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
3844 const struct ib_recv_wr
**bad_wr
, bool drain
)
3846 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
3847 struct mlx4_wqe_data_seg
*scat
;
3848 unsigned long flags
;
3854 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
3856 max_gs
= qp
->rq
.max_gs
;
3857 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
3859 if (mdev
->dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
&&
3867 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
3869 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
3870 if (mlx4_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
3876 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
3882 scat
= get_recv_wqe(qp
, ind
);
3884 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
3885 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
)) {
3886 ib_dma_sync_single_for_device(ibqp
->device
,
3887 qp
->sqp_proxy_rcv
[ind
].map
,
3888 sizeof (struct mlx4_ib_proxy_sqp_hdr
),
3891 cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr
));
3892 /* use dma lkey from upper layer entry */
3893 scat
->lkey
= cpu_to_be32(wr
->sg_list
->lkey
);
3894 scat
->addr
= cpu_to_be64(qp
->sqp_proxy_rcv
[ind
].map
);
3899 for (i
= 0; i
< wr
->num_sge
; ++i
)
3900 __set_data_seg(scat
+ i
, wr
->sg_list
+ i
);
3903 scat
[i
].byte_count
= 0;
3904 scat
[i
].lkey
= cpu_to_be32(MLX4_INVALID_LKEY
);
3908 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
3910 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
3915 qp
->rq
.head
+= nreq
;
3918 * Make sure that descriptors are written before
3923 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
3926 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr,
			    struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	u8 port_num = path->sched_queue & 0x40 ? 2 : 1;

	memset(ah_attr, 0, sizeof(*ah_attr));
	if (port_num == 0 || port_num > dev->caps.num_ports)
		return;
	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
			       ((path->sched_queue & 4) << 1));
	else
		rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
	rdma_ah_set_port_num(ah_attr, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				path->static_rate ? path->static_rate - 5 : 0);
	if (path->grh_mylmc & (1 << 7)) {
		rdma_ah_set_grh(ah_attr, NULL,
				be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
				path->mgid_index,
				path->hop_limit,
				(be32_to_cpu(path->tclass_flowlabel)
				 >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}
4010 int mlx4_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
4011 struct ib_qp_init_attr
*qp_init_attr
)
4013 struct mlx4_ib_dev
*dev
= to_mdev(ibqp
->device
);
4014 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
4015 struct mlx4_qp_context context
;
4019 if (ibqp
->rwq_ind_tbl
)
4022 mutex_lock(&qp
->mutex
);
4024 if (qp
->state
== IB_QPS_RESET
) {
4025 qp_attr
->qp_state
= IB_QPS_RESET
;
4029 err
= mlx4_qp_query(dev
->dev
, &qp
->mqp
, &context
);
4035 mlx4_state
= be32_to_cpu(context
.flags
) >> 28;
4037 qp
->state
= to_ib_qp_state(mlx4_state
);
4038 qp_attr
->qp_state
= qp
->state
;
4039 qp_attr
->path_mtu
= context
.mtu_msgmax
>> 5;
4040 qp_attr
->path_mig_state
=
4041 to_ib_mig_state((be32_to_cpu(context
.flags
) >> 11) & 0x3);
4042 qp_attr
->qkey
= be32_to_cpu(context
.qkey
);
4043 qp_attr
->rq_psn
= be32_to_cpu(context
.rnr_nextrecvpsn
) & 0xffffff;
4044 qp_attr
->sq_psn
= be32_to_cpu(context
.next_send_psn
) & 0xffffff;
4045 qp_attr
->dest_qp_num
= be32_to_cpu(context
.remote_qpn
) & 0xffffff;
4046 qp_attr
->qp_access_flags
=
4047 to_ib_qp_access_flags(be32_to_cpu(context
.params2
));
4049 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
||
4050 qp
->ibqp
.qp_type
== IB_QPT_XRC_INI
||
4051 qp
->ibqp
.qp_type
== IB_QPT_XRC_TGT
) {
4052 to_rdma_ah_attr(dev
, &qp_attr
->ah_attr
, &context
.pri_path
);
4053 to_rdma_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
.alt_path
);
4054 qp_attr
->alt_pkey_index
= context
.alt_path
.pkey_index
& 0x7f;
4055 qp_attr
->alt_port_num
=
4056 rdma_ah_get_port_num(&qp_attr
->alt_ah_attr
);
4059 qp_attr
->pkey_index
= context
.pri_path
.pkey_index
& 0x7f;
4060 if (qp_attr
->qp_state
== IB_QPS_INIT
)
4061 qp_attr
->port_num
= qp
->port
;
4063 qp_attr
->port_num
= context
.pri_path
.sched_queue
& 0x40 ? 2 : 1;
4065 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
4066 qp_attr
->sq_draining
= mlx4_state
== MLX4_QP_STATE_SQ_DRAINING
;
4068 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
.params1
) >> 21) & 0x7);
4070 qp_attr
->max_dest_rd_atomic
=
4071 1 << ((be32_to_cpu(context
.params2
) >> 21) & 0x7);
4072 qp_attr
->min_rnr_timer
=
4073 (be32_to_cpu(context
.rnr_nextrecvpsn
) >> 24) & 0x1f;
4074 qp_attr
->timeout
= context
.pri_path
.ackto
>> 3;
4075 qp_attr
->retry_cnt
= (be32_to_cpu(context
.params1
) >> 16) & 0x7;
4076 qp_attr
->rnr_retry
= (be32_to_cpu(context
.params1
) >> 13) & 0x7;
4077 qp_attr
->alt_timeout
= context
.alt_path
.ackto
>> 3;
4080 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
4081 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
4082 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
4084 if (!ibqp
->uobject
) {
4085 qp_attr
->cap
.max_send_wr
= qp
->sq
.wqe_cnt
;
4086 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
4088 qp_attr
->cap
.max_send_wr
= 0;
4089 qp_attr
->cap
.max_send_sge
= 0;
4093 * We don't support inline sends for kernel QPs (yet), and we
4094 * don't know what userspace's value should be.
4096 qp_attr
->cap
.max_inline_data
= 0;
4098 qp_init_attr
->cap
= qp_attr
->cap
;
4100 qp_init_attr
->create_flags
= 0;
4101 if (qp
->flags
& MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
4102 qp_init_attr
->create_flags
|= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
;
4104 if (qp
->flags
& MLX4_IB_QP_LSO
)
4105 qp_init_attr
->create_flags
|= IB_QP_CREATE_IPOIB_UD_LSO
;
4107 if (qp
->flags
& MLX4_IB_QP_NETIF
)
4108 qp_init_attr
->create_flags
|= IB_QP_CREATE_NETIF_QP
;
4110 qp_init_attr
->sq_sig_type
=
4111 qp
->sq_signal_bits
== cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
) ?
4112 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
4115 mutex_unlock(&qp
->mutex
);
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_dev *dev = to_mdev(pd->device)->dev;
	struct ib_qp_init_attr ib_qp_init_attr = {};
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_create_wq ucmd;
	int err, required_cmd_sz;

	if (!udata)
		return ERR_PTR(-EINVAL);

	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
			  sizeof(ucmd.comp_mask);
	if (udata->inlen < required_cmd_sz) {
		pr_debug("invalid inlen\n");
		return ERR_PTR(-EINVAL);
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (udata->outlen)
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->wq_type != IB_WQT_RQ) {
		pr_debug("unsupported wq type %d\n", init_attr->wq_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		pr_debug("unsupported create_flags %u\n",
			 init_attr->create_flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	mutex_init(&qp->mutex);
	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	ib_qp_init_attr.qp_context = init_attr->wq_context;
	ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
	ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
	ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
	ib_qp_init_attr.recv_cq = init_attr->cq;
	ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */

	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
		ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;

	err = create_rq(pd, &ib_qp_init_attr, udata, qp);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->ibwq.event_handler = init_attr->event_handler;
	qp->ibwq.wq_num = qp->mqp.qpn;
	qp->ibwq.state = IB_WQS_RESET;

	return &qp->ibwq;
}
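
/*
 * Map the logical ib_wq state onto the hardware QP state used to back it:
 * RESET stays RESET, RDY maps to RTR (the backing QP only needs to receive),
 * and anything else is treated as ERR.
 */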
static int ib_wq2qp_state(enum ib_wq_state state)
{
	switch (state) {
	case IB_WQS_RESET:
		return IB_QPS_RESET;
	case IB_WQS_RDY:
		return IB_QPS_RTR;
	default:
		return IB_QPS_ERR;
	}
}
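
/*
 * Drive the backing HW QP to the state implied by the requested WQ state.
 * A RESET->RDY request needs two HW transitions (RESET->INIT->RTR); if the
 * second transition fails, the QP is moved back to RESET so HW resources
 * are not left behind.
 */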
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
			      struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
	enum ib_qp_state qp_cur_state;
	enum ib_qp_state qp_new_state;
	int attr_mask;
	int err;

	/* ib_qp.state represents the WQ HW state while ib_wq.state represents
	 * the WQ logic state.
	 */
	qp_cur_state = qp->state;
	qp_new_state = ib_wq2qp_state(new_state);

	if (ib_wq2qp_state(new_state) == qp_cur_state)
		return 0;

	if (new_state == IB_WQS_RDY) {
		struct ib_qp_attr attr = {};

		attr.port_num = qp->port;
		attr_mask = IB_QP_PORT;

		err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
					  attr_mask, IB_QPS_RESET, IB_QPS_INIT,
					  udata);
		if (err) {
			pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
				 ibwq->wq_num);
			return err;
		}

		qp_cur_state = IB_QPS_INIT;
	}

	attr_mask = 0;
	err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
				  qp_cur_state, qp_new_state, udata);

	if (err && (qp_cur_state == IB_QPS_INIT)) {
		qp_new_state = IB_QPS_RESET;
		if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
					attr_mask, IB_QPS_INIT, IB_QPS_RESET,
					udata)) {
			pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
				ibwq->wq_num);
			qp_new_state = IB_QPS_INIT;
		}
	}

	qp->state = qp_new_state;

	return err;
}
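
/*
 * ib_wq modify verb: validate the user command and the requested state
 * transition, then update the state under qp->mutex. The HW QP is only
 * touched once an RSS QP has been attached to this WQ (the rss_usecnt
 * check below), since the port to program comes from that parent QP.
 */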
int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
	struct mlx4_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	enum ib_wq_state cur_state, new_state;
	int err = 0;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
			  sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	if (wq_attr_mask & IB_WQ_FLAGS)
		return -EOPNOTSUPP;

	cur_state = wq_attr->curr_wq_state;
	new_state = wq_attr->wq_state;

	if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
		return -EINVAL;

	if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
		return -EINVAL;

	/* Need to protect against the parent RSS which also may modify WQ
	 * state.
	 */
	mutex_lock(&qp->mutex);

	/* Can update HW state only if a RSS QP has already associated to this
	 * WQ, so we can apply its port on the WQ.
	 */
	if (qp->rss_usecnt)
		err = _mlx4_ib_modify_wq(ibwq, new_state, udata);

	if (!err)
		ibwq->state = new_state;

	mutex_unlock(&qp->mutex);

	return err;
}
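
/*
 * Tear down a WQ: release its flow counter (if one was allocated) and then
 * destroy the underlying RAW_PACKET QP resources.
 */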
int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);

	if (qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);

	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);

	kfree(qp);
	return 0;
}
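
/*
 * RSS indirection table creation. The driver requires the table's WQNs to
 * be consecutive and the base WQN to be aligned to the table size, e.g. a
 * table of size 4 may start at WQN 0x100 (covering 0x100..0x103) but not at
 * 0x101. No HW object is created here; validating the layout and copying
 * the optional response back to user space is all that is needed.
 */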
int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
	struct ib_device *device = rwq_ind_table->device;
	unsigned int base_wqn;
	size_t min_resp_len;
	int i, err = 0;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return -EOPNOTSUPP;

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return -EINVAL;

	if (ind_tbl_size >
	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
			 ind_tbl_size,
			 device->attrs.rss_caps.max_rwq_indirection_table_size);
		return -EINVAL;
	}

	base_wqn = init_attr->ind_tbl[0]->wq_num;

	if (base_wqn % ind_tbl_size) {
		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
			 base_wqn);
		return -EINVAL;
	}

	for (i = 1; i < ind_tbl_size; i++) {
		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
			pr_debug("indirection table's WQNs aren't consecutive\n");
			return -EINVAL;
		}
	}

	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
				       sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
	}

	return err;
}
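
/*
 * Queue drain support. Draining works by moving the QP to the error state,
 * posting one marker WR whose completion handler fires a struct completion,
 * and then waiting for that completion while making sure the CQ is actually
 * being polled (directly, from softirq or from a workqueue).
 */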
struct mlx4_ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
						     struct mlx4_ib_drain_cqe,
						     cqe);

	complete(&cqe->done);
}
/* This function returns only once the drained WR was completed */
static void handle_drain_completion(struct ib_cq *cq,
				    struct mlx4_ib_drain_cqe *sdrain,
				    struct mlx4_ib_dev *dev)
{
	struct mlx4_dev *mdev = dev->dev;

	if (cq->poll_ctx == IB_POLL_DIRECT) {
		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
		return;
	}

	if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		struct mlx4_ib_cq *mcq = to_mcq(cq);
		bool triggered = false;
		unsigned long flags;

		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
		/* Make sure that the CQ handler won't run if wasn't run yet */
		if (!mcq->mcq.reset_notify_added)
			mcq->mcq.reset_notify_added = 1;
		else
			triggered = true;
		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

		if (triggered) {
			/* Wait for any scheduled/running task to be ended */
			switch (cq->poll_ctx) {
			case IB_POLL_SOFTIRQ:
				irq_poll_disable(&cq->iop);
				irq_poll_enable(&cq->iop);
				break;
			case IB_POLL_WORKQUEUE:
				cancel_work_sync(&cq->work);
				break;
			default:
				WARN_ON_ONCE(1);
			}
		}

		/* Run the CQ handler - this makes sure that the drain WR will
		 * be processed if wasn't processed yet.
		 */
		mcq->mcq.comp(&mcq->mcq);
	}

	wait_for_completion(&sdrain->done);
}
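
/*
 * Drain the send queue: move the QP to the error state, post a marker
 * RDMA_WRITE WR that carries sdrain.cqe, and wait for mlx4_ib_drain_qp_done
 * to signal its completion. The 'true' passed to _mlx4_ib_post_send flags
 * this as a drain WR so the internal post path can accept it even during a
 * device error flow.
 */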
void mlx4_ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx4_ib_drain_cqe sdrain;
	const struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_dev *mdev = dev->dev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = mlx4_ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &sdrain, dev);
}
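
/*
 * Drain the receive queue: same scheme as mlx4_ib_drain_sq, but the marker
 * is a plain receive WR posted through _mlx4_ib_post_recv.
 */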
void mlx4_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx4_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;
	int ret;
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_dev *mdev = dev->dev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx4_ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}
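
/*
 * Ordered workqueue used to handle QP async events one at a time; it is
 * allocated at driver init and torn down on cleanup.
 */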
int mlx4_ib_qp_event_init(void)
{
	mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0);
	if (!mlx4_ib_qp_event_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_qp_event_cleanup(void)
{
	destroy_workqueue(mlx4_ib_qp_event_wq);
}