/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
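/*
 * Translation table from IB work-request opcodes to the opcodes written
 * into the MLX5 WQE control segment by mlx5_ib_post_send().
 */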
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
	unsigned int			page_shift;
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
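/*
 * QP async-event callback registered with mlx5_core: translate the hardware
 * event type into the matching ib_event and forward it to the consumer's
 * event handler, if one is registered.
 */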
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    dev->mdev.caps.max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);

	return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << 6);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
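/*
 * Helpers for handing out micro-UAR (uuar) indices within the UAR pages,
 * grouped by latency class; the chosen index later selects the BlueFlame
 * register used for this QP's doorbells.
 */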
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int uuarn = -ENOMEM;
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuarn = i;
			break;
		}
	}

	return uuarn;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (!uuarn) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;
	}
}
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
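/*
 * Create the userspace-owned part of a QP: copy the create request from
 * udata, pick a uuar, pin the user buffer with ib_umem_get(), build the
 * firmware create-QP mailbox (*in) and map the user doorbell record.
 */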
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift;
	int uar_index;
	int npages;
	u32 offset;
	int uuarn;
	int ncont;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
			if (uuarn < 0) {
				mlx5_ib_warn(dev, "uuar allocation failed\n");
				return uuarn;
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	}

	mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}
	mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
		    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
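/*
 * Create a kernel-owned QP: allocate a uuar, size and allocate the work
 * queue buffer and doorbell record, and fill the firmware create-QP
 * mailbox (*in) with the buffer's page list.
 */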
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev.priv.uuari;
	if (init_attr->create_flags)
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(&dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(&dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wr_data);

err_free:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev.priv.uuari, uuarn);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(&dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wr_data);
	mlx5_buf_free(&dev->mdev, &qp->buf);
	free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
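/*
 * Common QP creation path for both user and kernel QPs: size the work
 * queues, build the create-QP mailbox and execute the firmware command.
 */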
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	mlx5_vfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	mlx5_vfree(in);
	return err;
}
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		spin_unlock_irq(&recv_cq->lock);
	}
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	if (qp->state != IB_QPS_RESET)
		if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);
		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 dev->mdev.caps.stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	int err;

	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid = ah->src_path_bits & 0x7f;
	path->rlid = cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
			return -EINVAL;
		}

		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RRE |
					   MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE |
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE | MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_PM_STATE | MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | MLX5_QP_OPTPAR_SRQN |
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT | MLX5_QP_OPTPAR_RWE |
					   MLX5_QP_OPTPAR_RAE,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
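/*
 * Build the MODIFY_QP mailbox from the ib_qp_attr fields selected by
 * attr_mask and execute the state transition.  When a kernel QP moves to
 * RESET, its CQs are cleaned and the SQ/RQ bookkeeping is reset.
 */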
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}

static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}

static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		 MLX5_MKEY_MASK_PAGE_SIZE	|
		 MLX5_MKEY_MASK_START_ADDR	|
		 MLX5_MKEY_MASK_EN_RINVAL	|
		 MLX5_MKEY_MASK_KEY		|
		 MLX5_MKEY_MASK_SMALL_FENCE	|
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}
static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
	u64 mask;

	memset(umr, 0, sizeof(*umr));

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->flags = 1 << 5; /* fail if not free */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		mask =	MLX5_MKEY_MASK_LEN		|
			MLX5_MKEY_MASK_PAGE_SIZE	|
			MLX5_MKEY_MASK_START_ADDR	|
			MLX5_MKEY_MASK_KEY		|
			MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	} else {
		umr->flags = 2 << 5; /* fail if free */
		mask = MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	}

	if (!wr->num_sge)
		umr->flags |= (1 << 7); /* inline */
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
}
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = convert_access(wr->wr.fast_reg.access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(wr->wr.fast_reg.rkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	if (!li) {
		if (unlikely(wr->wr.fast_reg.page_list_len >
			     wr->wr.fast_reg.page_list->max_page_list_len))
			return -ENOMEM;

		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}

	return 0;
}
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}
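/*
 * Post send: for each work request, reserve a slot in the SQ, write the
 * control segment plus the transport-specific and data segments, then
 * update the doorbell record and ring the send doorbell.
 */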
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = &dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	u32 mlx5_opcode;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 opmod = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
		seg = mlx5_get_send_wqe(qp, idx);
		ctrl = seg;
		*(uint32_t *)(seg + 8) = 0;
		ctrl->imm = send_ieth(wr);
		ctrl->fm_ce_se = qp->sq_signal_bits |
			(wr->send_flags & IB_SEND_SIGNALED ?
			 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 MLX5_WQE_CTRL_SOLICITED : 0);

		seg += sizeof(*ctrl);
		size = sizeof(*ctrl) / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_UD:
		case IB_QPT_SMI:
		case IB_QPT_GSI:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		mlx5_opcode = mlx5_ib_opcode[wr->opcode];
		ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
						     mlx5_opcode |
						     ((u32)opmod << 24));
		ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
		ctrl->fm_ce_se |= get_fence(fence, wr);
		qp->fm_cache = next_fence;
		if (unlikely(qp->wq_sig))
			ctrl->signature = wq_sig(ctrl);

		qp->sq.wrid[idx] = wr->wr_id;
		qp->sq.w_list[idx].opcode = mlx5_opcode;
		qp->sq.wqe_head[idx] = qp->sq.head + nreq;
		qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
		qp->sq.w_list[idx].next = qp->sq.cur_post;

		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		spin_lock(&bf->lock);

		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;

		spin_unlock(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
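/*
 * Post receive: write one scatter list per work request into the RQ,
 * terminate short lists with an invalid-lkey entry, and update the
 * receive doorbell record.
 */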
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:		return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:	return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:		return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:		return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:		return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:	return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:		return IB_QPS_ERR;
	default:			return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = &ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
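/*
 * Query the QP context from firmware and translate it back into the
 * ib_qp_attr / ib_qp_init_attr fields expected by the verbs layer.
 */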
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context->qkey);
	qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}