/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
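
/*
 * Note: MLX5_IB_SQ_STRIDE is the log2 of the 64-byte send WQE basic
 * block (1 << 6 == 64 == MLX5_IB_CACHE_LINE_SIZE), so send WQE indices
 * below are converted to byte offsets with "n << MLX5_IB_SQ_STRIDE".
 */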

static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
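
/*
 * The table above maps IB work-request opcodes to the hardware opcodes
 * written into the WQE ctrl segment by mlx5_ib_post_send(); opcodes
 * beyond the end of the table are rejected there before the lookup.
 */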

struct umr_wr {
	u64				virt_addr;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
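
/*
 * WQE addressing sketch: a receive WQE lives at
 *   rq.offset + (n << rq.wqe_shift)
 * within the QP buffer, while send WQEs are always addressed in
 * 64-byte basic blocks, i.e. sq.offset + (n << MLX5_IB_SQ_STRIDE),
 * regardless of the actual send WQE size.
 */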

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
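
/*
 * Event types not handled above (no translation to an ib_event_type)
 * are only logged; for the rest the consumer's event_handler is invoked
 * with the ib_qp as the event element.
 */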

static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    dev->mdev.caps.max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
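
/*
 * Example of the kernel-QP sizing above (assuming a 16-byte
 * mlx5_wqe_data_seg and no WQ signature): max_recv_sge = 3 gives
 * wqe_size = roundup_pow_of_two(3 * 16) = 64, and max_recv_wr = 100
 * gives wq_size = 128 * 64, so rq.wqe_cnt = 128, rq.wqe_shift = 6 and
 * rq.max_gs = 64 / 16 = 4.
 */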

static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);

	return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
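
/*
 * The returned size is the larger of the inline layout and the
 * scatter/gather layout, rounded up to a whole number of
 * MLX5_SEND_WQE_BB (64-byte) basic blocks, since the SQ is always
 * carved into 64-byte slots.
 */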

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
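
/*
 * Note that sq.wqe_cnt is counted in 64-byte basic blocks while
 * sq.max_post (reported back as cap.max_send_wr) is counted in whole
 * WQEs of the maximum size computed above, so one post may consume
 * several basic blocks.
 */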

static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, dev->mdev.caps.max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
			     ucmd->sq_wqe_count, ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << 6);

	return 0;
}
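
/*
 * For user QPs the total buffer is laid out RQ first, then SQ:
 * rq.wqe_cnt << rq.wqe_shift bytes of receive queue followed by
 * sq.wqe_cnt 64-byte send basic blocks (hence the "<< 6").
 */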

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
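
/*
 * A QP has no receive queue of its own when it is an XRC initiator or
 * target, uses an SRQ, is the internal REG_UMR QP, or simply asked for
 * zero receive WRs.
 */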
343 static int first_med_uuar(void)
348 static int next_uuar(int n
)
352 while (((n
% 4) & 2))
358 static int num_med_uuar(struct mlx5_uuar_info
*uuari
)
362 n
= uuari
->num_uars
* MLX5_NON_FP_BF_REGS_PER_PAGE
-
363 uuari
->num_low_latency_uuars
- 1;
365 return n
>= 0 ? n
: 0;
368 static int max_uuari(struct mlx5_uuar_info
*uuari
)
370 return uuari
->num_uars
* 4;
373 static int first_hi_uuar(struct mlx5_uuar_info
*uuari
)
379 med
= num_med_uuar(uuari
);
380 for (t
= 0, i
= first_med_uuar();; i
= next_uuar(i
)) {
389 static int alloc_high_class_uuar(struct mlx5_uuar_info
*uuari
)
393 for (i
= first_hi_uuar(uuari
); i
< max_uuari(uuari
); i
= next_uuar(i
)) {
394 if (!test_bit(i
, uuari
->bitmap
)) {
395 set_bit(i
, uuari
->bitmap
);
404 static int alloc_med_class_uuar(struct mlx5_uuar_info
*uuari
)
406 int minidx
= first_med_uuar();
409 for (i
= first_med_uuar(); i
< first_hi_uuar(uuari
); i
= next_uuar(i
)) {
410 if (uuari
->count
[i
] < uuari
->count
[minidx
])
414 uuari
->count
[minidx
]++;
418 static int alloc_uuar(struct mlx5_uuar_info
*uuari
,
419 enum mlx5_ib_latency_class lat
)
423 mutex_lock(&uuari
->lock
);
425 case MLX5_IB_LATENCY_CLASS_LOW
:
427 uuari
->count
[uuarn
]++;
430 case MLX5_IB_LATENCY_CLASS_MEDIUM
:
431 uuarn
= alloc_med_class_uuar(uuari
);
434 case MLX5_IB_LATENCY_CLASS_HIGH
:
435 uuarn
= alloc_high_class_uuar(uuari
);
438 case MLX5_IB_LATENCY_CLASS_FAST_PATH
:
442 mutex_unlock(&uuari
->lock
);
447 static void free_med_class_uuar(struct mlx5_uuar_info
*uuari
, int uuarn
)
449 clear_bit(uuarn
, uuari
->bitmap
);
450 --uuari
->count
[uuarn
];
453 static void free_high_class_uuar(struct mlx5_uuar_info
*uuari
, int uuarn
)
455 clear_bit(uuarn
, uuari
->bitmap
);
456 --uuari
->count
[uuarn
];
459 static void free_uuar(struct mlx5_uuar_info
*uuari
, int uuarn
)
461 int nuuars
= uuari
->num_uars
* MLX5_BF_REGS_PER_PAGE
;
462 int high_uuar
= nuuars
- uuari
->num_low_latency_uuars
;
464 mutex_lock(&uuari
->lock
);
466 --uuari
->count
[uuarn
];
470 if (uuarn
< high_uuar
) {
471 free_med_class_uuar(uuari
, uuarn
);
475 free_high_class_uuar(uuari
, uuarn
);
478 mutex_unlock(&uuari
->lock
);

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;
	}
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
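
/*
 * Each UAR page carries MLX5_BF_REGS_PER_PAGE blue-flame registers, so
 * a uuarn splits into a UAR page index (uuarn / MLX5_BF_REGS_PER_PAGE)
 * and an offset within that page; only the page index is needed here.
 */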
519 static int create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
520 struct mlx5_ib_qp
*qp
, struct ib_udata
*udata
,
521 struct mlx5_create_qp_mbox_in
**in
,
522 struct mlx5_ib_create_qp_resp
*resp
, int *inlen
)
524 struct mlx5_ib_ucontext
*context
;
525 struct mlx5_ib_create_qp ucmd
;
534 err
= ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
));
536 mlx5_ib_dbg(dev
, "copy failed\n");
540 context
= to_mucontext(pd
->uobject
->context
);
542 * TBD: should come from the verbs when we have the API
544 uuarn
= alloc_uuar(&context
->uuari
, MLX5_IB_LATENCY_CLASS_HIGH
);
546 mlx5_ib_dbg(dev
, "failed to allocate low latency UUAR\n");
547 mlx5_ib_dbg(dev
, "reverting to medium latency\n");
548 uuarn
= alloc_uuar(&context
->uuari
, MLX5_IB_LATENCY_CLASS_MEDIUM
);
550 mlx5_ib_dbg(dev
, "failed to allocate medium latency UUAR\n");
551 mlx5_ib_dbg(dev
, "reverting to high latency\n");
552 uuarn
= alloc_uuar(&context
->uuari
, MLX5_IB_LATENCY_CLASS_LOW
);
554 mlx5_ib_warn(dev
, "uuar allocation failed\n");
560 uar_index
= uuarn_to_uar_index(&context
->uuari
, uuarn
);
561 mlx5_ib_dbg(dev
, "uuarn 0x%x, uar_index 0x%x\n", uuarn
, uar_index
);
563 err
= set_user_buf_size(dev
, qp
, &ucmd
);
567 if (ucmd
.buf_addr
&& qp
->buf_size
) {
568 qp
->umem
= ib_umem_get(pd
->uobject
->context
, ucmd
.buf_addr
,
570 if (IS_ERR(qp
->umem
)) {
571 mlx5_ib_dbg(dev
, "umem_get failed\n");
572 err
= PTR_ERR(qp
->umem
);
580 mlx5_ib_cont_pages(qp
->umem
, ucmd
.buf_addr
, &npages
, &page_shift
,
582 err
= mlx5_ib_get_buf_offset(ucmd
.buf_addr
, page_shift
, &offset
);
584 mlx5_ib_warn(dev
, "bad offset\n");
587 mlx5_ib_dbg(dev
, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
588 ucmd
.buf_addr
, qp
->buf_size
, npages
, page_shift
, ncont
, offset
);
591 *inlen
= sizeof(**in
) + sizeof(*(*in
)->pas
) * ncont
;
592 *in
= mlx5_vzalloc(*inlen
);
598 mlx5_ib_populate_pas(dev
, qp
->umem
, page_shift
, (*in
)->pas
, 0);
599 (*in
)->ctx
.log_pg_sz_remote_qpn
=
600 cpu_to_be32((page_shift
- MLX5_ADAPTER_PAGE_SHIFT
) << 24);
601 (*in
)->ctx
.params2
= cpu_to_be32(offset
<< 6);
603 (*in
)->ctx
.qp_counter_set_usr_page
= cpu_to_be32(uar_index
);
604 resp
->uuar_index
= uuarn
;
607 err
= mlx5_ib_db_map_user(context
, ucmd
.db_addr
, &qp
->db
);
609 mlx5_ib_dbg(dev
, "map failed\n");
613 err
= ib_copy_to_udata(udata
, resp
, sizeof(*resp
));
615 mlx5_ib_dbg(dev
, "copy failed\n");
618 qp
->create_type
= MLX5_QP_USER
;
623 mlx5_ib_db_unmap_user(context
, &qp
->db
);
630 ib_umem_release(qp
->umem
);
633 free_uuar(&context
->uuari
, uuarn
);
637 static void destroy_qp_user(struct ib_pd
*pd
, struct mlx5_ib_qp
*qp
)
639 struct mlx5_ib_ucontext
*context
;
641 context
= to_mucontext(pd
->uobject
->context
);
642 mlx5_ib_db_unmap_user(context
, &qp
->db
);
644 ib_umem_release(qp
->umem
);
645 free_uuar(&context
->uuari
, qp
->uuarn
);
648 static int create_kernel_qp(struct mlx5_ib_dev
*dev
,
649 struct ib_qp_init_attr
*init_attr
,
650 struct mlx5_ib_qp
*qp
,
651 struct mlx5_create_qp_mbox_in
**in
, int *inlen
)
653 enum mlx5_ib_latency_class lc
= MLX5_IB_LATENCY_CLASS_LOW
;
654 struct mlx5_uuar_info
*uuari
;
659 uuari
= &dev
->mdev
.priv
.uuari
;
660 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
)
661 qp
->flags
|= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
663 if (init_attr
->qp_type
== MLX5_IB_QPT_REG_UMR
)
664 lc
= MLX5_IB_LATENCY_CLASS_FAST_PATH
;
666 uuarn
= alloc_uuar(uuari
, lc
);
668 mlx5_ib_dbg(dev
, "\n");
672 qp
->bf
= &uuari
->bfs
[uuarn
];
673 uar_index
= qp
->bf
->uar
->index
;
675 err
= calc_sq_size(dev
, init_attr
, qp
);
677 mlx5_ib_dbg(dev
, "err %d\n", err
);
682 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
683 qp
->buf_size
= err
+ (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
);
685 err
= mlx5_buf_alloc(&dev
->mdev
, qp
->buf_size
, PAGE_SIZE
* 2, &qp
->buf
);
687 mlx5_ib_dbg(dev
, "err %d\n", err
);
691 qp
->sq
.qend
= mlx5_get_send_wqe(qp
, qp
->sq
.wqe_cnt
);
692 *inlen
= sizeof(**in
) + sizeof(*(*in
)->pas
) * qp
->buf
.npages
;
693 *in
= mlx5_vzalloc(*inlen
);
698 (*in
)->ctx
.qp_counter_set_usr_page
= cpu_to_be32(uar_index
);
699 (*in
)->ctx
.log_pg_sz_remote_qpn
=
700 cpu_to_be32((qp
->buf
.page_shift
- MLX5_ADAPTER_PAGE_SHIFT
) << 24);
701 /* Set "fast registration enabled" for all kernel QPs */
702 (*in
)->ctx
.params1
|= cpu_to_be32(1 << 11);
703 (*in
)->ctx
.sq_crq_size
|= cpu_to_be16(1 << 4);
705 mlx5_fill_page_array(&qp
->buf
, (*in
)->pas
);
707 err
= mlx5_db_alloc(&dev
->mdev
, &qp
->db
);
709 mlx5_ib_dbg(dev
, "err %d\n", err
);
716 qp
->sq
.wrid
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.wrid
), GFP_KERNEL
);
717 qp
->sq
.wr_data
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.wr_data
), GFP_KERNEL
);
718 qp
->rq
.wrid
= kmalloc(qp
->rq
.wqe_cnt
* sizeof(*qp
->rq
.wrid
), GFP_KERNEL
);
719 qp
->sq
.w_list
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.w_list
), GFP_KERNEL
);
720 qp
->sq
.wqe_head
= kmalloc(qp
->sq
.wqe_cnt
* sizeof(*qp
->sq
.wqe_head
), GFP_KERNEL
);
722 if (!qp
->sq
.wrid
|| !qp
->sq
.wr_data
|| !qp
->rq
.wrid
||
723 !qp
->sq
.w_list
|| !qp
->sq
.wqe_head
) {
727 qp
->create_type
= MLX5_QP_KERNEL
;
732 mlx5_db_free(&dev
->mdev
, &qp
->db
);
733 kfree(qp
->sq
.wqe_head
);
734 kfree(qp
->sq
.w_list
);
736 kfree(qp
->sq
.wr_data
);
743 mlx5_buf_free(&dev
->mdev
, &qp
->buf
);
746 free_uuar(&dev
->mdev
.priv
.uuari
, uuarn
);
750 static void destroy_qp_kernel(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
752 mlx5_db_free(&dev
->mdev
, &qp
->db
);
753 kfree(qp
->sq
.wqe_head
);
754 kfree(qp
->sq
.w_list
);
756 kfree(qp
->sq
.wr_data
);
758 mlx5_buf_free(&dev
->mdev
, &qp
->buf
);
759 free_uuar(&dev
->mdev
.priv
.uuari
, qp
->bf
->uuarn
);
762 static __be32
get_rx_type(struct mlx5_ib_qp
*qp
, struct ib_qp_init_attr
*attr
)
764 if (attr
->srq
|| (attr
->qp_type
== IB_QPT_XRC_TGT
) ||
765 (attr
->qp_type
== IB_QPT_XRC_INI
))
766 return cpu_to_be32(MLX5_SRQ_RQ
);
767 else if (!qp
->has_rq
)
768 return cpu_to_be32(MLX5_ZERO_LEN_RQ
);
770 return cpu_to_be32(MLX5_NON_ZERO_RQ
);
773 static int is_connected(enum ib_qp_type qp_type
)
775 if (qp_type
== IB_QPT_RC
|| qp_type
== IB_QPT_UC
)
781 static int create_qp_common(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
782 struct ib_qp_init_attr
*init_attr
,
783 struct ib_udata
*udata
, struct mlx5_ib_qp
*qp
)
785 struct mlx5_ib_resources
*devr
= &dev
->devr
;
786 struct mlx5_ib_create_qp_resp resp
;
787 struct mlx5_create_qp_mbox_in
*in
;
788 struct mlx5_ib_create_qp ucmd
;
789 int inlen
= sizeof(*in
);
792 mutex_init(&qp
->mutex
);
793 spin_lock_init(&qp
->sq
.lock
);
794 spin_lock_init(&qp
->rq
.lock
);
796 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
797 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
799 if (pd
&& pd
->uobject
) {
800 if (ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
))) {
801 mlx5_ib_dbg(dev
, "copy failed\n");
805 qp
->wq_sig
= !!(ucmd
.flags
& MLX5_QP_FLAG_SIGNATURE
);
806 qp
->scat_cqe
= !!(ucmd
.flags
& MLX5_QP_FLAG_SCATTER_CQE
);
808 qp
->wq_sig
= !!wq_signature
;
811 qp
->has_rq
= qp_has_rq(init_attr
);
812 err
= set_rq_size(dev
, &init_attr
->cap
, qp
->has_rq
,
813 qp
, (pd
&& pd
->uobject
) ? &ucmd
: NULL
);
815 mlx5_ib_dbg(dev
, "err %d\n", err
);
821 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d)\n", ucmd
.sq_wqe_count
);
822 if (ucmd
.rq_wqe_shift
!= qp
->rq
.wqe_shift
||
823 ucmd
.rq_wqe_count
!= qp
->rq
.wqe_cnt
) {
824 mlx5_ib_dbg(dev
, "invalid rq params\n");
827 if (ucmd
.sq_wqe_count
> dev
->mdev
.caps
.max_wqes
) {
828 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d) > max allowed (%d)\n",
829 ucmd
.sq_wqe_count
, dev
->mdev
.caps
.max_wqes
);
832 err
= create_user_qp(dev
, pd
, qp
, udata
, &in
, &resp
, &inlen
);
834 mlx5_ib_dbg(dev
, "err %d\n", err
);
836 err
= create_kernel_qp(dev
, init_attr
, qp
, &in
, &inlen
);
838 mlx5_ib_dbg(dev
, "err %d\n", err
);
840 qp
->pa_lkey
= to_mpd(pd
)->pa_lkey
;
846 in
= mlx5_vzalloc(sizeof(*in
));
850 qp
->create_type
= MLX5_QP_EMPTY
;
853 if (is_sqp(init_attr
->qp_type
))
854 qp
->port
= init_attr
->port_num
;
856 in
->ctx
.flags
= cpu_to_be32(to_mlx5_st(init_attr
->qp_type
) << 16 |
857 MLX5_QP_PM_MIGRATED
<< 11);
859 if (init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
)
860 in
->ctx
.flags_pd
= cpu_to_be32(to_mpd(pd
? pd
: devr
->p0
)->pdn
);
862 in
->ctx
.flags_pd
= cpu_to_be32(MLX5_QP_LAT_SENSITIVE
);
865 in
->ctx
.flags_pd
|= cpu_to_be32(MLX5_QP_ENABLE_SIG
);
867 if (qp
->scat_cqe
&& is_connected(init_attr
->qp_type
)) {
871 rcqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->recv_cq
);
872 scqe_sz
= mlx5_ib_get_cqe_size(dev
, init_attr
->send_cq
);
875 in
->ctx
.cs_res
= MLX5_RES_SCAT_DATA64_CQE
;
877 in
->ctx
.cs_res
= MLX5_RES_SCAT_DATA32_CQE
;
879 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
) {
881 in
->ctx
.cs_req
= MLX5_REQ_SCAT_DATA64_CQE
;
883 in
->ctx
.cs_req
= MLX5_REQ_SCAT_DATA32_CQE
;
887 if (qp
->rq
.wqe_cnt
) {
888 in
->ctx
.rq_size_stride
= (qp
->rq
.wqe_shift
- 4);
889 in
->ctx
.rq_size_stride
|= ilog2(qp
->rq
.wqe_cnt
) << 3;
892 in
->ctx
.rq_type_srqn
= get_rx_type(qp
, init_attr
);
895 in
->ctx
.sq_crq_size
|= cpu_to_be16(ilog2(qp
->sq
.wqe_cnt
) << 11);
897 in
->ctx
.sq_crq_size
|= cpu_to_be16(0x8000);
899 /* Set default resources */
900 switch (init_attr
->qp_type
) {
902 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
903 in
->ctx
.cqn_send
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
904 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
905 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(init_attr
->xrcd
)->xrcdn
);
908 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(devr
->c0
)->mcq
.cqn
);
909 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x1
)->xrcdn
);
910 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
913 if (init_attr
->srq
) {
914 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x0
)->xrcdn
);
915 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(init_attr
->srq
)->msrq
.srqn
);
917 in
->ctx
.xrcd
= cpu_to_be32(to_mxrcd(devr
->x1
)->xrcdn
);
918 in
->ctx
.rq_type_srqn
|= cpu_to_be32(to_msrq(devr
->s0
)->msrq
.srqn
);
922 if (init_attr
->send_cq
)
923 in
->ctx
.cqn_send
= cpu_to_be32(to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
925 if (init_attr
->recv_cq
)
926 in
->ctx
.cqn_recv
= cpu_to_be32(to_mcq(init_attr
->recv_cq
)->mcq
.cqn
);
928 in
->ctx
.db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
930 err
= mlx5_core_create_qp(&dev
->mdev
, &qp
->mqp
, in
, inlen
);
932 mlx5_ib_dbg(dev
, "create qp failed\n");
937 /* Hardware wants QPN written in big-endian order (after
938 * shifting) for send doorbell. Precompute this value to save
939 * a little bit when posting sends.
941 qp
->doorbell_qpn
= swab32(qp
->mqp
.qpn
<< 8);
943 qp
->mqp
.event
= mlx5_ib_qp_event
;
948 if (qp
->create_type
== MLX5_QP_USER
)
949 destroy_qp_user(pd
, qp
);
950 else if (qp
->create_type
== MLX5_QP_KERNEL
)
951 destroy_qp_kernel(dev
, qp
);
957 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
958 __acquires(&send_cq
->lock
) __acquires(&recv_cq
->lock
)
962 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
963 spin_lock_irq(&send_cq
->lock
);
964 spin_lock_nested(&recv_cq
->lock
,
965 SINGLE_DEPTH_NESTING
);
966 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
967 spin_lock_irq(&send_cq
->lock
);
968 __acquire(&recv_cq
->lock
);
970 spin_lock_irq(&recv_cq
->lock
);
971 spin_lock_nested(&send_cq
->lock
,
972 SINGLE_DEPTH_NESTING
);
975 spin_lock_irq(&send_cq
->lock
);
977 } else if (recv_cq
) {
978 spin_lock_irq(&recv_cq
->lock
);
982 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
, struct mlx5_ib_cq
*recv_cq
)
983 __releases(&send_cq
->lock
) __releases(&recv_cq
->lock
)
987 if (send_cq
->mcq
.cqn
< recv_cq
->mcq
.cqn
) {
988 spin_unlock(&recv_cq
->lock
);
989 spin_unlock_irq(&send_cq
->lock
);
990 } else if (send_cq
->mcq
.cqn
== recv_cq
->mcq
.cqn
) {
991 __release(&recv_cq
->lock
);
992 spin_unlock_irq(&send_cq
->lock
);
994 spin_unlock(&send_cq
->lock
);
995 spin_unlock_irq(&recv_cq
->lock
);
998 spin_unlock_irq(&send_cq
->lock
);
1000 } else if (recv_cq
) {
1001 spin_unlock_irq(&recv_cq
->lock
);
1005 static struct mlx5_ib_pd
*get_pd(struct mlx5_ib_qp
*qp
)
1007 return to_mpd(qp
->ibqp
.pd
);
1010 static void get_cqs(struct mlx5_ib_qp
*qp
,
1011 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
)
1013 switch (qp
->ibqp
.qp_type
) {
1014 case IB_QPT_XRC_TGT
:
1018 case MLX5_IB_QPT_REG_UMR
:
1019 case IB_QPT_XRC_INI
:
1020 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1029 case IB_QPT_RAW_IPV6
:
1030 case IB_QPT_RAW_ETHERTYPE
:
1031 *send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1032 *recv_cq
= to_mcq(qp
->ibqp
.recv_cq
);
1035 case IB_QPT_RAW_PACKET
:
1044 static void destroy_qp_common(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1046 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
1047 struct mlx5_modify_qp_mbox_in
*in
;
1050 in
= kzalloc(sizeof(*in
), GFP_KERNEL
);
1053 if (qp
->state
!= IB_QPS_RESET
)
1054 if (mlx5_core_qp_modify(&dev
->mdev
, to_mlx5_state(qp
->state
),
1055 MLX5_QP_STATE_RST
, in
, sizeof(*in
), &qp
->mqp
))
1056 mlx5_ib_warn(dev
, "mlx5_ib: modify QP %06x to RESET failed\n",
1059 get_cqs(qp
, &send_cq
, &recv_cq
);
1061 if (qp
->create_type
== MLX5_QP_KERNEL
) {
1062 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
1063 __mlx5_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1064 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
1065 if (send_cq
!= recv_cq
)
1066 __mlx5_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1067 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
1070 err
= mlx5_core_destroy_qp(&dev
->mdev
, &qp
->mqp
);
1072 mlx5_ib_warn(dev
, "failed to destroy QP 0x%x\n", qp
->mqp
.qpn
);
1076 if (qp
->create_type
== MLX5_QP_KERNEL
)
1077 destroy_qp_kernel(dev
, qp
);
1078 else if (qp
->create_type
== MLX5_QP_USER
)
1079 destroy_qp_user(&get_pd(qp
)->ibpd
, qp
);
1082 static const char *ib_qp_type_str(enum ib_qp_type type
)
1086 return "IB_QPT_SMI";
1088 return "IB_QPT_GSI";
1095 case IB_QPT_RAW_IPV6
:
1096 return "IB_QPT_RAW_IPV6";
1097 case IB_QPT_RAW_ETHERTYPE
:
1098 return "IB_QPT_RAW_ETHERTYPE";
1099 case IB_QPT_XRC_INI
:
1100 return "IB_QPT_XRC_INI";
1101 case IB_QPT_XRC_TGT
:
1102 return "IB_QPT_XRC_TGT";
1103 case IB_QPT_RAW_PACKET
:
1104 return "IB_QPT_RAW_PACKET";
1105 case MLX5_IB_QPT_REG_UMR
:
1106 return "MLX5_IB_QPT_REG_UMR";
1109 return "Invalid QP type";
1113 struct ib_qp
*mlx5_ib_create_qp(struct ib_pd
*pd
,
1114 struct ib_qp_init_attr
*init_attr
,
1115 struct ib_udata
*udata
)
1117 struct mlx5_ib_dev
*dev
;
1118 struct mlx5_ib_qp
*qp
;
1123 dev
= to_mdev(pd
->device
);
1125 /* being cautious here */
1126 if (init_attr
->qp_type
!= IB_QPT_XRC_TGT
&&
1127 init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
) {
1128 pr_warn("%s: no PD for transport %s\n", __func__
,
1129 ib_qp_type_str(init_attr
->qp_type
));
1130 return ERR_PTR(-EINVAL
);
1132 dev
= to_mdev(to_mxrcd(init_attr
->xrcd
)->ibxrcd
.device
);
1135 switch (init_attr
->qp_type
) {
1136 case IB_QPT_XRC_TGT
:
1137 case IB_QPT_XRC_INI
:
1138 if (!(dev
->mdev
.caps
.flags
& MLX5_DEV_CAP_FLAG_XRC
)) {
1139 mlx5_ib_dbg(dev
, "XRC not supported\n");
1140 return ERR_PTR(-ENOSYS
);
1142 init_attr
->recv_cq
= NULL
;
1143 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
) {
1144 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
1145 init_attr
->send_cq
= NULL
;
1154 case MLX5_IB_QPT_REG_UMR
:
1155 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1157 return ERR_PTR(-ENOMEM
);
1159 err
= create_qp_common(dev
, pd
, init_attr
, udata
, qp
);
1161 mlx5_ib_dbg(dev
, "create_qp_common failed\n");
1163 return ERR_PTR(err
);
1166 if (is_qp0(init_attr
->qp_type
))
1167 qp
->ibqp
.qp_num
= 0;
1168 else if (is_qp1(init_attr
->qp_type
))
1169 qp
->ibqp
.qp_num
= 1;
1171 qp
->ibqp
.qp_num
= qp
->mqp
.qpn
;
1173 mlx5_ib_dbg(dev
, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1174 qp
->ibqp
.qp_num
, qp
->mqp
.qpn
, to_mcq(init_attr
->recv_cq
)->mcq
.cqn
,
1175 to_mcq(init_attr
->send_cq
)->mcq
.cqn
);
1181 case IB_QPT_RAW_IPV6
:
1182 case IB_QPT_RAW_ETHERTYPE
:
1183 case IB_QPT_RAW_PACKET
:
1186 mlx5_ib_dbg(dev
, "unsupported qp type %d\n",
1187 init_attr
->qp_type
);
1188 /* Don't support raw QPs */
1189 return ERR_PTR(-EINVAL
);
1195 int mlx5_ib_destroy_qp(struct ib_qp
*qp
)
1197 struct mlx5_ib_dev
*dev
= to_mdev(qp
->device
);
1198 struct mlx5_ib_qp
*mqp
= to_mqp(qp
);
1200 destroy_qp_common(dev
, mqp
);
1207 static __be32
to_mlx5_access_flags(struct mlx5_ib_qp
*qp
, const struct ib_qp_attr
*attr
,
1210 u32 hw_access_flags
= 0;
1214 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1215 dest_rd_atomic
= attr
->max_dest_rd_atomic
;
1217 dest_rd_atomic
= qp
->resp_depth
;
1219 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1220 access_flags
= attr
->qp_access_flags
;
1222 access_flags
= qp
->atomic_rd_en
;
1224 if (!dest_rd_atomic
)
1225 access_flags
&= IB_ACCESS_REMOTE_WRITE
;
1227 if (access_flags
& IB_ACCESS_REMOTE_READ
)
1228 hw_access_flags
|= MLX5_QP_BIT_RRE
;
1229 if (access_flags
& IB_ACCESS_REMOTE_ATOMIC
)
1230 hw_access_flags
|= (MLX5_QP_BIT_RAE
| MLX5_ATOMIC_MODE_CX
);
1231 if (access_flags
& IB_ACCESS_REMOTE_WRITE
)
1232 hw_access_flags
|= MLX5_QP_BIT_RWE
;
1234 return cpu_to_be32(hw_access_flags
);
1238 MLX5_PATH_FLAG_FL
= 1 << 0,
1239 MLX5_PATH_FLAG_FREE_AR
= 1 << 1,
1240 MLX5_PATH_FLAG_COUNTER
= 1 << 2,
1243 static int ib_rate_to_mlx5(struct mlx5_ib_dev
*dev
, u8 rate
)
1245 if (rate
== IB_RATE_PORT_CURRENT
) {
1247 } else if (rate
< IB_RATE_2_5_GBPS
|| rate
> IB_RATE_300_GBPS
) {
1250 while (rate
!= IB_RATE_2_5_GBPS
&&
1251 !(1 << (rate
+ MLX5_STAT_RATE_OFFSET
) &
1252 dev
->mdev
.caps
.stat_rate_support
))
1256 return rate
+ MLX5_STAT_RATE_OFFSET
;
1259 static int mlx5_set_path(struct mlx5_ib_dev
*dev
, const struct ib_ah_attr
*ah
,
1260 struct mlx5_qp_path
*path
, u8 port
, int attr_mask
,
1261 u32 path_flags
, const struct ib_qp_attr
*attr
)
1265 path
->fl
= (path_flags
& MLX5_PATH_FLAG_FL
) ? 0x80 : 0;
1266 path
->free_ar
= (path_flags
& MLX5_PATH_FLAG_FREE_AR
) ? 0x80 : 0;
1268 if (attr_mask
& IB_QP_PKEY_INDEX
)
1269 path
->pkey_index
= attr
->pkey_index
;
1271 path
->grh_mlid
= ah
->src_path_bits
& 0x7f;
1272 path
->rlid
= cpu_to_be16(ah
->dlid
);
1274 if (ah
->ah_flags
& IB_AH_GRH
) {
1275 path
->grh_mlid
|= 1 << 7;
1276 path
->mgid_index
= ah
->grh
.sgid_index
;
1277 path
->hop_limit
= ah
->grh
.hop_limit
;
1278 path
->tclass_flowlabel
=
1279 cpu_to_be32((ah
->grh
.traffic_class
<< 20) |
1280 (ah
->grh
.flow_label
));
1281 memcpy(path
->rgid
, ah
->grh
.dgid
.raw
, 16);
1284 err
= ib_rate_to_mlx5(dev
, ah
->static_rate
);
1287 path
->static_rate
= err
;
1290 if (ah
->ah_flags
& IB_AH_GRH
) {
1291 if (ah
->grh
.sgid_index
>= dev
->mdev
.caps
.port
[port
- 1].gid_table_len
) {
1292 pr_err(KERN_ERR
"sgid_index (%u) too large. max is %d\n",
1293 ah
->grh
.sgid_index
, dev
->mdev
.caps
.port
[port
- 1].gid_table_len
);
1297 path
->grh_mlid
|= 1 << 7;
1298 path
->mgid_index
= ah
->grh
.sgid_index
;
1299 path
->hop_limit
= ah
->grh
.hop_limit
;
1300 path
->tclass_flowlabel
=
1301 cpu_to_be32((ah
->grh
.traffic_class
<< 20) |
1302 (ah
->grh
.flow_label
));
1303 memcpy(path
->rgid
, ah
->grh
.dgid
.raw
, 16);
1306 if (attr_mask
& IB_QP_TIMEOUT
)
1307 path
->ackto_lt
= attr
->timeout
<< 3;
1309 path
->sl
= ah
->sl
& 0xf;
1314 static enum mlx5_qp_optpar opt_mask
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
][MLX5_QP_ST_MAX
] = {
1315 [MLX5_QP_STATE_INIT
] = {
1316 [MLX5_QP_STATE_INIT
] = {
1317 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
1318 MLX5_QP_OPTPAR_RAE
|
1319 MLX5_QP_OPTPAR_RWE
|
1320 MLX5_QP_OPTPAR_PKEY_INDEX
|
1321 MLX5_QP_OPTPAR_PRI_PORT
,
1322 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
1323 MLX5_QP_OPTPAR_PKEY_INDEX
|
1324 MLX5_QP_OPTPAR_PRI_PORT
,
1325 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1326 MLX5_QP_OPTPAR_Q_KEY
|
1327 MLX5_QP_OPTPAR_PRI_PORT
,
1329 [MLX5_QP_STATE_RTR
] = {
1330 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1331 MLX5_QP_OPTPAR_RRE
|
1332 MLX5_QP_OPTPAR_RAE
|
1333 MLX5_QP_OPTPAR_RWE
|
1334 MLX5_QP_OPTPAR_PKEY_INDEX
,
1335 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1336 MLX5_QP_OPTPAR_RWE
|
1337 MLX5_QP_OPTPAR_PKEY_INDEX
,
1338 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1339 MLX5_QP_OPTPAR_Q_KEY
,
1340 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
1341 MLX5_QP_OPTPAR_Q_KEY
,
1342 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1343 MLX5_QP_OPTPAR_RRE
|
1344 MLX5_QP_OPTPAR_RAE
|
1345 MLX5_QP_OPTPAR_RWE
|
1346 MLX5_QP_OPTPAR_PKEY_INDEX
,
1349 [MLX5_QP_STATE_RTR
] = {
1350 [MLX5_QP_STATE_RTS
] = {
1351 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1352 MLX5_QP_OPTPAR_RRE
|
1353 MLX5_QP_OPTPAR_RAE
|
1354 MLX5_QP_OPTPAR_RWE
|
1355 MLX5_QP_OPTPAR_PM_STATE
|
1356 MLX5_QP_OPTPAR_RNR_TIMEOUT
,
1357 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
1358 MLX5_QP_OPTPAR_RWE
|
1359 MLX5_QP_OPTPAR_PM_STATE
,
1360 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
1363 [MLX5_QP_STATE_RTS
] = {
1364 [MLX5_QP_STATE_RTS
] = {
1365 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
1366 MLX5_QP_OPTPAR_RAE
|
1367 MLX5_QP_OPTPAR_RWE
|
1368 MLX5_QP_OPTPAR_RNR_TIMEOUT
|
1369 MLX5_QP_OPTPAR_PM_STATE
|
1370 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
1371 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
1372 MLX5_QP_OPTPAR_PM_STATE
|
1373 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
1374 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
|
1375 MLX5_QP_OPTPAR_SRQN
|
1376 MLX5_QP_OPTPAR_CQN_RCV
,
1379 [MLX5_QP_STATE_SQER
] = {
1380 [MLX5_QP_STATE_RTS
] = {
1381 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
1382 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_Q_KEY
,
1383 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
,
1384 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RNR_TIMEOUT
|
1385 MLX5_QP_OPTPAR_RWE
|
1386 MLX5_QP_OPTPAR_RAE
|
1392 static int ib_nr_to_mlx5_nr(int ib_mask
)
1397 case IB_QP_CUR_STATE
:
1399 case IB_QP_EN_SQD_ASYNC_NOTIFY
:
1401 case IB_QP_ACCESS_FLAGS
:
1402 return MLX5_QP_OPTPAR_RWE
| MLX5_QP_OPTPAR_RRE
|
1404 case IB_QP_PKEY_INDEX
:
1405 return MLX5_QP_OPTPAR_PKEY_INDEX
;
1407 return MLX5_QP_OPTPAR_PRI_PORT
;
1409 return MLX5_QP_OPTPAR_Q_KEY
;
1411 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH
|
1412 MLX5_QP_OPTPAR_PRI_PORT
;
1413 case IB_QP_PATH_MTU
:
1416 return MLX5_QP_OPTPAR_ACK_TIMEOUT
;
1417 case IB_QP_RETRY_CNT
:
1418 return MLX5_QP_OPTPAR_RETRY_COUNT
;
1419 case IB_QP_RNR_RETRY
:
1420 return MLX5_QP_OPTPAR_RNR_RETRY
;
1423 case IB_QP_MAX_QP_RD_ATOMIC
:
1424 return MLX5_QP_OPTPAR_SRA_MAX
;
1425 case IB_QP_ALT_PATH
:
1426 return MLX5_QP_OPTPAR_ALT_ADDR_PATH
;
1427 case IB_QP_MIN_RNR_TIMER
:
1428 return MLX5_QP_OPTPAR_RNR_TIMEOUT
;
1431 case IB_QP_MAX_DEST_RD_ATOMIC
:
1432 return MLX5_QP_OPTPAR_RRA_MAX
| MLX5_QP_OPTPAR_RWE
|
1433 MLX5_QP_OPTPAR_RRE
| MLX5_QP_OPTPAR_RAE
;
1434 case IB_QP_PATH_MIG_STATE
:
1435 return MLX5_QP_OPTPAR_PM_STATE
;
1438 case IB_QP_DEST_QPN
:
1444 static int ib_mask_to_mlx5_opt(int ib_mask
)
1449 for (i
= 0; i
< 8 * sizeof(int); i
++) {
1450 if ((1 << i
) & ib_mask
)
1451 result
|= ib_nr_to_mlx5_nr(1 << i
);
1457 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
1458 const struct ib_qp_attr
*attr
, int attr_mask
,
1459 enum ib_qp_state cur_state
, enum ib_qp_state new_state
)
1461 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
1462 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
1463 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
1464 struct mlx5_qp_context
*context
;
1465 struct mlx5_modify_qp_mbox_in
*in
;
1466 struct mlx5_ib_pd
*pd
;
1467 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
1468 enum mlx5_qp_optpar optpar
;
1473 in
= kzalloc(sizeof(*in
), GFP_KERNEL
);
1478 err
= to_mlx5_st(ibqp
->qp_type
);
1482 context
->flags
= cpu_to_be32(err
<< 16);
1484 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
1485 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
1487 switch (attr
->path_mig_state
) {
1488 case IB_MIG_MIGRATED
:
1489 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
1492 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
1495 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
1500 if (ibqp
->qp_type
== IB_QPT_GSI
|| ibqp
->qp_type
== IB_QPT_SMI
) {
1501 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
1502 } else if (ibqp
->qp_type
== IB_QPT_UD
||
1503 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
1504 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
1505 } else if (attr_mask
& IB_QP_PATH_MTU
) {
1506 if (attr
->path_mtu
< IB_MTU_256
||
1507 attr
->path_mtu
> IB_MTU_4096
) {
1508 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
1512 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) | dev
->mdev
.caps
.log_max_msg
;
1515 if (attr_mask
& IB_QP_DEST_QPN
)
1516 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
1518 if (attr_mask
& IB_QP_PKEY_INDEX
)
1519 context
->pri_path
.pkey_index
= attr
->pkey_index
;
1521 /* todo implement counter_index functionality */
1523 if (is_sqp(ibqp
->qp_type
))
1524 context
->pri_path
.port
= qp
->port
;
1526 if (attr_mask
& IB_QP_PORT
)
1527 context
->pri_path
.port
= attr
->port_num
;
1529 if (attr_mask
& IB_QP_AV
) {
1530 err
= mlx5_set_path(dev
, &attr
->ah_attr
, &context
->pri_path
,
1531 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
1532 attr_mask
, 0, attr
);
1537 if (attr_mask
& IB_QP_TIMEOUT
)
1538 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
1540 if (attr_mask
& IB_QP_ALT_PATH
) {
1541 err
= mlx5_set_path(dev
, &attr
->alt_ah_attr
, &context
->alt_path
,
1542 attr
->alt_port_num
, attr_mask
, 0, attr
);
1548 get_cqs(qp
, &send_cq
, &recv_cq
);
1550 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
1551 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
1552 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
1553 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
1555 if (attr_mask
& IB_QP_RNR_RETRY
)
1556 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
1558 if (attr_mask
& IB_QP_RETRY_CNT
)
1559 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
1561 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1562 if (attr
->max_rd_atomic
)
1564 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
1567 if (attr_mask
& IB_QP_SQ_PSN
)
1568 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
1570 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1571 if (attr
->max_dest_rd_atomic
)
1573 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
1576 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
))
1577 context
->params2
|= to_mlx5_access_flags(qp
, attr
, attr_mask
);
1579 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
1580 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
1582 if (attr_mask
& IB_QP_RQ_PSN
)
1583 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
1585 if (attr_mask
& IB_QP_QKEY
)
1586 context
->qkey
= cpu_to_be32(attr
->qkey
);
1588 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1589 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
1591 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
1592 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&& attr
->en_sqd_async_notify
)
1597 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1598 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
1601 mlx5_cur
= to_mlx5_state(cur_state
);
1602 mlx5_new
= to_mlx5_state(new_state
);
1603 mlx5_st
= to_mlx5_st(ibqp
->qp_type
);
1607 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
1608 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
1609 in
->optparam
= cpu_to_be32(optpar
);
1610 err
= mlx5_core_qp_modify(&dev
->mdev
, to_mlx5_state(cur_state
),
1611 to_mlx5_state(new_state
), in
, sqd_event
,
1616 qp
->state
= new_state
;
1618 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1619 qp
->atomic_rd_en
= attr
->qp_access_flags
;
1620 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1621 qp
->resp_depth
= attr
->max_dest_rd_atomic
;
1622 if (attr_mask
& IB_QP_PORT
)
1623 qp
->port
= attr
->port_num
;
1624 if (attr_mask
& IB_QP_ALT_PATH
)
1625 qp
->alt_port
= attr
->alt_port_num
;
1628 * If we moved a kernel QP to RESET, clean up all old CQ
1629 * entries and reinitialize the QP.
1631 if (new_state
== IB_QPS_RESET
&& !ibqp
->uobject
) {
1632 mlx5_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1633 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
1634 if (send_cq
!= recv_cq
)
1635 mlx5_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1641 qp
->sq
.cur_post
= 0;
1642 qp
->sq
.last_poll
= 0;
1643 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
1644 qp
->db
.db
[MLX5_SND_DBR
] = 0;
1652 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1653 int attr_mask
, struct ib_udata
*udata
)
1655 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
1656 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
1657 enum ib_qp_state cur_state
, new_state
;
1661 mutex_lock(&qp
->mutex
);
1663 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
1664 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
1666 if (ibqp
->qp_type
!= MLX5_IB_QPT_REG_UMR
&&
1667 !ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
, attr_mask
,
1668 IB_LINK_LAYER_UNSPECIFIED
))
1671 if ((attr_mask
& IB_QP_PORT
) &&
1672 (attr
->port_num
== 0 || attr
->port_num
> dev
->mdev
.caps
.num_ports
))
1675 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1676 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
1677 if (attr
->pkey_index
>= dev
->mdev
.caps
.port
[port
- 1].pkey_table_len
)
1681 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
1682 attr
->max_rd_atomic
> dev
->mdev
.caps
.max_ra_res_qp
)
1685 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
1686 attr
->max_dest_rd_atomic
> dev
->mdev
.caps
.max_ra_req_qp
)
1689 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
1694 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
, new_state
);
1697 mutex_unlock(&qp
->mutex
);
1701 static int mlx5_wq_overflow(struct mlx5_ib_wq
*wq
, int nreq
, struct ib_cq
*ib_cq
)
1703 struct mlx5_ib_cq
*cq
;
1706 cur
= wq
->head
- wq
->tail
;
1707 if (likely(cur
+ nreq
< wq
->max_post
))
1711 spin_lock(&cq
->lock
);
1712 cur
= wq
->head
- wq
->tail
;
1713 spin_unlock(&cq
->lock
);
1715 return cur
+ nreq
>= wq
->max_post
;

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
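
/*
 * get_klm_octo() converts a page-list length into the klm_octowords
 * value placed in the UMR ctrl segment below: the count is padded up
 * to a multiple of 8 entries and then divided by 2 (ALIGN(npages, 8) / 2).
 */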
1746 static __be64
frwr_mkey_mask(void)
1750 result
= MLX5_MKEY_MASK_LEN
|
1751 MLX5_MKEY_MASK_PAGE_SIZE
|
1752 MLX5_MKEY_MASK_START_ADDR
|
1753 MLX5_MKEY_MASK_EN_RINVAL
|
1754 MLX5_MKEY_MASK_KEY
|
1760 MLX5_MKEY_MASK_SMALL_FENCE
|
1761 MLX5_MKEY_MASK_FREE
;
1763 return cpu_to_be64(result
);
1766 static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
1767 struct ib_send_wr
*wr
, int li
)
1769 memset(umr
, 0, sizeof(*umr
));
1772 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
1773 umr
->flags
= 1 << 7;
1777 umr
->flags
= (1 << 5); /* fail if not free */
1778 umr
->klm_octowords
= get_klm_octo(wr
->wr
.fast_reg
.page_list_len
);
1779 umr
->mkey_mask
= frwr_mkey_mask();
1782 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg
*umr
,
1783 struct ib_send_wr
*wr
)
1785 struct umr_wr
*umrwr
= (struct umr_wr
*)&wr
->wr
.fast_reg
;
1788 memset(umr
, 0, sizeof(*umr
));
1790 if (!(wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
)) {
1791 umr
->flags
= 1 << 5; /* fail if not free */
1792 umr
->klm_octowords
= get_klm_octo(umrwr
->npages
);
1793 mask
= MLX5_MKEY_MASK_LEN
|
1794 MLX5_MKEY_MASK_PAGE_SIZE
|
1795 MLX5_MKEY_MASK_START_ADDR
|
1799 MLX5_MKEY_MASK_KEY
|
1803 MLX5_MKEY_MASK_FREE
;
1804 umr
->mkey_mask
= cpu_to_be64(mask
);
1806 umr
->flags
= 2 << 5; /* fail if free */
1807 mask
= MLX5_MKEY_MASK_FREE
;
1808 umr
->mkey_mask
= cpu_to_be64(mask
);
1812 umr
->flags
|= (1 << 7); /* inline */
1815 static u8
get_umr_flags(int acc
)
1817 return (acc
& IB_ACCESS_REMOTE_ATOMIC
? MLX5_PERM_ATOMIC
: 0) |
1818 (acc
& IB_ACCESS_REMOTE_WRITE
? MLX5_PERM_REMOTE_WRITE
: 0) |
1819 (acc
& IB_ACCESS_REMOTE_READ
? MLX5_PERM_REMOTE_READ
: 0) |
1820 (acc
& IB_ACCESS_LOCAL_WRITE
? MLX5_PERM_LOCAL_WRITE
: 0) |
1821 MLX5_PERM_LOCAL_READ
| MLX5_PERM_UMR_EN
| MLX5_ACCESS_MODE_MTT
;
1824 static void set_mkey_segment(struct mlx5_mkey_seg
*seg
, struct ib_send_wr
*wr
,
1827 memset(seg
, 0, sizeof(*seg
));
1829 seg
->status
= 1 << 6;
1833 seg
->flags
= get_umr_flags(wr
->wr
.fast_reg
.access_flags
);
1834 *writ
= seg
->flags
& (MLX5_PERM_LOCAL_WRITE
| IB_ACCESS_REMOTE_WRITE
);
1835 seg
->qpn_mkey7_0
= cpu_to_be32((wr
->wr
.fast_reg
.rkey
& 0xff) | 0xffffff00);
1836 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
);
1837 seg
->start_addr
= cpu_to_be64(wr
->wr
.fast_reg
.iova_start
);
1838 seg
->len
= cpu_to_be64(wr
->wr
.fast_reg
.length
);
1839 seg
->xlt_oct_size
= cpu_to_be32((wr
->wr
.fast_reg
.page_list_len
+ 1) / 2);
1840 seg
->log2_page_size
= wr
->wr
.fast_reg
.page_shift
;
1843 static void set_reg_mkey_segment(struct mlx5_mkey_seg
*seg
, struct ib_send_wr
*wr
)
1845 memset(seg
, 0, sizeof(*seg
));
1846 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UNREG
) {
1847 seg
->status
= 1 << 6;
1851 seg
->flags
= convert_access(wr
->wr
.fast_reg
.access_flags
);
1852 seg
->flags_pd
= cpu_to_be32(to_mpd((struct ib_pd
*)wr
->wr
.fast_reg
.page_list
)->pdn
);
1853 seg
->start_addr
= cpu_to_be64(wr
->wr
.fast_reg
.iova_start
);
1854 seg
->len
= cpu_to_be64(wr
->wr
.fast_reg
.length
);
1855 seg
->log2_page_size
= wr
->wr
.fast_reg
.page_shift
;
1856 seg
->qpn_mkey7_0
= cpu_to_be32(0xffffff00 |
1857 mlx5_mkey_variant(wr
->wr
.fast_reg
.rkey
));
1860 static void set_frwr_pages(struct mlx5_wqe_data_seg
*dseg
,
1861 struct ib_send_wr
*wr
,
1862 struct mlx5_core_dev
*mdev
,
1863 struct mlx5_ib_pd
*pd
,
1866 struct mlx5_ib_fast_reg_page_list
*mfrpl
= to_mfrpl(wr
->wr
.fast_reg
.page_list
);
1867 u64
*page_list
= wr
->wr
.fast_reg
.page_list
->page_list
;
1868 u64 perm
= MLX5_EN_RD
| (writ
? MLX5_EN_WR
: 0);
1871 for (i
= 0; i
< wr
->wr
.fast_reg
.page_list_len
; i
++)
1872 mfrpl
->mapped_page_list
[i
] = cpu_to_be64(page_list
[i
] | perm
);
1873 dseg
->addr
= cpu_to_be64(mfrpl
->map
);
1874 dseg
->byte_count
= cpu_to_be32(ALIGN(sizeof(u64
) * wr
->wr
.fast_reg
.page_list_len
, 64));
1875 dseg
->lkey
= cpu_to_be32(pd
->pa_lkey
);
1878 static __be32
send_ieth(struct ib_send_wr
*wr
)
1880 switch (wr
->opcode
) {
1881 case IB_WR_SEND_WITH_IMM
:
1882 case IB_WR_RDMA_WRITE_WITH_IMM
:
1883 return wr
->ex
.imm_data
;
1885 case IB_WR_SEND_WITH_INV
:
1886 return cpu_to_be32(wr
->ex
.invalidate_rkey
);
1893 static u8
calc_sig(void *wqe
, int size
)
1899 for (i
= 0; i
< size
; i
++)
1905 static u8
wq_sig(void *wqe
)
1907 return calc_sig(wqe
, (*((u8
*)wqe
+ 8) & 0x3f) << 4);
1910 static int set_data_inl_seg(struct mlx5_ib_qp
*qp
, struct ib_send_wr
*wr
,
1913 struct mlx5_wqe_inline_seg
*seg
;
1914 void *qend
= qp
->sq
.qend
;
1922 wqe
+= sizeof(*seg
);
1923 for (i
= 0; i
< wr
->num_sge
; i
++) {
1924 addr
= (void *)(unsigned long)(wr
->sg_list
[i
].addr
);
1925 len
= wr
->sg_list
[i
].length
;
1928 if (unlikely(inl
> qp
->max_inline_data
))
1931 if (unlikely(wqe
+ len
> qend
)) {
1933 memcpy(wqe
, addr
, copy
);
1936 wqe
= mlx5_get_send_wqe(qp
, 0);
1938 memcpy(wqe
, addr
, len
);
1942 seg
->byte_count
= cpu_to_be32(inl
| MLX5_INLINE_SEG
);
1944 *sz
= ALIGN(inl
+ sizeof(seg
->byte_count
), 16) / 16;
1949 static int set_frwr_li_wr(void **seg
, struct ib_send_wr
*wr
, int *size
,
1950 struct mlx5_core_dev
*mdev
, struct mlx5_ib_pd
*pd
, struct mlx5_ib_qp
*qp
)
1955 li
= wr
->opcode
== IB_WR_LOCAL_INV
? 1 : 0;
1956 if (unlikely(wr
->send_flags
& IB_SEND_INLINE
))
1959 set_frwr_umr_segment(*seg
, wr
, li
);
1960 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
1961 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
1962 if (unlikely((*seg
== qp
->sq
.qend
)))
1963 *seg
= mlx5_get_send_wqe(qp
, 0);
1964 set_mkey_segment(*seg
, wr
, li
, &writ
);
1965 *seg
+= sizeof(struct mlx5_mkey_seg
);
1966 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
1967 if (unlikely((*seg
== qp
->sq
.qend
)))
1968 *seg
= mlx5_get_send_wqe(qp
, 0);
1970 if (unlikely(wr
->wr
.fast_reg
.page_list_len
>
1971 wr
->wr
.fast_reg
.page_list
->max_page_list_len
))
1974 set_frwr_pages(*seg
, wr
, mdev
, pd
, writ
);
1975 *seg
+= sizeof(struct mlx5_wqe_data_seg
);
1976 *size
+= (sizeof(struct mlx5_wqe_data_seg
) / 16);
1981 static void dump_wqe(struct mlx5_ib_qp
*qp
, int idx
, int size_16
)
1987 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp
, tidx
));
1988 for (i
= 0, j
= 0; i
< size_16
* 4; i
+= 4, j
+= 4) {
1989 if ((i
& 0xf) == 0) {
1990 void *buf
= mlx5_get_send_wqe(qp
, tidx
);
1991 tidx
= (tidx
+ 1) & (qp
->sq
.wqe_cnt
- 1);
1995 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p
[j
]),
1996 be32_to_cpu(p
[j
+ 1]), be32_to_cpu(p
[j
+ 2]),
1997 be32_to_cpu(p
[j
+ 3]));
2001 static void mlx5_bf_copy(u64 __iomem
*dst
, u64
*src
,
2002 unsigned bytecnt
, struct mlx5_ib_qp
*qp
)
2004 while (bytecnt
> 0) {
2005 __iowrite64_copy(dst
++, src
++, 8);
2006 __iowrite64_copy(dst
++, src
++, 8);
2007 __iowrite64_copy(dst
++, src
++, 8);
2008 __iowrite64_copy(dst
++, src
++, 8);
2009 __iowrite64_copy(dst
++, src
++, 8);
2010 __iowrite64_copy(dst
++, src
++, 8);
2011 __iowrite64_copy(dst
++, src
++, 8);
2012 __iowrite64_copy(dst
++, src
++, 8);
2014 if (unlikely(src
== qp
->sq
.qend
))
2015 src
= mlx5_get_send_wqe(qp
, 0);
2019 static u8
get_fence(u8 fence
, struct ib_send_wr
*wr
)
2021 if (unlikely(wr
->opcode
== IB_WR_LOCAL_INV
&&
2022 wr
->send_flags
& IB_SEND_FENCE
))
2023 return MLX5_FENCE_MODE_STRONG_ORDERING
;
2025 if (unlikely(fence
)) {
2026 if (wr
->send_flags
& IB_SEND_FENCE
)
2027 return MLX5_FENCE_MODE_SMALL_AND_FENCE
;
2036 int mlx5_ib_post_send(struct ib_qp
*ibqp
, struct ib_send_wr
*wr
,
2037 struct ib_send_wr
**bad_wr
)
2039 struct mlx5_wqe_ctrl_seg
*ctrl
= NULL
; /* compiler warning */
2040 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2041 struct mlx5_core_dev
*mdev
= &dev
->mdev
;
2042 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2043 struct mlx5_wqe_data_seg
*dpseg
;
2044 struct mlx5_wqe_xrc_seg
*xrc
;
2045 struct mlx5_bf
*bf
= qp
->bf
;
2046 int uninitialized_var(size
);
2047 void *qend
= qp
->sq
.qend
;
2048 unsigned long flags
;
2061 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
2063 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
2064 if (unlikely(wr
->opcode
>= sizeof(mlx5_ib_opcode
) / sizeof(mlx5_ib_opcode
[0]))) {
2065 mlx5_ib_warn(dev
, "\n");
2071 if (unlikely(mlx5_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
))) {
2072 mlx5_ib_warn(dev
, "\n");
2078 fence
= qp
->fm_cache
;
2079 num_sge
= wr
->num_sge
;
2080 if (unlikely(num_sge
> qp
->sq
.max_gs
)) {
2081 mlx5_ib_warn(dev
, "\n");
2087 idx
= qp
->sq
.cur_post
& (qp
->sq
.wqe_cnt
- 1);
2088 seg
= mlx5_get_send_wqe(qp
, idx
);
2090 *(uint32_t *)(seg
+ 8) = 0;
2091 ctrl
->imm
= send_ieth(wr
);
2092 ctrl
->fm_ce_se
= qp
->sq_signal_bits
|
2093 (wr
->send_flags
& IB_SEND_SIGNALED
?
2094 MLX5_WQE_CTRL_CQ_UPDATE
: 0) |
2095 (wr
->send_flags
& IB_SEND_SOLICITED
?
2096 MLX5_WQE_CTRL_SOLICITED
: 0);
2098 seg
+= sizeof(*ctrl
);
2099 size
= sizeof(*ctrl
) / 16;
2101 switch (ibqp
->qp_type
) {
2102 case IB_QPT_XRC_INI
:
2104 xrc
->xrc_srqn
= htonl(wr
->xrc_remote_srq_num
);
2105 seg
+= sizeof(*xrc
);
2106 size
+= sizeof(*xrc
) / 16;
2109 switch (wr
->opcode
) {
2110 case IB_WR_RDMA_READ
:
2111 case IB_WR_RDMA_WRITE
:
2112 case IB_WR_RDMA_WRITE_WITH_IMM
:
2113 set_raddr_seg(seg
, wr
->wr
.rdma
.remote_addr
,
2115 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
2116 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
2119 case IB_WR_ATOMIC_CMP_AND_SWP
:
2120 case IB_WR_ATOMIC_FETCH_AND_ADD
:
2121 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
2122 mlx5_ib_warn(dev
, "Atomic operations are not supported yet\n");
2127 case IB_WR_LOCAL_INV
:
2128 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
2129 qp
->sq
.wr_data
[idx
] = IB_WR_LOCAL_INV
;
2130 ctrl
->imm
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
2131 err
= set_frwr_li_wr(&seg
, wr
, &size
, mdev
, to_mpd(ibqp
->pd
), qp
);
2133 mlx5_ib_warn(dev
, "\n");
2140 case IB_WR_FAST_REG_MR
:
2141 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
2142 qp
->sq
.wr_data
[idx
] = IB_WR_FAST_REG_MR
;
2143 ctrl
->imm
= cpu_to_be32(wr
->wr
.fast_reg
.rkey
);
2144 err
= set_frwr_li_wr(&seg
, wr
, &size
, mdev
, to_mpd(ibqp
->pd
), qp
);
2146 mlx5_ib_warn(dev
, "\n");
2159 switch (wr
->opcode
) {
2160 case IB_WR_RDMA_WRITE
:
2161 case IB_WR_RDMA_WRITE_WITH_IMM
:
2162 set_raddr_seg(seg
, wr
->wr
.rdma
.remote_addr
,
2164 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
2165 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
2176 set_datagram_seg(seg
, wr
);
2177 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
2178 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
2179 if (unlikely((seg
== qend
)))
2180 seg
= mlx5_get_send_wqe(qp
, 0);
2183 case MLX5_IB_QPT_REG_UMR
:
2184 if (wr
->opcode
!= MLX5_IB_WR_UMR
) {
2186 mlx5_ib_warn(dev
, "bad opcode\n");
2189 qp
->sq
.wr_data
[idx
] = MLX5_IB_WR_UMR
;
2190 ctrl
->imm
= cpu_to_be32(wr
->wr
.fast_reg
.rkey
);
2191 set_reg_umr_segment(seg
, wr
);
2192 seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
2193 size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
2194 if (unlikely((seg
== qend
)))
2195 seg
= mlx5_get_send_wqe(qp
, 0);
2196 set_reg_mkey_segment(seg
, wr
);
2197 seg
+= sizeof(struct mlx5_mkey_seg
);
2198 size
+= sizeof(struct mlx5_mkey_seg
) / 16;
2199 if (unlikely((seg
== qend
)))
2200 seg
= mlx5_get_send_wqe(qp
, 0);
2207 if (wr
->send_flags
& IB_SEND_INLINE
&& num_sge
) {
2208 int uninitialized_var(sz
);
2210 err
= set_data_inl_seg(qp
, wr
, seg
, &sz
);
2211 if (unlikely(err
)) {
2212 mlx5_ib_warn(dev
, "\n");
2220 for (i
= 0; i
< num_sge
; i
++) {
2221 if (unlikely(dpseg
== qend
)) {
2222 seg
= mlx5_get_send_wqe(qp
, 0);
2225 if (likely(wr
->sg_list
[i
].length
)) {
2226 set_data_ptr_seg(dpseg
, wr
->sg_list
+ i
);
2227 size
+= sizeof(struct mlx5_wqe_data_seg
) / 16;
2233 mlx5_opcode
= mlx5_ib_opcode
[wr
->opcode
];
2234 ctrl
->opmod_idx_opcode
= cpu_to_be32(((u32
)(qp
->sq
.cur_post
) << 8) |
2236 ((u32
)opmod
<< 24));
2237 ctrl
->qpn_ds
= cpu_to_be32(size
| (qp
->mqp
.qpn
<< 8));
2238 ctrl
->fm_ce_se
|= get_fence(fence
, wr
);
2239 qp
->fm_cache
= next_fence
;
2240 if (unlikely(qp
->wq_sig
))
2241 ctrl
->signature
= wq_sig(ctrl
);
2243 qp
->sq
.wrid
[idx
] = wr
->wr_id
;
2244 qp
->sq
.w_list
[idx
].opcode
= mlx5_opcode
;
2245 qp
->sq
.wqe_head
[idx
] = qp
->sq
.head
+ nreq
;
2246 qp
->sq
.cur_post
+= DIV_ROUND_UP(size
* 16, MLX5_SEND_WQE_BB
);
2247 qp
->sq
.w_list
[idx
].next
= qp
->sq
.cur_post
;
2250 dump_wqe(qp
, idx
, size
);
2255 qp
->sq
.head
+= nreq
;
2257 /* Make sure that descriptors are written before
2258 * updating doorbell record and ringing the doorbell
2262 qp
->db
.db
[MLX5_SND_DBR
] = cpu_to_be32(qp
->sq
.cur_post
);
2264 /* Make sure doorbell record is visible to the HCA before
2265 * we hit doorbell */
2269 spin_lock(&bf
->lock
);
2272 if (0 && nreq
== 1 && bf
->uuarn
&& inl
&& size
> 1 && size
<= bf
->buf_size
/ 16) {
2273 mlx5_bf_copy(bf
->reg
+ bf
->offset
, (u64
*)ctrl
, ALIGN(size
* 16, 64), qp
);
2276 mlx5_write64((__be32
*)ctrl
, bf
->regreg
+ bf
->offset
,
2277 MLX5_GET_DOORBELL_LOCK(&bf
->lock32
));
2278 /* Make sure doorbells don't leak out of SQ spinlock
2279 * and reach the HCA out of order.
2283 bf
->offset
^= bf
->buf_size
;
2285 spin_unlock(&bf
->lock
);
2288 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
2293 static void set_sig_seg(struct mlx5_rwqe_sig
*sig
, int size
)
2295 sig
->signature
= calc_sig(sig
, size
);
2298 int mlx5_ib_post_recv(struct ib_qp
*ibqp
, struct ib_recv_wr
*wr
,
2299 struct ib_recv_wr
**bad_wr
)
2301 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2302 struct mlx5_wqe_data_seg
*scat
;
2303 struct mlx5_rwqe_sig
*sig
;
2304 unsigned long flags
;
2310 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
2312 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
2314 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
2315 if (mlx5_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
2321 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
2327 scat
= get_recv_wqe(qp
, ind
);
2331 for (i
= 0; i
< wr
->num_sge
; i
++)
2332 set_data_ptr_seg(scat
+ i
, wr
->sg_list
+ i
);
2334 if (i
< qp
->rq
.max_gs
) {
2335 scat
[i
].byte_count
= 0;
2336 scat
[i
].lkey
= cpu_to_be32(MLX5_INVALID_LKEY
);
2341 sig
= (struct mlx5_rwqe_sig
*)scat
;
2342 set_sig_seg(sig
, (qp
->rq
.max_gs
+ 1) << 2);
2345 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
2347 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
2352 qp
->rq
.head
+= nreq
;
2354 /* Make sure that descriptors are written before
2359 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
2362 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
2367 static inline enum ib_qp_state
to_ib_qp_state(enum mlx5_qp_state mlx5_state
)
2369 switch (mlx5_state
) {
2370 case MLX5_QP_STATE_RST
: return IB_QPS_RESET
;
2371 case MLX5_QP_STATE_INIT
: return IB_QPS_INIT
;
2372 case MLX5_QP_STATE_RTR
: return IB_QPS_RTR
;
2373 case MLX5_QP_STATE_RTS
: return IB_QPS_RTS
;
2374 case MLX5_QP_STATE_SQ_DRAINING
:
2375 case MLX5_QP_STATE_SQD
: return IB_QPS_SQD
;
2376 case MLX5_QP_STATE_SQER
: return IB_QPS_SQE
;
2377 case MLX5_QP_STATE_ERR
: return IB_QPS_ERR
;
2382 static inline enum ib_mig_state
to_ib_mig_state(int mlx5_mig_state
)
2384 switch (mlx5_mig_state
) {
2385 case MLX5_QP_PM_ARMED
: return IB_MIG_ARMED
;
2386 case MLX5_QP_PM_REARM
: return IB_MIG_REARM
;
2387 case MLX5_QP_PM_MIGRATED
: return IB_MIG_MIGRATED
;
2392 static int to_ib_qp_access_flags(int mlx5_flags
)
2396 if (mlx5_flags
& MLX5_QP_BIT_RRE
)
2397 ib_flags
|= IB_ACCESS_REMOTE_READ
;
2398 if (mlx5_flags
& MLX5_QP_BIT_RWE
)
2399 ib_flags
|= IB_ACCESS_REMOTE_WRITE
;
2400 if (mlx5_flags
& MLX5_QP_BIT_RAE
)
2401 ib_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
2406 static void to_ib_ah_attr(struct mlx5_ib_dev
*ibdev
, struct ib_ah_attr
*ib_ah_attr
,
2407 struct mlx5_qp_path
*path
)
2409 struct mlx5_core_dev
*dev
= &ibdev
->mdev
;
2411 memset(ib_ah_attr
, 0, sizeof(*ib_ah_attr
));
2412 ib_ah_attr
->port_num
= path
->port
;
2414 if (ib_ah_attr
->port_num
== 0 || ib_ah_attr
->port_num
> dev
->caps
.num_ports
)
2417 ib_ah_attr
->sl
= path
->sl
& 0xf;
2419 ib_ah_attr
->dlid
= be16_to_cpu(path
->rlid
);
2420 ib_ah_attr
->src_path_bits
= path
->grh_mlid
& 0x7f;
2421 ib_ah_attr
->static_rate
= path
->static_rate
? path
->static_rate
- 5 : 0;
2422 ib_ah_attr
->ah_flags
= (path
->grh_mlid
& (1 << 7)) ? IB_AH_GRH
: 0;
2423 if (ib_ah_attr
->ah_flags
) {
2424 ib_ah_attr
->grh
.sgid_index
= path
->mgid_index
;
2425 ib_ah_attr
->grh
.hop_limit
= path
->hop_limit
;
2426 ib_ah_attr
->grh
.traffic_class
=
2427 (be32_to_cpu(path
->tclass_flowlabel
) >> 20) & 0xff;
2428 ib_ah_attr
->grh
.flow_label
=
2429 be32_to_cpu(path
->tclass_flowlabel
) & 0xfffff;
2430 memcpy(ib_ah_attr
->grh
.dgid
.raw
,
2431 path
->rgid
, sizeof(ib_ah_attr
->grh
.dgid
.raw
));
2435 int mlx5_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
2436 struct ib_qp_init_attr
*qp_init_attr
)
2438 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
2439 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
2440 struct mlx5_query_qp_mbox_out
*outb
;
2441 struct mlx5_qp_context
*context
;
2445 mutex_lock(&qp
->mutex
);
2446 outb
= kzalloc(sizeof(*outb
), GFP_KERNEL
);
2451 context
= &outb
->ctx
;
2452 err
= mlx5_core_qp_query(&dev
->mdev
, &qp
->mqp
, outb
, sizeof(*outb
));
2456 mlx5_state
= be32_to_cpu(context
->flags
) >> 28;
2458 qp
->state
= to_ib_qp_state(mlx5_state
);
2459 qp_attr
->qp_state
= qp
->state
;
2460 qp_attr
->path_mtu
= context
->mtu_msgmax
>> 5;
2461 qp_attr
->path_mig_state
=
2462 to_ib_mig_state((be32_to_cpu(context
->flags
) >> 11) & 0x3);
2463 qp_attr
->qkey
= be32_to_cpu(context
->qkey
);
2464 qp_attr
->rq_psn
= be32_to_cpu(context
->rnr_nextrecvpsn
) & 0xffffff;
2465 qp_attr
->sq_psn
= be32_to_cpu(context
->next_send_psn
) & 0xffffff;
2466 qp_attr
->dest_qp_num
= be32_to_cpu(context
->log_pg_sz_remote_qpn
) & 0xffffff;
2467 qp_attr
->qp_access_flags
=
2468 to_ib_qp_access_flags(be32_to_cpu(context
->params2
));
2470 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
) {
2471 to_ib_ah_attr(dev
, &qp_attr
->ah_attr
, &context
->pri_path
);
2472 to_ib_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
->alt_path
);
2473 qp_attr
->alt_pkey_index
= context
->alt_path
.pkey_index
& 0x7f;
2474 qp_attr
->alt_port_num
= qp_attr
->alt_ah_attr
.port_num
;
2477 qp_attr
->pkey_index
= context
->pri_path
.pkey_index
& 0x7f;
2478 qp_attr
->port_num
= context
->pri_path
.port
;
2480 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2481 qp_attr
->sq_draining
= mlx5_state
== MLX5_QP_STATE_SQ_DRAINING
;
2483 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
->params1
) >> 21) & 0x7);
2485 qp_attr
->max_dest_rd_atomic
=
2486 1 << ((be32_to_cpu(context
->params2
) >> 21) & 0x7);
2487 qp_attr
->min_rnr_timer
=
2488 (be32_to_cpu(context
->rnr_nextrecvpsn
) >> 24) & 0x1f;
2489 qp_attr
->timeout
= context
->pri_path
.ackto_lt
>> 3;
2490 qp_attr
->retry_cnt
= (be32_to_cpu(context
->params1
) >> 16) & 0x7;
2491 qp_attr
->rnr_retry
= (be32_to_cpu(context
->params1
) >> 13) & 0x7;
2492 qp_attr
->alt_timeout
= context
->alt_path
.ackto_lt
>> 3;
2493 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
2494 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
2495 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
2497 if (!ibqp
->uobject
) {
2498 qp_attr
->cap
.max_send_wr
= qp
->sq
.wqe_cnt
;
2499 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
2501 qp_attr
->cap
.max_send_wr
= 0;
2502 qp_attr
->cap
.max_send_sge
= 0;
2505 /* We don't support inline sends for kernel QPs (yet), and we
2506 * don't know what userspace's value should be.
2508 qp_attr
->cap
.max_inline_data
= 0;
2510 qp_init_attr
->cap
= qp_attr
->cap
;
2512 qp_init_attr
->create_flags
= 0;
2513 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
2514 qp_init_attr
->create_flags
|= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
;
2516 qp_init_attr
->sq_sig_type
= qp
->sq_signal_bits
& MLX5_WQE_CTRL_CQ_UPDATE
?
2517 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
2523 mutex_unlock(&qp
->mutex
);

struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}