/*
 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include <linux/mlx4/qp.h>
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate data.
	 */
	MLX4_IB_UD_HEADER_SIZE	= 72
};
struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
		qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
}
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
		qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	if (qp->buf.nbufs == 1)
		return qp->buf.u.direct.buf + offset;
	else
		return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}
static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with 0xffffffff, except for
 * the very first chunk of the WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	u32 *wqe = get_send_wqe(qp, n);
	int i;

	for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16)
		wqe[i] = 0xffffffff;
}
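
/*
 * Worked example of the stamping loop above: for a 128-byte WQE
 * (qp->sq.wqe_shift == 7) the loop bound is 1 << 5 == 32 dwords, so
 * only i == 16 is stamped -- bytes 64..67, the first word of the
 * second 64-byte chunk.  For a 64-byte WQE (wqe_shift == 6) the bound
 * is 16 and the loop body never runs, leaving the single chunk alone.
 */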
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
			       "on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int send_wqe_overhead(enum ib_qp_type type)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);
	case IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
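
/*
 * In the MLX (SMI/GSI) case above, the first ALIGN() term reserves
 * space for the UD header spread over inline segments -- one
 * mlx4_wqe_inline_seg header for each MLX4_INLINE_ALIGN-sized piece
 * of the MLX4_IB_UD_HEADER_SIZE bytes -- and the second ALIGN() term
 * reserves an inline segment for the 4-byte ICRC, each rounded up to
 * a whole number of data segments.
 */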
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_srq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
		return -EINVAL;

	if (has_srq) {
		/* QPs attached to an SRQ should have no RQ */
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
	cap->max_recv_sge = qp->rq.max_gs;

	return 0;
}
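
/*
 * Example of the rounding above, assuming the 16-byte
 * mlx4_wqe_data_seg layout: a request for max_recv_wr == 100 and
 * max_recv_sge == 3 yields wqe_cnt == 128 and max_gs == 4, so each
 * receive WQE holds four scatter entries and wqe_shift == ilog2(64)
 * == 6.  The rounded-up values are reported back through *cap.
 */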
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
{
	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr	 > dev->dev->caps.max_wqes  ||
	    cap->max_send_sge	 > dev->dev->caps.max_sq_sg ||
	    cap->max_inline_data + send_wqe_overhead(type) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
							sizeof (struct mlx4_wqe_data_seg),
							cap->max_inline_data +
							sizeof (struct mlx4_wqe_inline_seg)) +
						    send_wqe_overhead(type)));
	qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
		sizeof (struct mlx4_wqe_data_seg);

	/*
	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
	 * allow HW to prefetch.
	 */
	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
	qp->sq.wqe_cnt	  = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes;
	cap->max_send_sge = qp->sq.max_gs;
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
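
/*
 * Headroom example: with 64-byte send WQEs (wqe_shift == 6),
 * sq_spare_wqes == (2048 >> 6) + 1 == 33, so a request for
 * max_send_wr == 100 rounds 100 + 33 up to wqe_cnt == 256 and
 * reports max_post == 256 - 33 == 223 back to the caller.
 */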
static int set_user_sq_size(struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
{
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	qp->state	 = IB_QPS_RESET;
	qp->atomic_rd_en = 0;

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (!init_attr->srq) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
		if (err)
			goto err;

		if (!init_attr->srq) {
			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
			if (err)
				goto err;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
	if (err)
		goto err_wrid;

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
	else
		qp->sq_signal_bits = 0;

	qp->mqp.event = mlx4_ib_qp_event;

	return 0;

err_wrid:
	if (pd->uobject) {
		if (!init_attr->srq)
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
					      &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && !init_attr->srq)
		mlx4_ib_db_free(dev, &qp->db);

err:
	return err;
}
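
/*
 * doorbell_qpn example: for qpn 0x000041, qpn << 8 == 0x00004100 and
 * swab32() byte-reverses that to 0x00410000.  Storing the swapped
 * value here means the send path can pass it straight to writel()
 * without redoing the byte shuffle on every doorbell ring, which is
 * what the comment above means by precomputing.
 */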
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_lock_irq(&send_cq->lock);
	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
			       qp->mqp.qpn);

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (!qp->ibqp.srq)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (!qp->ibqp.srq)
			mlx4_ib_db_free(dev, &qp->db);
	}
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	int err;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		qp->ibqp.qp_num = qp->mqp.qpn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		sqp = kmalloc(sizeof *sqp, GFP_KERNEL);
		if (!sqp)
			return ERR_PTR(-ENOMEM);

		qp = &sqp->qp;

		err = create_qp_common(dev, pd, init_attr, udata,
				       dev->dev->caps.sqp_start +
				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
				       init_attr->port_num - 1,
				       qp);
		if (err) {
			kfree(sqp);
			return ERR_PTR(err);
		}

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	destroy_qp_common(dev, mqp, !!qp->pd->uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
static int to_mlx4_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:		return MLX4_QP_ST_RC;
	case IB_QPT_UC:		return MLX4_QP_ST_UC;
	case IB_QPT_UD:		return MLX4_QP_ST_UD;
	case IB_QPT_SMI:
	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
	default:		return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
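
/*
 * Note on the masking above: when the destination's responder
 * resources (dest_rd_atomic) are zero, only IB_ACCESS_REMOTE_WRITE is
 * kept, since incoming RDMA reads and atomics need responder
 * resources while RDMA writes do not.
 */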
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_qp_path *path, u8 port)
{
	path->grh_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid	    = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;
	path->counter_index = 0xff;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc	       |= 1 << 7;
		path->mgid_index	= ah->grh.sgid_index;
		path->hop_limit		= ah->grh.hop_limit;
		path->tclass_flowlabel	=
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
		((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}
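
/*
 * sched_queue layout as used above and in mlx4_set_sched(): bit 6
 * carries (port - 1) and bits 2..5 carry the 4-bit SL, OR'ed on top
 * of the MLX4_IB_DEFAULT_SCHED_QUEUE base value.
 */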
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(ibqp->qp_type) << 16));
	context->flags |= cpu_to_be32(1 << 8); /* DE? */

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	    ibqp->qp_type == IB_QPT_UD)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			printk(KERN_ERR "path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
				  attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto = attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				  attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	context->pd	    = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
	context->params1    = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);

	if (attr_mask & IB_QP_QKEY) {
		context->qkey = cpu_to_be32(attr->qkey);
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (is_qp0(dev, qp))
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
		else
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);

			stamp_send_wqe(qp, i);
		}
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
				       qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq): NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);
	}

out:
	kfree(context);
	return err;
}
static const struct ib_qp_attr mlx4_ib_qp_attr = { .port_num = 1 };
static const int mlx4_ib_qp_attr_mask_table[IB_QPT_UD + 1] = {
		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
				IB_QP_PORT			|
				IB_QP_QKEY),
		[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
				IB_QP_PORT			|
				IB_QP_ACCESS_FLAGS),
		[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
				IB_QP_PORT			|
				IB_QP_ACCESS_FLAGS),
		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
				IB_QP_QKEY),
		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
				IB_QP_QKEY),
};
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
		err = __mlx4_ib_modify_qp(ibqp, &mlx4_ib_qp_attr,
					  mlx4_ib_qp_attr_mask_table[ibqp->qp_type],
					  IB_QPS_RESET, IB_QPS_INIT);
		if (err)
			goto out;
		cur_state = IB_QPS_INIT;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe)
{
	struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header);

	sqp->ud_header.lrh.service_level   =
		be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
	sqp->ud_header.lrh.destination_lid = ah->av.dlid;
	sqp->ud_header.lrh.source_lid      = cpu_to_be16(ah->av.g_slid & 0x7f);
	if (mlx4_ib_ah_grh_present(ah)) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label    =
			ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit     = ah->av.hop_limit;
		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
				  ah->av.gid_index, &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid   = sqp->ud_header.lrh.destination_lid;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		printk(KERN_ERR "built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				printk("  [%02x] ", i * 4);
			printk(" %08x",
			       be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				printk("\n");
		}
		printk("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
}
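
/*
 * Split example for the code above, assuming the usual 16-byte MLX
 * segment and 4-byte inline segment header: inl + 1 sits 20 bytes
 * past the 64-byte-aligned WQE start, so spc == MLX4_INLINE_ALIGN -
 * 20 == 44.  A full 72-byte GSI header (MLX4_IB_UD_HEADER_SIZE) is
 * therefore written as 44 bytes in the first inline segment and 28
 * bytes in the second, with the barrier ordering the second
 * byte_count update.
 */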
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
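
/*
 * The recheck above is done under the CQ lock so that the head/tail
 * values are re-read after any completion processing that may be
 * running concurrently; only if the queue still looks full is the
 * post rejected.
 */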
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey  = cpu_to_be32(rkey);
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
static void set_mlx_icrc_seg(void *dseg)
{
	struct mlx4_wqe_inline_seg *iseg = dseg;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	int ind;
	int size;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->sq.head;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			qp->sq_signal_bits;

		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			ctrl->imm = wr->imm_data;

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_RC:
		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl);
			if (err < 0) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += err;
			size += err / 16;

			err = 0;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */
		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
			     qp->ibqp.qp_type == IB_QPT_GSI)) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 */
		if (wr->next)
			stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
				       (qp->sq.wqe_cnt - 1));

		++ind;
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
			       (qp->sq.wqe_cnt - 1));
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
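
/*
 * Ownership-bit note for the post path above: qp->sq.wqe_cnt is a
 * power of two, so (ind & qp->sq.wqe_cnt) flips each time the
 * producer index wraps the ring, and the high bit written into
 * owner_opcode alternates accordingly -- matching the initial
 * cpu_to_be32(1 << 31) pattern stamped in __mlx4_ib_modify_qp() when
 * the QP first leaves RESET.
 */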
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->sl		  = (path->sched_queue >> 2) & 0xf;
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags	  = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err;

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err)
		return -EINVAL;

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp_attr->qp_state	     = to_ib_qp_state(mlx4_state);
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	return 0;
}