/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = rvt_get_swqe_ah_attr(swqe);
	ppd = ppd_from_ibp(ibp);
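	/*
	 * For QPs above QP1 the sender's and destination's P_Keys must
	 * match (and at least one must be a full member); otherwise the
	 * violation is reported via qib_bad_pkey() and the packet dropped.
	 */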
	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pkey(ibp, pkey1,
				     rdma_ah_get_sl(ah_attr),
				     sqp->ibqp.qp_num, qp->ibqp.qp_num,
				     cpu_to_be16(lid),
				     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
			sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
		if (unlikely(qkey != qp->qkey))
			goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		qib_make_grh(ibp, &grh, grd, 0, 0);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
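	/* Copy the payload from the sender's SGE list into the receiver's buffer. */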
	sge = &ssge.sge;
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		rvt_get_swqe_pkey_index(swqe) : 0;
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1));
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: flags to modify and pass back to caller
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (rdma_ah_get_dlid(ah_attr) !=
				be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
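		/*
		 * Mask off the low LMC bits of the DLID; if what remains is
		 * this port's base LID, the destination is the local HCA and
		 * the packet is looped back in software.
		 */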
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			unsigned long tflags = *flags;

			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			*flags = tflags;
			rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
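	/*
	 * Pad the payload to a 4-byte boundary: -length & 3 gives the number
	 * of pad bytes and nwords is the padded payload in 32-bit words.
	 */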
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       rdma_ah_read_grh(ah_attr),
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec.  what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
			cpu_to_be16(rdma_ah_get_dlid(ah_attr));  /* DEST LID */
	priv->s_hdr->lrh[2] =
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= rdma_ah_get_path_bits(ah_attr) &
			((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
			be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] =
		cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
			    rvt_get_swqe_remote_qkey(wqe));
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 * Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;		/* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8;	/* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
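	/* The last 4 bytes of each packet are the ICRC, hence the "+ 4" below. */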
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pkey(ibp,
					     pkey1,
					     (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					     src_qp, qp->ibqp.qp_num,
					     hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey))
			return;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when its in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		     true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
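	/* The P_Key is carried in the low 16 bits of the first BTH word. */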
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}