/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "qp.h"

/* We support only two types - 9B and 16B for now */
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};
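
/*
 * hfi1_make_ud_req() dispatches through this table using the QP's
 * header type (priv->hdr_type) to build either a 9B or a 16B header.
 */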

/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}
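
	/*
	 * GSI QPs are UD QPs on the wire, so treat IB_QPT_GSI as
	 * IB_QPT_UD when matching source and destination QP types.
	 */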
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey))
			goto drop; /* silently drop per IBTA spec */
	}
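
	/*
	 * The (int) cast above makes "< 0" a test of bit 31: e.g. a WR
	 * qkey of 0x80000001 has the high bit set, so the sender's own
	 * sqp->qkey is used instead (see 10.2.5).
	 */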

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, IB core creates the new
		 * sgid_index from the DGID and that will be the
		 * OPA_GID_INDEX. The new dgid is from the sgid
		 * index and that will be in the IB GID format.
		 *
		 * We now have a case where the sent packet had a
		 * different sgid_index and dgid compared to the
		 * one that was received in response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
					cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
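
	/*
	 * Walk the sender's SGE list and copy the payload into the
	 * receiver's posted buffer (qp->r_sge), segment by segment.
	 */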
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			       ((1 << ppd->lmc) - 1))) & U16_MAX;
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}

	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
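
	/*
	 * 9B packets carry the pkey in BTH bits 15:0; 16B packets carry
	 * it in the LRH (see hfi1_make_16b_hdr()), so the bypass path
	 * leaves the BTH pkey field clear.
	 */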
	if (!bypass)
		bth0 |= *pkey;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}

void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
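
	/*
	 * -length & 3 is the byte count needed to pad the payload to a
	 * dword boundary (e.g. a 5-byte payload gets 3 pad bytes), and
	 * nwords is payload + pad + ICRC in dwords.
	 */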
	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	ps->s_txreq->hdr_dwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = ps->s_txreq->hdr_dwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}

void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u16 len, pkey;
	u8 l4, sc5;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	/* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
	ps->s_txreq->hdr_dwords = 9;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
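
	/*
	 * 16B packets are padded so header + payload + tail byte (LT)
	 * land on a flit (8-byte) boundary; the "Convert dwords to
	 * flits" step below halves the dword total accordingly.
	 */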
	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(
			ibp, grh, grd,
			ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
			nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));

	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	/* Convert dwords to flits */
	len = (ps->s_txreq->hdr_dwords + nwords) >> 1;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	int next_cur;
	u32 lid;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
			unsigned long tflags = ps->flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
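
			/*
			 * ud_loopback() acquires the destination QP's
			 * r_lock, so drop s_lock across the call instead
			 * of nesting the send and receive locks.
			 */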
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}

	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;

bail_no_tx:
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check.  It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match...  */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */
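
	/*
	 * e.g. both 0x8001 (full member) and 0x0001 (limited member)
	 * match a table entry whose low 15 bits are 0x0001.
	 */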
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}

void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_16b_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;

	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_16B_DWORDS + nwords) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	/* BIT 16 to 19 is TVER. Bit 20 to 22 is pad cnt */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */

	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
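
	/*
	 * 16B CNPs are sent as bypass packets; PBC_INSERT_BYPASS_ICRC
	 * asks the hardware to append the ICRC on egress.
	 */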
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *    - accept if the port is running an SM
	 *    - drop MAD if it's an SMA trap
	 *    - pkey == FULL_MGMT_P_KEY =>
	 *        reply with unsupported method
	 *    - pkey != FULL_MGMT_P_KEY =>
	 *        increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet context, which carries the port the packet came
 *          in on, the packet header, the flags relevant to rcv
 *          processing, the payload data, the packet length, and the
 *          destination QP
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	u32 qkey;
	bool dlid_is_permissive;
	bool slid_is_permissive;

	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
	qkey = ib_get_qkey(ohdr);
	src_qp = ib_get_sqpn(ohdr);

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
	sl_from_sc = ibp->sc_to_sl[sc5];
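
	/* A received CNP must not trigger another CNP in response. */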
	process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pkey(ibp, pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(qkey != qp->qkey)) /* Silent drop */
			return;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (packet->grh) {
		hfi1_copy_sge(&qp->r_sge, packet->grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;

		/*
		 * Assuming we only created 16B on the send side
		 * if we want to use large LIDs, since GRH was stripped
		 * out when creating 16B, add back the GRH here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid & U16_MAX;
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}