/*
 * Copyright(c) 2015 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "trace_ibhdrs.h"
#include "qp.h"
/* We support only two types - 9B and 16B for now */
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};
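
/*
 * hfi1_make_ud_req() indexes this table by priv->hdr_type to select
 * the header builder matching the QP's packet format.
 */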
/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    rvt_get_swqe_remote_qpn(swqe));
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;
	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}
	ah_attr = rvt_get_swqe_ah_attr(swqe);
	ppd = ppd_from_ibp(ibp);
	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}
	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
			sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
		if (unlikely(qkey != qp->qkey))
			goto drop; /* silently drop per IBTA spec */
	}
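
	/*
	 * For example, a WR qkey of 0x80000001 casts to a negative int in
	 * the check above, so the QP-context qkey (sqp->qkey) is used
	 * instead of the WR's value.
	 */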
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, IB core creates the new
		 * sgid_index from the DGID and that will be the
		 * OPA_GID_INDEX. The new dgid is from the sgid
		 * index and that will be in the IB GID format.
		 *
		 * We now have a case where the sent packet had a
		 * different sgid_index and dgid compared to the
		 * one that was received in response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
				cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
		rvt_update_sge(&ssge, len, false);
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = rvt_get_swqe_pkey_index(swqe);
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			       ((1 << ppd->lmc) - 1))) & U16_MAX;
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}
static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	if (!bypass)
		bth0 |= *pkey;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] =
		cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
			    rvt_get_swqe_remote_qkey(wqe));
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}
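
/*
 * Summary of the wire layout built above: bth[0] carries the opcode,
 * solicited-event bit, pad count and (for non-bypass packets) the pkey;
 * bth[1] the destination QPN; bth[2] the masked PSN. deth[0] holds the
 * qkey and deth[1] the source QPN.
 */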
void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
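	/*
	 * Note: -wqe->length & 3 is the padding needed to round the payload
	 * up to a 4-byte boundary, e.g. a 9-byte payload gives (-9) & 3 = 3
	 * pad bytes; nwords counts the padded payload plus ICRC in dwords.
	 */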
	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	ps->s_txreq->hdr_dwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}
	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}
	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = ps->s_txreq->hdr_dwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}
void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u32 dest_qp = rvt_get_swqe_remote_qpn(wqe);
	u32 src_qp = qp->ibqp.qp_num;
	u16 len, pkey;
	u8 l4, sc5;
	bool is_mgmt = false;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	/*
	 * Build 16B Management Packet if either the destination
	 * or source queue pair number is 0 or 1.
	 */
	if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
		/* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
		ps->s_txreq->hdr_dwords = 6;
		is_mgmt = true;
	} else {
		/* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
		ps->s_txreq->hdr_dwords = 9;
		if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			ps->s_txreq->hdr_dwords++;
	}
	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(
			ibp, grh, grd,
			ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
			nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}
	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			   ((1 << ppd->lmc) - 1));
	if (is_mgmt) {
		l4 = OPA_16B_L4_FM;
		pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
		hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
				 dest_qp, src_qp);
	} else {
		hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	}
	/* Convert dwords to flits */
	len = (ps->s_txreq->hdr_dwords + nwords) >> 1;
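	/* A 16B flit is 8 bytes (two dwords), hence the shift by 1. */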
	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}
/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 lid;
	u32 next_cur;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}
	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;
	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
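			/*
			 * The destination LID matches this port (or this is
			 * permissive-LID GSI traffic), so the WQE is
			 * delivered in software via ud_loopback() rather
			 * than sent on the wire.
			 */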
			unsigned long tflags = ps->flags;

			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}
	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;
	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}
/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check.  It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;
	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match... */
		return -1;
	}
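
	/*
	 * Example: if ppd->pkeys[] holds only LIM_MGMT_P_KEY (0x7fff), a
	 * lookup of FULL_MGMT_P_KEY (0xffff) above finds no exact match and
	 * falls back to the 0x7fff index via lim_idx.
	 */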
	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_opa_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;
	hdr.hdr_type = HFI1_PKT_TYPE_16B;
	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.opah.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_16B_DWORDS + nwords) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}
	/* BIT 16 to 19 is TVER. Bit 20 to 22 is pad cnt */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */
	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
		}
	}
}
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct hfi1_opa_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	hdr.hdr_type = HFI1_PKT_TYPE_9B;
	if (old_grh) {
		struct ib_grh *grh = &hdr.ibh.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.ibh.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.ibh.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */
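
	/*
	 * The BECN indication of this 9B CNP rides in bth[1] above; the 16B
	 * variant in return_cnp_16B() instead signals BECN through the 16B
	 * LRH built by hfi1_make_16b_hdr().
	 */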
	hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
		}
	}
}
/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;
	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *   - accept if the port is running an SM
	 *   - drop MAD if it's an SMA trap
	 *   - pkey == FULL_MGMT_P_KEY =>
	 *       reply with unsupported method
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}
/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @rcv_flags: flags relevant to rcv processing
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	u8 l4 = 0;
	bool dlid_is_permissive;
	bool slid_is_permissive;
	bool solicited = false;

	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
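	/* SIZE_OF_CRC counts dwords, so << 2 converts the ICRC to bytes. */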

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		l4 = hfi1_16B_get_l4(packet->hdr);
		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
	sl_from_sc = ibp->sc_to_sl[sc5];
	if (likely(l4 != OPA_16B_L4_FM)) {
		src_qp = ib_get_sqpn(packet->ohdr);
		solicited = ib_bth_is_solicited(packet->ohdr);
	} else {
		src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
	}
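
	/*
	 * process_ecn() inspects the packet's FECN/BECN marks and, when
	 * required, returns a CNP to the sender (see return_cnp() and
	 * return_cnp_16B() above).
	 */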
	process_ecn(qp, packet);
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pkey(ibp,
					      pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(l4 != OPA_16B_L4_FM &&
			     ib_get_qkey(packet->ohdr) != qp->qkey))
			return; /* Silent drop */

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);
	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (packet->grh) {
		rvt_copy_sge(qp, &qp->r_sge, packet->grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;

		/*
		 * Assuming we only created 16B on the send side
		 * if we want to use large LIDs, since GRH was stripped
		 * out when creating 16B, add back the GRH here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		     true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
			}
			mgmt_pkey_idx = 0;
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid & U16_MAX;
	wc.sl = sl_from_sc;
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, solicited);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}