/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd;
	struct qib_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct qib_sge_state ssge;
	struct qib_sge *sge;
	struct ib_wc wc;
	u32 length;

	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
	if (!qp) {
		ibp->n_pkt_drops++;
		return;
	}
	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto drop;
	}

	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof wc);
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		goto bail_unlock;
	}

	if (ah_attr->ah_flags & IB_AH_GRH) {
		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->wr.wr.ud.pkey_index : 0;
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != QIB_PERMISSIVE_LID)
			ibp->n_multicast_xmit++;
		else
			ibp->n_unicast_xmit++;
	} else {
		ibp->n_unicast_xmit++;
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_dma_busy)) {
				qp->s_flags |= QIB_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
	/* Pad the payload out to a multiple of 4 bytes. */
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &ah_attr->grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
		ah_attr->dlid != QIB_PERMISSIVE_LID ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 * Punt and return index 0.
	 */
	return 0;
}
/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->n_pkt_drops++;
}