/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		    u32 *lengthp, struct ipath_sge_state *ss)
{
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	*lengthp = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
				   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		*lengthp += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ret = 1;
	goto bail;

bad_lkey:
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the QP
 * @ss: the SGE state
 * @length: the length of the data to send
 * @wr: the work request
 * @wc: the work completion entry
 *
 * This is called from ipath_post_ud_send() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
 * while this is being called.
 */
static void ipath_ud_loopback(struct ipath_qp *sqp,
			      struct ipath_sge_state *ss,
			      u32 length, struct ib_send_wr *wr,
			      struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_sge_state rsge;
	struct ipath_sge *sge;
	struct ipath_rwq *wq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	u32 rlen;

	qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
	if (!qp)
		return;

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (unlikely(qp->ibqp.qp_num &&
		     ((int) wr->wr.ud.remote_qkey < 0
		      ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
		/* XXX OK to lose a count once in a while. */
		dev->qkey_violations++;
		dev->n_pkt_drops++;
		goto done;
	}

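	/*
	 * Illustrative example of the rule above (not in the original
	 * source): a remote_qkey of 0x80010000 has the high order bit
	 * set, so (int) remote_qkey < 0 and the QP context's own qkey
	 * is used (always a match), while a remote_qkey of 0x00001234
	 * must equal qp->qkey for the packet to be accepted.
	 */
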
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc->byte_len = length + sizeof(struct ib_grh);

	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wr->imm_data;
	} else {
		wc->wc_flags = 0;
		wc->imm_data = 0;
	}

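	/*
	 * Worked example (illustrative): sizeof(struct ib_grh) is 40
	 * bytes, so a 256-byte payload is reported to the consumer as
	 * byte_len = 256 + 40 = 296, whether or not a GRH was actually
	 * carried on the wire.
	 */
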
	if (wr->num_sge > 1) {
		rsge.sg_list = kmalloc((wr->num_sge - 1) *
				       sizeof(struct ipath_sge),
				       GFP_ATOMIC);
	} else
		rsge.sg_list = NULL;

	/*
	 * Get the next work request entry to find where to put the data.
	 * Note that it is safe to drop the lock after changing rq->tail
	 * since ipath_post_receive() won't fill the empty slot.
	 */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	while (1) {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			dev->n_pkt_drops++;
			goto drop;
		}
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
		if (init_sge(qp, wqe, &rlen, &rsge))
			break;
		wq->tail = tail;
	}
	/* Silently drop packets which are too big. */
	if (wc->byte_len > rlen) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto drop;
	}
	wq->tail = tail;
	wc->wr_id = wqe->wr_id;
	if (handler) {
		u32 n;

		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
		} else
			spin_unlock_irqrestore(&rq->lock, flags);
	} else
		spin_unlock_irqrestore(&rq->lock, flags);

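	/*
	 * Worked example for the limit test above (illustrative): with
	 * rq->size = 8, wq->head = 2 and tail = 6, n = 2 + 8 - 6 = 4
	 * WQEs remain, so an SRQ limit of 5 would fire the
	 * IB_EVENT_SRQ_LIMIT_REACHED handler.
	 */
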
	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
		wc->wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
	sge = &ss->sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		ipath_copy_sge(&rsge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->vendor_err = 0;
	wc->qp_num = qp->ibqp.qp_num;
	wc->src_qp = sqp->ibqp.qp_num;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc->pkey_index = 0;
	wc->slid = dev->dd->ipath_lid |
		(ah_attr->src_path_bits &
		 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
	wc->sl = ah_attr->sl;
	wc->dlid_path_bits =
		ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
		       wr->send_flags & IB_SEND_SOLICITED);
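	/*
	 * Illustrative LMC example: with an LMC of 2 the mask is
	 * (1 << 2) - 1 = 3, so the low two source path bits are folded
	 * into wc->slid and the low two bits of the DLID are reported
	 * in wc->dlid_path_bits.
	 */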

drop:
	kfree(rsge.sg_list);
done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * ipath_post_ud_send - post a UD send on QP
 * @qp: the QP
 * @wr: the work request
 *
 * Note that we actually send the data as it is posted instead of putting
 * the request into a ring buffer.  If we wanted to use a ring buffer,
 * we would need to save a reference to the destination address in the SWQE.
 */
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct ipath_sge_state ss;
	struct ipath_sge *sg_list;
	struct ib_wc wc;
	u32 hwords;
	u32 nwords;
	u32 len;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int i;
	int ret;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
		ret = 0;
		goto bail;
	}

	if (wr->wr.ud.ah->pd != qp->ibqp.pd) {
		ret = -EPERM;
		goto bail;
	}

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -EINVAL;
		goto bail;
	}

	if (wr->num_sge > 1) {
		sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
				  GFP_ATOMIC);
		if (!sg_list) {
			ret = -ENOMEM;
			goto bail;
		}
	} else
		sg_list = NULL;

	/* Check the buffer to send. */
	ss.sg_list = sg_list;
	ss.sge.mr = NULL;
	ss.sge.vaddr = NULL;
	ss.sge.length = 0;
	ss.sge.sge_length = 0;
	ss.num_sge = 0;
	len = 0;
	for (i = 0; i < wr->num_sge; i++) {
		/* Check LKEY */
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(qp, ss.num_sge ?
				   sg_list + ss.num_sge - 1 : &ss.sge,
				   &wr->sg_list[i], 0)) {
			ret = -EINVAL;
			goto bail;
		}
		len += wr->sg_list[i].length;
		ss.num_sge++;
	}
	/* Check for invalid packet size. */
	if (len > dev->dd->ipath_ibmtu) {
		ret = -EINVAL;
		goto bail;
	}
	extra_bytes = (4 - len) & 3;
	nwords = (len + extra_bytes) >> 2;

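	/*
	 * Worked example (illustrative): for len = 10 bytes,
	 * extra_bytes = (4 - 10) & 3 = 2 and nwords = (10 + 2) >> 2 = 3,
	 * i.e. the payload is padded up to a whole number of 32-bit
	 * words and sent as three dwords.
	 */
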
	/* Construct the header. */
	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
	if (ah_attr->dlid == 0) {
		ret = -EINVAL;
		goto bail;
	}
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
			dev->n_multicast_xmit++;
		else
			dev->n_unicast_xmit++;
	} else {
		dev->n_unicast_xmit++;
		lid = ah_attr->dlid &
			~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		if (unlikely(lid == dev->dd->ipath_lid)) {
			/*
			 * Pass in an uninitialized ib_wc to save stack
			 * space.
			 */
			ipath_ud_loopback(qp, &ss, len, wr, &wc);
			goto done;
		}
	}
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		hwords = 17;
		lrh0 = IPATH_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		qp->s_hdr.u.l.grh.version_tclass_flow =
			cpu_to_be32((6 << 28) |
				    (ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		qp->s_hdr.u.l.grh.paylen =
			cpu_to_be16(((wr->opcode ==
				      IB_WR_SEND_WITH_IMM ? 6 : 5) +
				     nwords + SIZE_OF_CRC) << 2);
		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
		qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
		/* The SGID is 32-bit aligned. */
		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
			dev->gid_prefix;
		qp->s_hdr.u.l.grh.sgid.global.interface_id =
			dev->dd->ipath_guid;
		qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		hwords = 7;
		lrh0 = IPATH_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
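	/*
	 * Illustrative paylen example for the GRH branch above: the
	 * words following the GRH are BTH (3) + DETH (2) = 5, or 6 with
	 * an immediate, so assuming SIZE_OF_CRC counts one 32-bit word,
	 * a send with immediate and nwords = 3 gives
	 * paylen = (6 + 3 + 1) << 2 = 40 bytes.
	 */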
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wr->imm_data;
		wc.imm_data = wr->imm_data;
		hwords += 1;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else if (wr->opcode == IB_WR_SEND) {
		wc.imm_data = 0;
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	} else {
		ret = -EINVAL;
		goto bail;
	}
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000;	/* Set VL (see ch. 13.5.3.1) */
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
	lid = dev->dd->ipath_lid;
	if (lid) {
		lid |= ah_attr->src_path_bits &
			((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		bth0 |= 1 << 23;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
		ipath_get_pkey(dev->dd, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
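	/*
	 * Resulting BTH word 0 layout (per the IB spec): opcode in bits
	 * 31:24, solicited event bit at bit 23, pad count in bits 21:20
	 * and P_Key in bits 15:0.  E.g. an unsolicited UD SEND (opcode
	 * 0x64) with two pad bytes gives
	 * bth0 = (0x64 << 24) | (2 << 20) | pkey.
	 */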
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
		cpu_to_be32(wr->wr.ud.remote_qpn);
	/* XXX Could lose a PSN count but not worth locking */
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int) wr->wr.ud.remote_qkey < 0 ?
		qp->qkey : wr->wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
			     len, &ss))
		dev->n_no_piobuf++;

done:
	/* Queue the completion status entry. */
	if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
	    (wr->send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wr->wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.vendor_err = 0;
		wc.opcode = IB_WC_SEND;
		wc.byte_len = len;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.wc_flags = 0;
		/* XXX initialize other fields? */
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
	}
	kfree(sg_list);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;	/* LRH + BTH + DETH */
		qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
		src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8;	/* LRH + GRH + BTH + DETH */
		/*
		 * The header with GRH is 68 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 12
		 * bytes of the IB header are in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			qkey = be32_to_cpu(((__be32 *) data)[1]);
			src_qp = be32_to_cpu(((__be32 *) data)[2]);
			data += 12;
		} else {
			qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		}
	}
	src_qp &= IPATH_QPN_MASK;

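	/*
	 * Arithmetic behind header_in_data above (illustrative): the
	 * full header with a GRH is 8 + 40 + 12 + 8 = 68 bytes, but the
	 * eager header buffer only holds 56 bytes, so the trailing
	 * 68 - 56 = 12 bytes (the last 4 bytes of the BTH plus the
	 * 8-byte DETH) land at the start of the data buffer instead.
	 */
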
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
			dev->n_pkt_drops++;
			goto bail;
		}
		if (unlikely(qkey != qp->qkey)) {
			/* XXX OK to lose a count once in a while. */
			dev->qkey_violations++;
			dev->n_pkt_drops++;
			goto bail;
		}
	} else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
		   hdr->lrh[3] == IB_LID_PERMISSIVE) {
		struct ib_smp *smp = (struct ib_smp *) data;

		if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			dev->n_pkt_drops++;
			goto bail;
		}
	}

	/* Get the number of bytes the message was padded by. */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4))) {
		/* Drop incomplete packets. */
		dev->n_pkt_drops++;
		goto bail;
	}
	tlen -= hdrsize + pad + 4;

	/* Drop invalid MAD packets (see 13.5.3.1). */
	if (unlikely((qp->ibqp.qp_num == 0 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
		     (qp->ibqp.qp_num == 1 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
		dev->n_pkt_drops++;
		goto bail;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else
			wc.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		hdrsize += sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		dev->n_pkt_drops++;
		goto bail;
	}

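	/*
	 * Example of the byte-order note above (illustrative): a UD
	 * SEND-only packet carries 0x64 in the first byte of bth[0] on
	 * the wire, so after be32_to_cpu() that opcode sits in bits
	 * 31:24 and the >> 24 shift recovers it.
	 */
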
	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_reuse_sge)
		qp->r_reuse_sge = 0;
	else if (!ipath_get_rwqe(qp, 0)) {
		/*
		 * Count VL15 packets dropped due to no receive buffer.
		 * Otherwise, count them as buffer overruns since usually,
		 * the HW will be able to receive packets even if there are
		 * no QPs with posted receive buffers.
		 */
		if (qp->ibqp.qp_num == 0)
			dev->n_vl15_dropped++;
		else
			dev->rcv_errors++;
		goto bail;
	}
	/* Silently drop packets which are too big. */
	if (wc.byte_len > qp->r_len) {
		qp->r_reuse_sge = 1;
		dev->n_pkt_drops++;
		goto bail;
	}
	if (has_grh) {
		ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			       sizeof(struct ib_grh));
		wc.wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
	ipath_copy_sge(&qp->r_sge, data,
		       wc.byte_len - sizeof(struct ib_grh));
	qp->r_wrid_valid = 0;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = src_qp;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       (ohdr->bth[0] &
			__constant_cpu_to_be32(1 << 23)) != 0);

bail:;
}
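
/*
 * Note on the solicited-event test above (illustrative): bit 23 of BTH
 * word 0 is the SE (solicited event) bit, so masking bth[0], still in
 * network byte order, against __constant_cpu_to_be32(1 << 23) is
 * non-zero exactly when the sender set IB_SEND_SOLICITED.
 */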