/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from ipath_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
 * while this is being called.
 */
static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_sge_state rsge;
	struct ipath_sge *sge;
	struct ipath_rwq *wq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	struct ib_wc wc;
	u32 tail;
	u32 rlen;
	u32 length;

	qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
	if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		goto done;
	}
	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (unlikely(qp->ibqp.qp_num &&
		     ((int) swqe->wr.wr.ud.remote_qkey < 0 ?
		      sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
		/* XXX OK to lose a count once in a while. */
		dev->qkey_violations++;
		dev->n_pkt_drops++;
		goto drop;
	}
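	/*
	 * Illustrative note (not part of the original driver): per IBTA
	 * 10.2.5, a WR qkey with the high bit set selects the sender's
	 * QP-context qkey instead, roughly
	 *
	 *	use_qkey = (int) wr_qkey < 0 ? sqp->qkey : wr_qkey;
	 *
	 * e.g. posting remote_qkey = 0x80000000 means "use sqp->qkey";
	 * the check above compares the selected value against the
	 * destination QP's qkey.
	 */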
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof wc);
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}
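	/*
	 * Illustrative note (not part of the original driver): the UD
	 * consumer always sees a 40-byte GRH slot in front of the payload,
	 * so for a 256-byte send the completion reports
	 *
	 *	wc.byte_len = 256 + sizeof(struct ib_grh) = 296
	 *
	 * and the posted receive buffer must be at least that large.
	 */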
	/*
	 * This would be a lot simpler if we could call ipath_get_rwqe()
	 * but that uses state that the receive interrupt handler uses
	 * so we would need to lock out receive interrupts while doing
	 * local loopback.
	 */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}
	/*
	 * Get the next work request entry to find where to put the data.
	 * Note that it is safe to drop the lock after changing rq->tail
	 * since ipath_post_receive() won't fill the empty slot.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto drop;
	}
	wqe = get_rwqe_ptr(rq, tail);
	rsge.sg_list = qp->r_ud_sg_list;
	if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto drop;
	}
	/* Silently drop packets which are too big. */
	if (wc.byte_len > rlen) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto drop;
	}
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	wc.wr_id = wqe->wr_id;
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
		} else
			spin_unlock_irqrestore(&rq->lock, flags);
	} else
		spin_unlock_irqrestore(&rq->lock, flags);
	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
		wc.wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
	sge = swqe->sg_list;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		ipath_copy_sge(&rsge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--swqe->wr.num_sge)
				sge++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = dev->dd->ipath_lid |
		(ah_attr->src_path_bits &
		 ((1 << dev->dd->ipath_lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits =
		ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       swqe->wr.send_flags & IB_SEND_SOLICITED);
drop:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
done:;
}
/**
 * ipath_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int ipath_make_ud_req(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= IPATH_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}
	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;
	/* Construct the header. */
	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
			dev->n_multicast_xmit++;
		else
			dev->n_unicast_xmit++;
	} else {
		dev->n_unicast_xmit++;
		lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
		if (unlikely(lid == dev->dd->ipath_lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_dma_busy)) {
				qp->s_flags |= IPATH_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ipath_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}
	qp->s_cur = next_cur;
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;
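	/*
	 * Illustrative note (not part of the original driver): the two
	 * lines above round the payload up to a 4-byte boundary, e.g.
	 * for wqe->length == 5:
	 *
	 *	extra_bytes = -5 & 3 = 3;
	 *	nwords      = (5 + 3) >> 2 = 2;	 dwords on the wire
	 *
	 * extra_bytes is later encoded in the BTH PadCnt field.
	 */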
	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_dmult = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &ah_attr->grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = IPATH_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000;	/* Set VL (see ch. 13.5.3.1) */
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	lid = dev->dd->ipath_lid;
	if (lid) {
		lid |= ah_attr->src_path_bits &
			((1 << dev->dd->ipath_lmc) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= 1 << 23;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
		ipath_get_pkey(dev->dd, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
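	/*
	 * Illustrative note (not part of the original driver): the bth0
	 * word assembled above packs, from high bits to low:
	 *
	 *	opcode    << 24		(SEND_ONLY / SEND_ONLY_WITH_IMM)
	 *	solicited << 23		(IB_SEND_SOLICITED)
	 *	pad count << 20		(extra_bytes, 0..3)
	 *	P_Key      & 0xffff
	 *
	 * ipath_ud_rcv() below undoes the same packing with >> 24 for the
	 * opcode, >> 20 & 3 for the pad count, and bit 23 for solicited.
	 */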
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
		cpu_to_be32(IPATH_MULTICAST_QPN) :
		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
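	/*
	 * Illustrative note (not part of the original driver): UD PSNs are
	 * not acknowledged, so the per-QP s_next_psn counter is simply
	 * masked down to the 24-bit PSN field here and allowed to wrap,
	 * e.g. 0x1000000 & IPATH_PSN_MASK == 0.
	 */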
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~IPATH_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;		/* LRH + BTH + DETH */
		qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
		src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8;	/* LRH + GRH + BTH + DETH */
		/*
		 * The header with GRH is 68 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 12
		 * bytes of the IB header is in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			qkey = be32_to_cpu(((__be32 *) data)[1]);
			src_qp = be32_to_cpu(((__be32 *) data)[2]);
			data += 12;
		} else {
			qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		}
	}
	src_qp &= IPATH_QPN_MASK;
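	/*
	 * Illustrative note (not part of the original driver): with a GRH
	 * the full header is 8 + 40 + 12 + 8 = 68 bytes, but a 56-byte
	 * eager header buffer only holds the LRH, GRH and the first
	 * 8 bytes of the BTH.  The remaining 12 bytes land at the start
	 * of the data buffer, which is why the qkey and source QPN are
	 * read from ((__be32 *) data)[1] and [2] above before data is
	 * advanced past them.
	 */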
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
			dev->n_pkt_drops++;
			goto bail;
		}
		if (unlikely(qkey != qp->qkey)) {
			/* XXX OK to lose a count once in a while. */
			dev->qkey_violations++;
			dev->n_pkt_drops++;
			goto bail;
		}
	} else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
		   hdr->lrh[3] == IB_LID_PERMISSIVE) {
		struct ib_smp *smp = (struct ib_smp *) data;

		if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			dev->n_pkt_drops++;
			goto bail;
		}
	}
	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		if (header_in_data) {
			wc.ex.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else
			wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		hdrsize += sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		dev->n_pkt_drops++;
		goto bail;
	}
	/* Get the number of bytes the message was padded by. */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4))) {
		/* Drop incomplete packets. */
		dev->n_pkt_drops++;
		goto bail;
	}
	tlen -= hdrsize + pad + 4;

	/* Drop invalid MAD packets (see 13.5.3.1). */
	if (unlikely((qp->ibqp.qp_num == 0 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
		     (qp->ibqp.qp_num == 1 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
		dev->n_pkt_drops++;
		goto bail;
	}
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & IPATH_R_REUSE_SGE)
		qp->r_flags &= ~IPATH_R_REUSE_SGE;
	else if (!ipath_get_rwqe(qp, 0)) {
		/*
		 * Count VL15 packets dropped due to no receive buffer.
		 * Otherwise, count them as buffer overruns since usually,
		 * the HW will be able to receive packets even if there are
		 * no QPs with posted receive buffers.
		 */
		if (qp->ibqp.qp_num == 0)
			dev->n_vl15_dropped++;
		else
			dev->rcv_errors++;
		goto bail;
	}
	/* Silently drop packets which are too big. */
	if (wc.byte_len > qp->r_len) {
		qp->r_flags |= IPATH_R_REUSE_SGE;
		dev->n_pkt_drops++;
		goto bail;
	}
	if (has_grh) {
		ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			       sizeof(struct ib_grh));
		wc.wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
	ipath_copy_sge(&qp->r_sge, data,
		       wc.byte_len - sizeof(struct ib_grh));
	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
		goto bail;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << dev->dd->ipath_lmc) - 1);
	wc.port_num = 1;
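	/*
	 * Illustrative note (not part of the original driver): with an LMC
	 * of n the low n bits of a unicast DLID select one of 2^n paths,
	 * e.g. ipath_lmc == 2 gives mask (1 << 2) - 1 = 0x3, so a DLID of
	 * 0x1c7 reports dlid_path_bits = 0x3 while multicast DLIDs
	 * report 0.
	 */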
574 /* Signal completion event if the solicited bit is set. */
575 ipath_cq_enter(to_icq(qp
->ibqp
.recv_cq
), &wc
,
577 cpu_to_be32(1 << 23)) != 0);