/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

/* Driver-local headers (assumed from the rest of the qib driver). */
#include "qib.h"
#include "qib_mad.h"
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01:    .01 */
	20,     /* 02:    .02 */
	30,     /* 03:    .03 */
	40,     /* 04:    .04 */
	60,     /* 05:    .06 */
	80,     /* 06:    .08 */
	120,    /* 07:    .12 */
	160,    /* 08:    .16 */
	240,    /* 09:    .24 */
	320,    /* 0A:    .32 */
	480,    /* 0B:    .48 */
	640,    /* 0C:    .64 */
	960,    /* 0D:    .96 */
	1280,   /* 0E:   1.28 */
	1920,   /* 0F:   1.92 */
	2560,   /* 10:   2.56 */
	3840,   /* 11:   3.84 */
	5120,   /* 12:   5.12 */
	7680,   /* 13:   7.68 */
	10240,  /* 14:  10.24 */
	15360,  /* 15:  15.36 */
	20480,  /* 16:  20.48 */
	30720,  /* 17:  30.72 */
	40960,  /* 18:  40.96 */
	61440,  /* 19:  61.44 */
	81920,  /* 1A:  81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};
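/*
 * The table above follows the IBTA RNR NAK timer encoding: for example,
 * code 0x14 looks up 10240 microseconds (10.24 ms).  The RNR retry path
 * in qib_ruc_loopback() converts the looked-up value with
 * usecs_to_jiffies() before arming sqp->s_timer.
 */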
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
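/*
 * Illustrative example for qib_init_sge(): a receive WQE with three
 * SGEs of length 0, 4096 and 8192 bytes leaves ss->num_sge == 2
 * (zero-length SGEs are skipped), qp->r_len == 12288, the first
 * non-empty SGE in ss->sge and the remainder in ss->sg_list[].
 */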
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
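/*
 * Ring accounting example for the SRQ limit check in qib_get_rwqe():
 * with rq->size == 8, wq->head == 2 and the post-increment tail == 6,
 * n starts at 2; since 2 < 6, n += 8 - 6 leaves 4 WQEs outstanding,
 * and the SRQ limit event fires only if 4 < srq->limit.
 */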
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
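/*
 * Path migration note: qib_ruc_check_hdr() below calls qib_migrate_qp()
 * once an inbound packet carrying the BTH MigReq bit has been validated
 * against the alternate path; after that, outgoing headers are built
 * from the promoted (formerly alternate) address handle.
 */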
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
/*
 * This should be called with the QP s_lock held.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		qib_migrate_qp(qp);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
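/*
 * Example of the checks above: an RC QP with s_mig_state == IB_MIG_ARMED
 * that receives a packet with the BTH MigReq bit set is validated against
 * the alternate path (GRH presence, GIDs, P_Key, SLID and port).  On
 * success the QP migrates via qib_migrate_qp() and 0 is returned; any
 * mismatch returns 1 and the caller is expected to drop the packet.
 */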
/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				atomic_dec(&sge->mr->refcount);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
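/*
 * Note on the atomic opcodes handled above: IB_WR_ATOMIC_FETCH_AND_ADD
 * stores the target's prior value (atomic64_add_return() minus the
 * addend) into the requester's first SGE, while IB_WR_ATOMIC_CMP_AND_SWP
 * stores the prior value returned by cmpxchg(); in both cases the
 * requester sees the old 64-bit contents, just as it would from a
 * remote responder.
 */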
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
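/*
 * Example paylen computation for qib_make_grh(): with hwords == 5
 * (e.g. 2 LRH + 3 BTH words), nwords == 256 (a 1024-byte payload) and
 * SIZE_OF_CRC == 1 (the ICRC word), paylen = (5 - 2 + 256 + 1) << 2 =
 * 1040 bytes, i.e. the BTH, payload and ICRC that follow the GRH.
 */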
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}
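/*
 * Loopback detection example for qib_do_send() above: the test masks
 * off the LMC low bits of the destination LID, so with ppd->lmc == 2
 * and ppd->lid == 0x40 any DLID in 0x40-0x43 addresses this port and
 * RC/UC traffic to it is handed to qib_ruc_loopback() instead of being
 * put on the wire.
 */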
/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
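/*
 * Example of the index advance in qib_send_complete(): with
 * qp->s_size == 16 and old_last == 15, the completed entry wraps last
 * to 0; s_acked, s_cur and s_tail are moved along if they still point
 * at slot 15, so no stale references to the retired WQE remain.
 */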