/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02:    .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
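
/*
 * Illustration only, not part of the original driver: the table above is
 * indexed by the 5-bit RNR NAK timer code carried in the AETH (the same
 * code a consumer programs via the QP's min_rnr_timer attribute).  The
 * helper below is a hypothetical sketch of that lookup; the real use is
 * the rnr_nak path in qib_ruc_loopback() further down in this file.
 */
static inline unsigned long qib_rnr_code_to_jiffies(u8 rnr_code)
{
	/* Only the low 5 bits form a valid RNR timeout code (0..31). */
	return usecs_to_jiffies(ib_qib_rnr_table[rnr_code & 0x1f]);
}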
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (index == 0) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
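
/*
 * Added note for clarity, not in the original source: the low 16 bits of
 * BTH dword 0 carry the packet's P_Key, which is why "(u16)bth0" above is
 * compared against the QP's P_Key table entry, and the SL reported in the
 * bad-P_Key trap is bits 7:4 of LRH word 0.  A non-zero return means the
 * header does not match the QP's current or alternate path and the caller
 * is expected to drop the packet without generating a response.
 */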
/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				atomic_dec(&sge->mr->refcount);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
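
/*
 * Worked example for the paylen computation above (hypothetical numbers,
 * not from the original source): with hwords == 5 (2-word LRH + 3-word
 * BTH), a 256-byte payload (nwords == 64) and the 1-word ICRC, paylen is
 * (5 - 2 + 64 + 1) * 4 = 272 bytes, i.e. everything that follows the GRH;
 * the 2-word LRH is subtracted because it precedes the GRH.
 */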
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0 = QIB_LRH_BTH;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
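
/*
 * A note on the BTH packing in qib_make_ruc_header() (added for clarity,
 * not in the original source): the pad count occupies bits 21:20 of BTH
 * dword 0, hence "extra_bytes << 20"; the MigReq bit is bit 22
 * (IB_BTH_MIG_REQ); and the P_Key fills the low 16 bits via
 * qib_get_pkey().
 */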
/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}
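
/*
 * A note on the loopback test in qib_do_send() (added for clarity, not in
 * the original source): a destination LID with the low LMC bits masked off
 * that equals the port's own LID addresses this HCA, so RC/UC work
 * requests take the qib_ruc_loopback() path and never touch the wire.
 */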
/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}