/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01: 0.01 */
	20,	/* 02: 0.02 */
	30,	/* 03: 0.03 */
	40,	/* 04: 0.04 */
	60,	/* 05: 0.06 */
	80,	/* 06: 0.08 */
	120,	/* 07: 0.12 */
	160,	/* 08: 0.16 */
	240,	/* 09: 0.24 */
	320,	/* 0A: 0.32 */
	480,	/* 0B: 0.48 */
	640,	/* 0C: 0.64 */
	960,	/* 0D: 0.96 */
	1280,	/* 0E: 1.28 */
	1920,	/* 0F: 1.92 */
	2560,	/* 10: 2.56 */
	3840,	/* 11: 3.84 */
	5120,	/* 12: 5.12 */
	7680,	/* 13: 7.68 */
	10240,	/* 14: 10.24 */
	15360,	/* 15: 15.36 */
	20480,	/* 16: 20.48 */
	30720,	/* 17: 30.72 */
	40960,	/* 18: 40.96 */
	61440,	/* 19: 61.44 */
	81920,	/* 1A: 81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
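/*
 * The table is indexed by the 5-bit RNR NAK timer code a QP has been
 * configured with (qp->r_min_rnr_timer); qib_ruc_loopback() below converts
 * the selected entry to jiffies:
 *
 *	usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer])
 */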
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		qib_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
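/*
 * On a bad LKEY, qib_init_sge() has already queued an IB_WC_LOC_PROT_ERR
 * completion for the consumer and returns 0; qib_get_rwqe() below turns
 * that into its -1 (local error) return value.
 */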
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
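/*
 * Source GID lookup: index 0 refers to the port GUID, any other index
 * selects one of the additional GUIDs assigned to the port.
 */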
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}
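/*
 * A GID matches if the interface ID is ours and the subnet prefix is
 * either the port's configured prefix or the default (link-local) prefix.
 */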
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
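/*
 * qib_ruc_check_hdr() validates an incoming request's LRH/GRH/BTH fields
 * against the QP's primary path, or against the alternate path when the
 * BTH MigReq bit arrives on an armed QP: GRH presence, the GIDs, the
 * partition key and the source LID must all check out before the packet
 * is accepted (and, on the alternate path, before qib_migrate_qp() is
 * invoked).
 */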
/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;
	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;
	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
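	/*
	 * Dispatch on the WQE opcode: sends consume an RWQE on the
	 * destination QP, while RDMA writes/reads and atomics validate the
	 * RKEY and operate directly on the responder's memory, mirroring
	 * what the receive path would do for a wire packet.
	 */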
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		qib_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}
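	/*
	 * Copy the payload: walk the sender's SGE list, moving
	 * min(s_len, sge->length, sge->sge_length) bytes per pass into the
	 * responder's r_sge via qib_copy_sge(), advancing through the MR
	 * segment map as each SGE is consumed.
	 */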
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				qib_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		qib_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;
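	/*
	 * No RWQE was available on the destination QP: back off for the
	 * responder's minimum RNR timer (see ib_qib_rnr_table above) and let
	 * qib_rc_rnr_retry() restart the send tasklet, just as a wire RNR
	 * NAK would.
	 */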
rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;
op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
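	/*
	 * PayLen counts the bytes that follow the GRH: hwords does not yet
	 * include the GRH itself (the caller adds the returned size), so
	 * subtract the 2 LRH words, add the payload words and the ICRC
	 * word, then convert words to bytes.
	 */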
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
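	/*
	 * extra_bytes is the pad needed to round the payload up to a 4-byte
	 * boundary (e.g. s_cur_size == 5 gives extra_bytes == 3, nwords == 2)
	 * and is also advertised in the BTH PadCnt field below.
	 */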
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
					qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;
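	/*
	 * A DLID equal to our own base LID (ignoring the low LMC path bits)
	 * is addressed to this HCA, so RC/UC traffic is handled entirely in
	 * software via qib_ruc_loopback() instead of building packets.
	 */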
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}
/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		qib_put_mr(sge->mr);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
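	/*
	 * Retire the WQE: advance s_last past it and drag s_acked, s_cur and
	 * s_tail along if they still referenced the completed entry.
	 */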
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}