/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
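/*
 * Illustrative sketch, not part of the driver and not built with it: it
 * shows how qib_init_sge() lays out the validated SGE state -- the first
 * SGE sits inline in ss->sge, the remaining entries follow in
 * ss->sg_list[], and ss->total_len mirrors qp->r_len.  It assumes
 * rvt_lkey_ok() records the full posted byte count in sge_length; the
 * helper name is a placeholder.
 */
#if 0
static u32 example_sum_sge_lengths(const struct rvt_sge_state *ss)
{
	u32 len;
	u8 i;

	if (!ss->num_sge)
		return 0;
	len = ss->sge.sge_length;
	for (i = 1; i < ss->num_sge; i++)
		len += ss->sg_list[i - 1].sge_length;
	return len;	/* should equal ss->total_len */
}
#endif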
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
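/*
 * Illustrative sketch, not built with the driver: the usual way a caller
 * consumes the tri-state return value of qib_get_rwqe().  The helper name
 * and the error codes are placeholders; the real callers use gotos to
 * their own error/RNR paths (compare the IB_WR_SEND case in
 * qib_ruc_loopback() below).
 */
#if 0
static int example_consume_rwqe(struct rvt_qp *qp)
{
	int ret = qib_get_rwqe(qp, 0);

	if (ret < 0)
		return -EIO;	/* local error: caller fails the request */
	if (ret == 0)
		return -EAGAIN;	/* no RWQE posted: caller backs off (RNR) */
	/* ret == 1: qp->r_wr_id and qp->r_sge now describe the RWQE */
	return 0;
}
#endif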
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if ((be16_to_cpu(hdr->lrh[3]) !=
		     rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
		    ppd_from_ibp(ibp)->port !=
		    rdma_ah_get_port_num(&qp->alt_ah_attr))
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) !=
		    rdma_ah_get_dlid(&qp->remote_ah_attr) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
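/*
 * Illustrative sketch, not built with the driver: the LRH field layout
 * behind the SL and SLID checks above, and the extraction used in the
 * bad-pkey path.  The helper name is a placeholder.
 */
#if 0
/*
 * LRH fields (16-bit big-endian words):
 *   lrh[0]: VL[15:12] | LVer[11:8] | SL[7:4] | LNH[1:0]
 *   lrh[1]: DLID,  lrh[2]: packet length,  lrh[3]: SLID
 */
static u8 example_sl_from_lrh(const struct ib_header *hdr)
{
	return (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
}
#endif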
/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct rvt_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	rcu_read_lock();
	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
	if (!qp)
		goto done;

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
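/*
 * Illustrative sketch, not built with the driver: the semantics of the
 * emulated atomics in the loopback path above.  The helper name and
 * parameter names are placeholders.
 */
#if 0
static u64 example_emulated_atomic(atomic64_t *maddr, u64 *vaddr,
				   u64 compare_add, u64 swap,
				   bool is_fetch_add)
{
	/*
	 * Both forms hand the *original* value of the target back to the
	 * requester; cmpxchg() returns the prior contents whether or not
	 * the swap actually happened, matching IB atomic response rules.
	 */
	return is_fetch_add ?
		(u64) atomic64_add_return(compare_add, maddr) - compare_add :
		(u64) cmpxchg(vaddr, compare_add, swap);
}
#endif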
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	if (!grh->sgid_index)
		hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
	else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
		hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
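/*
 * Illustrative sketch, not built with the driver: the paylen arithmetic
 * used in qib_make_grh() above, with an assumed example.  The helper name
 * is a placeholder.
 */
#if 0
static u16 example_grh_paylen(u32 hwords, u32 nwords)
{
	/*
	 * Everything after the GRH: the remaining header dwords (hwords
	 * minus the 2-dword LRH), the padded payload (nwords) and the
	 * ICRC (SIZE_OF_CRC), converted to bytes.  E.g. hwords = 5
	 * (2 LRH + 3 BTH dwords) and nwords = 64 payload dwords gives
	 * (5 - 2 + 64 + 1) << 2 = 272 bytes.
	 */
	return (hwords - 2 + nwords + SIZE_OF_CRC) << 2;
}
#endif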
void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		qp->s_hdrwords +=
			qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
				     rdma_ah_read_grh(&qp->remote_ah_attr),
				     qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
			cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	priv->s_hdr->lrh[2] =
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] =
			cpu_to_be16(ppd_from_ibp(ibp)->lid |
				    rdma_ah_get_path_bits(&qp->remote_ah_attr));
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}
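/*
 * Illustrative sketch, not built with the driver: the pad/word arithmetic
 * used in qib_make_ruc_header() above, with assumed numbers.  The helper
 * name is a placeholder.
 */
#if 0
static void example_pad_math(void)
{
	u32 size = 13;				/* assumed payload size */
	u32 extra_bytes = -size & 3;		/* 3 bytes of padding */
	u32 nwords = (size + extra_bytes) >> 2;	/* 4 dwords on the wire */

	/*
	 * The pad count lands in BTH bits 21:20 (extra_bytes << 20); lrh0
	 * carries VL in bits 15:12 and SL in bits 7:4, as packed above.
	 */
	(void)nwords;
}
#endif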
void _qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;

	qib_do_send(qp);
}
/**
 * qib_do_send - perform a send on a QP
 * @qp: pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (rdma_ah_get_dlid(&qp->remote_ah_attr) &
	     ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				return;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp, &flags));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}
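/*
 * Illustrative sketch, not built with the driver: the LMC-based loopback
 * test at the top of qib_do_send() above, with assumed example values.
 * The helper name is a placeholder.
 */
#if 0
static bool example_dlid_is_self(u16 dlid, u16 lid, u8 lmc)
{
	/*
	 * With LMC the port owns the 2^lmc LIDs sharing the base LID's
	 * upper bits, e.g. lid = 0x10, lmc = 2 covers 0x10..0x13, so a
	 * DLID of 0x12 satisfies (0x12 & ~3) == 0x10.
	 */
	return (dlid & ~((1 << lmc) - 1)) == lid;
}
#endif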
/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_qib_wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
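/*
 * Illustrative sketch, not built with the driver: the ring-index handling
 * used in qib_send_complete() above -- s_last advances with wraparound and
 * any index still pointing at the completed slot is pulled forward with it.
 * The helper name is a placeholder.
 */
#if 0
static void example_advance_s_last(struct rvt_qp *qp)
{
	u32 old_last = qp->s_last;
	u32 last = old_last + 1;

	if (last >= qp->s_size)		/* wrap at the end of the ring */
		last = 0;
	qp->s_last = last;
	if (qp->s_cur == old_last)	/* drag stale indices forward */
		qp->s_cur = last;
}
#endif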