/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
const u32 ib_ipath_rnr_table[32] = {
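        /*
         * Entries reconstructed from the IBTA RNR NAK timer encoding
         * (655.36 ms for code 0, then 0.01 ms up to 491.52 ms for codes
         * 1-31), rounded up to whole milliseconds; treat the exact
         * numbers as illustrative rather than authoritative.
         */
        656, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
        3, 4, 6, 8, 11, 16, 21, 31, 41, 62, 82, 123, 164, 246, 328, 492
};

/*
 * Usage (from the RNR NAK handling later in this file): the responder's
 * five-bit minimum RNR timer code indexes the table directly:
 *
 *      sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
 */
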
/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * Called with the QP s_lock held and interrupts disabled.
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);

        /* We already did a spin_lock_irqsave(), so just use spin_lock */
        spin_lock(&dev->pending_lock);
        if (list_empty(&dev->rnrwait))
                list_add(&qp->timerwait, &dev->rnrwait);
        else {
                struct list_head *l = &dev->rnrwait;
                struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
                                                  timerwait);

                while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
                        qp->s_rnr_timeout -= nqp->s_rnr_timeout;
                        l = l->next;
                        if (l->next == &dev->rnrwait) {
                                nqp = NULL;
                                break;
                        }
                        nqp = list_entry(l->next, struct ipath_qp,
                                         timerwait);
                }
                if (nqp)
                        nqp->s_rnr_timeout -= qp->s_rnr_timeout;
                list_add(&qp->timerwait, l);
        }
        spin_unlock(&dev->pending_lock);
}
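
/*
 * Worked example of the relative-timeout bookkeeping above: if QPs with
 * absolute timeouts of 4, 7 and 15 ms are already queued, their stored
 * s_rnr_timeout values are 4, 3 and 8.  Inserting a QP with a 10 ms
 * timeout walks past the first two entries (10 - 4 - 3 = 3), is stored
 * as 3, and the 15 ms entry behind it is reduced from 8 to 5.
 */
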
/**
 * ipath_init_sge - Validate a RWQE and fill in the SGE state
 * @qp: the QP
 *
 * Return 1 if OK.
 */
int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
                   u32 *lengthp, struct ipath_sge_state *ss)
{
        int i, j, ret;
        struct ib_wc wc;

        *lengthp = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
                                   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                *lengthp += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ret = 1;
        goto bail;

bad_lkey:
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}
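
/*
 * Usage note: ipath_get_rwqe() below consumes this in a loop, e.g.
 *
 *      } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
 *
 * so an RWQE that fails the LKEY check is completed with
 * IB_WC_LOC_PROT_ERR and the next RWQE is tried; a return of 1 means
 * *lengthp and ss now describe a usable receive buffer.
 */
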
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_rwq *wq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        do {
                if (unlikely(tail == wq->head)) {
                        ret = 0;
                        goto unlock;
                }
                /* Make sure entry is read after head index is read. */
                smp_rmb();
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
                if (wr_id_only)
                        break;
                qp->r_sge.sg_list = qp->r_sg_list;
        } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
        qp->r_wr_id = wqe->wr_id;
        wq->tail = tail;

        ret = 1;
        set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
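                /*
                 * Worked example of the wrap-around arithmetic above:
                 * with rq->size == 8, tail == 6 and wq->head == 2, head
                 * is below tail, so n = 2 + (8 - 6) = 4 receive WQEs
                 * remain posted.
                 */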
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from ipath_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ipath_swqe *wqe;
        struct ipath_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
            !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= IPATH_S_BUSY;

again:
        if (sqp->s_last == sqp->s_head)
                goto clr_busy;
        wqe = get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);
        if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                dev->n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof wc);
        send_status = IB_WC_SUCCESS;

        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                if (!ipath_get_rwqe(qp, 0))
                        goto rnr_nak;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                if (!ipath_get_rwqe(qp, 1))
                        goto rnr_nak;
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                if (wqe->length == 0)
                        break;
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
                                            wqe->wr.wr.atomic.remote_addr,
                                            wqe->wr.wr.atomic.rkey,
                                            IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
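                /*
                 * Illustrative example: if the target u64 currently holds
                 * 5, a FETCH_AND_ADD with compare_add == 3 leaves 8 in
                 * memory and copies the old value 5 into the requester's
                 * buffer; a CMP_AND_SWP with compare_add == 5 and
                 * swap == 9 stores 9 (the compare matched) and likewise
                 * returns the old value 5.
                 */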
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }

        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }

        if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        ipath_send_complete(sqp, wqe, send_status);
        goto again;

rnr_nak:
        /* Handle RNR NAK */
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
                goto clr_busy;
        sqp->s_flags |= IPATH_S_WAITING;
        dev->n_rnr_naks++;
        sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
        ipath_insert_rnr_queue(sqp);
        goto clr_busy;

inv_err:
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        ipath_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ipath_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~IPATH_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~IPATH_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
{
        if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
            qp->ibqp.qp_type == IB_QPT_SMI) {
                unsigned long flags;

                spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
                dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                 dd->ipath_sendctrl);
                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        }
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int ipath_no_bufs_available(struct ipath_qp *qp,
                                   struct ipath_ibdev *dev)
{
        unsigned long flags;
        int ret = 1;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, ipath_ib_piobufavail()
         * could be called. Therefore, put QP on the piowait list before
         * enabling the PIO avail interrupt.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
                dev->n_piowait++;
                qp->s_flags |= IPATH_S_WAITING;
                qp->s_flags &= ~IPATH_S_BUSY;
                spin_lock(&dev->pending_lock);
                if (list_empty(&qp->piowait))
                        list_add_tail(&qp->piowait, &dev->piowait);
                spin_unlock(&dev->pending_lock);
        } else
                ret = 0;
        spin_unlock_irqrestore(&qp->s_lock, flags);
        if (ret)
                want_buffer(dev->dd, qp);
        return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
                   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((6 << 28) |
                            (grh->traffic_class << 20) |
                            grh->flow_label);
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = 0x1B;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = dev->gid_prefix;
        hdr->sgid.global.interface_id = dev->dd->ipath_guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}
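
/*
 * Worked example (illustrative numbers): struct ib_grh is the 40-byte
 * InfiniBand GRH, so the return value is 40 / 4 = 10 words, which the
 * caller adds to its header word count.  Assuming SIZE_OF_CRC counts the
 * single 32-bit ICRC word, a bare LRH+BTH header (hwords == 5, GRH not
 * yet included) carrying nwords == 64 payload words gives
 * paylen = (5 - 2 + 64 + 1) * 4 = 272 bytes, i.e. the BTH, payload and
 * ICRC that follow the GRH; the "- 2" drops the two LRH words, which are
 * not part of the GRH payload length.
 */
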
void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
                           struct ipath_other_headers *ohdr,
                           u32 bth0, u32 bth2)
{
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;

        /* Construct the header. */
        extra_bytes = -qp->s_cur_size & 3;
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
        lrh0 = IPATH_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
                                                 &qp->remote_ah_attr.grh,
                                                 qp->s_hdrwords, nwords);
                lrh0 = IPATH_LRH_GRH;
        }
        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
                                       qp->remote_ah_attr.src_path_bits);
        bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
}
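
/*
 * Worked example of the padding arithmetic above (illustrative numbers):
 * with qp->s_cur_size == 13 payload bytes, extra_bytes = -13 & 3 = 3 pad
 * bytes and nwords = (13 + 3) >> 2 = 4 payload words; "extra_bytes << 20"
 * places that pad count in bits 21:20 of bth0, the BTH PadCnt field.
 */
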
/**
 * ipath_do_send - perform a send on a QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void ipath_do_send(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        int (*make_req)(struct ipath_qp *qp);
        unsigned long flags;

        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
                ipath_ruc_loopback(qp);
                goto bail;
        }

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = ipath_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = ipath_make_uc_req;
        else
                make_req = ipath_make_ud_req;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
            !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                goto bail;
        }

        qp->s_flags |= IPATH_S_BUSY;

        spin_unlock_irqrestore(&qp->s_lock, flags);

again:
        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return.  An interrupt will
                 * call ipath_ib_piobufavail() when one is available.
                 */
                if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                                     qp->s_cur_sge, qp->s_cur_size)) {
                        if (ipath_no_bufs_available(qp, dev))
                                goto bail;
                }
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */
                qp->s_hdrwords = 0;
        }

        if (make_req(qp))
                goto again;
bail:;
}
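
/*
 * Context sketch (an assumption, not code from this file): ipath_do_send()
 * is written as a tasklet body taking the QP pointer in 'data', so the
 * driver is expected to register and kick it roughly as
 *
 *      tasklet_init(&qp->s_task, ipath_do_send, (unsigned long) qp);
 *      tasklet_hi_schedule(&qp->s_task);
 *
 * The field name s_task and the use of the high-priority variant are
 * assumptions here.
 */
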
/*
 * This should be called with s_lock held.
 */
void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                         enum ib_wc_status status)
{
        u32 old_last, last;

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
                return;

        /* See ch. 11.2.4.1 and 10.7.3.1 */
        if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof wc);
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc.qp = &qp->ibqp;
                if (status == IB_WC_SUCCESS)
                        wc.byte_len = wqe->length;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
                               status != IB_WC_SUCCESS);
        }

        old_last = last = qp->s_last;
        if (++last >= qp->s_size)
                last = 0;
        qp->s_last = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}