/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
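/* The completer runs as a state machine. rxe_completer() below starts
 * in COMPST_GET_ACK and walks these states until it lands on
 * COMPST_DONE (a packet was consumed; run again) or COMPST_EXIT
 * (nothing more to do until the next packet or timer event).
 */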
enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};
static char *comp_state_name[] = {
	[COMPST_GET_ACK]	= "GET ACK",
	[COMPST_GET_WQE]	= "GET WQE",
	[COMPST_COMP_WQE]	= "COMP WQE",
	[COMPST_COMP_ACK]	= "COMP ACK",
	[COMPST_CHECK_PSN]	= "CHECK PSN",
	[COMPST_CHECK_ACK]	= "CHECK ACK",
	[COMPST_READ]		= "READ",
	[COMPST_ATOMIC]		= "ATOMIC",
	[COMPST_WRITE_SEND]	= "WRITE/SEND",
	[COMPST_UPDATE_COMP]	= "UPDATE COMP",
	[COMPST_ERROR_RETRY]	= "ERROR RETRY",
	[COMPST_RNR_RETRY]	= "RNR RETRY",
	[COMPST_ERROR]		= "ERROR",
	[COMPST_EXIT]		= "EXIT",
	[COMPST_DONE]		= "DONE",
};
static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};
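/* Convert the 5-bit RNR NAK timer code carried in the AETH syndrome
 * into jiffies using the table above (e.g. IB_RNR_TIMER_000_16 ->
 * 160 us), clamped to at least one jiffy so the RNR NAK timer
 * always arms.
 */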
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}
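/* Map a send work request opcode onto the opcode reported in its work
 * completion; note that all the send variants collapse to IB_WC_SEND.
 */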
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:			return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
	case IB_WR_LSO:				return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:			return IB_WC_REG_MR;

	default:
		return 0xff;
	}
}
void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}
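/* Called from the receive path to hand a response packet to the
 * completer. must_sched forces the task onto its tasklet when packets
 * are already queued, instead of running the completer inline.
 */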
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	if (must_sched != 0)
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);

	rxe_run_task(&qp->comp.task, must_sched);
}
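/* Look at the oldest send WQE, if any. The returned state tells the
 * state machine whether to complete it, match the response packet's
 * PSN against it, or exit.
 */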
static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}
static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}
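/* PSNs are 24 bits wide and wrap, so ordering is decided by
 * psn_compare(), which yields <0, 0 or >0 like a circular memcmp.
 * A response beyond wqe->last_psn retires (or retries) the oldest
 * WQE; one behind qp->comp.psn is most likely a duplicate.
 */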
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is,
	 * complete send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet: if it matches an
		 * uncompleted WQE, complete it; else ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}
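/* Validate the response against the previous one (qp->comp.opcode)
 * and against the WQE it acknowledges, then dispatch on the AETH
 * syndrome: ACK, RNR NAK or NAK.
 */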
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		/* fall through */
		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an
		 * AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 0);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}
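/* Copy the payload of a read response packet into the local buffers
 * described by the WQE's DMA state; the transfer is complete once
 * dma.resid reaches zero on the last packet.
 */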
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;
	else
		return COMPST_UPDATE_COMP;
}
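/* Copy the original value carried in an atomic ack into the 8-byte
 * local destination described by the WQE's DMA state.
 */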
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;

	return COMPST_COMP_ACK;
}
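/* Build the CQE for a retiring WQE. Kernel and user consumers use
 * different completion layouts (ib_wc vs ib_uverbs_wc), hence the
 * two nearly identical branches.
 */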
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc *wc = &cqe->ibwc;

		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len = wqe->dma.length;
		wc->qp = &qp->ibqp;
	} else {
		struct ib_uverbs_wc *uwc = &cqe->uibwc;

		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len = wqe->dma.length;
		uwc->qp_num = qp->ibqp.qp_num;
	}
}
/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    wqe->status != IB_WC_SUCCESS) {
		make_send_cqe(qp, wqe, &cqe);
		advance_consumer(qp->sq.queue);
		rxe_cq_post(qp->scq, &cqe, 0);
	} else {
		advance_consumer(qp->sq.queue);
	}

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 0);
	}
}
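/* Retire the WQE acknowledged by this packet: return its rd_atomic
 * credit if it held one, finish an SQ drain transition if one is in
 * flight, and post the completion.
 */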
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 0);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}
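/* Retire a WQE that does not need (or did not get) an explicit ack,
 * advancing qp->comp.psn past it when a packet is in hand.
 */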
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	while ((wqe = queue_head(qp->sq.queue))) {
		if (notify) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		} else {
			advance_consumer(qp->sq.queue);
		}
	}
}
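/* Completer task body, run via the rxe task machinery. Per the
 * comments at the exit/done labels below, returning 0 asks the task
 * loop to call us again, while a nonzero return stops it.
 */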
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		goto exit;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* restart the retransmit timer if
			 * (1) QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			goto exit;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted)) {
				goto exit;
			}

			/* if we've started a retry, don't start another
			 * retry sequence, unless this is a timeout.
			 */
			if (qp->comp.started_retry &&
			    !qp->comp.timeout_retry) {
				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				goto done;
			}

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					rxe_run_task(&qp->req.task, 0);
				}

				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				goto done;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
				goto exit;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);

			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}

			goto exit;
		}
	}

exit:
	/* we come here if we are done with processing and want the task to
	 * exit from the loop calling us
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return -EAGAIN;

done:
	/* we come here if we have processed a packet and want the task to
	 * call us again to see if there is anything else to do
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return 0;
}