/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);
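/* Re-walk the already acknowledged packets of the first WQE being
 * retried: recompute the per-packet opcode and advance the DMA state
 * by one MTU-sized chunk per acknowledged PSN.
 */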
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}
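/* Rewind the requester to the completer's PSN and mark every
 * not-yet-completed WQE as posted again so it will be resent.
 */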
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	wqe = queue_head(qp->sq.queue);
	npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;

	qp->req.wqe_index	= consumer_index(qp->sq.queue);
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = consumer_index(qp->sq.queue);
		wqe_index != producer_index(qp->sq.queue);
		wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK)
				retry_first_write_send(qp, wqe, mask, npsn);

			if (mask & WR_READ_MASK)
				wqe->iova += npsn * qp->mtu;
		}

		wqe->state = wqe_state_posted;
	}
}
void rnr_nak_timer(unsigned long data)
{
	struct rxe_qp *qp = (struct rxe_qp *)data;

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}
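/* Return the next WQE to process, or NULL if the queue is empty,
 * the QP is draining/drained, or a fenced WQE must wait for earlier
 * work to complete.
 */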
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				consumer_index(qp->sq.queue)) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}
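/* RC opcode state machine: continue a multi-packet message with a
 * MIDDLE/LAST opcode or start a new one with FIRST/ONLY, depending on
 * the previous packet's opcode and whether the remainder fits in one MTU.
 */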
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}
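/* Pick the wire opcode for the next packet of this WQE based on the
 * QP type, the work request opcode and whether the remaining payload
 * fits in a single MTU.
 */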
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}
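/* Reserve one slot of the initiator depth (outstanding RDMA READ /
 * atomic budget) for this WQE; fails when the limit is reached.
 */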
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}
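/* Allocate an skb and build the request headers: BTH plus whichever of
 * RETH, IMMDT, IETH, ATMETH or DETH the opcode requires.
 */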
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct rxe_port		*port = &rxe->port;
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * function
	 */
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->offset	= 0;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = (qp_type(qp) == IB_QPT_GSI) ?
		 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
		 port->pkey_tbl[qp->attr.pkey_index];

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.length);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}
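/* Copy the payload into the packet and finish the ICRC that
 * rxe_prepare() started over the headers.
 */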
static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(rxe, pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = crc32_le(crc, tmp, paylen);

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(rxe, qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					&crc);
			if (err)
				return err;
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}
static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}
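/* Advance the requester PSN and record the WQE's first/last PSN so the
 * completer can match acknowledgements against it.
 */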
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
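/* Snapshot the WQE state and PSN before transmit so they can be rolled
 * back if rxe_xmit_packet() fails.
 */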
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn           = qp->req.psn;
}
static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}
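/* Requester tasklet: pull WQEs off the send queue and turn them into
 * request packets, one MTU-sized packet per iteration.
 */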
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

next_wqe:
	if (unlikely(!qp->valid))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_ERROR)) {
		rxe_drain_req_pkts(qp, true);
		goto exit;
	}

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		qp->req.wqe_index = next_index(qp->sq.queue,
					       qp->req.wqe_index);
		goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	return -EAGAIN;
}