/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

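/* Replay the DMA bookkeeping of the first retried WQE for the npsn
 * packets that were already acknowledged, so that retransmission
 * resumes at the correct sge offset and wire opcode.
 */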
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

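/* Rewind the requester to the oldest unacknowledged WQE: restart from
 * the consumer index and from the last PSN acknowledged by the
 * completer (qp->comp.psn), restoring the DMA state of every WQE that
 * still has to be (re)sent. npsn counts how many packets of the oldest
 * WQE were already acknowledged.
 */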
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	wqe = queue_head(qp->sq.queue);
	npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;

	qp->req.wqe_index = consumer_index(qp->sq.queue);
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = consumer_index(qp->sq.queue);
	     wqe_index != producer_index(qp->sq.queue);
	     wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK)
				retry_first_write_send(qp, wqe, mask, npsn);

			if (mask & WR_READ_MASK)
				wqe->iova += npsn * qp->mtu;
		}

		wqe->state = wqe_state_posted;
	}
}

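/* Fires once the receiver-not-ready (RNR NAK) back-off delay expires;
 * reschedule the requester task so the send queue is tried again.
 */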
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

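/* Return the next send WQE for the requester to work on, or NULL if
 * there is nothing to do. Handles the SQ drain transition (signalling
 * IB_EVENT_SQ_DRAINED once outstanding work completes) and holds back
 * fenced WQEs until all earlier WQEs have completed.
 */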
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
	unsigned long flags;

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((qp->req.wqe_index !=
				consumer_index(qp->sq.queue)) ||
				(wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (qp->req.wqe_index == producer_index(qp->sq.queue))
		return NULL;

	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

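/* The next_opcode_* helpers pick the wire opcode for the next packet
 * from the work request opcode, the opcode of the previously built
 * packet (qp->req.opcode) and whether the remaining payload fits in a
 * single MTU, selecting the FIRST/MIDDLE/LAST/ONLY variant accordingly.
 */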
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

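/* Reserve one slot of the QP's outstanding RDMA read/atomic budget
 * (qp->req.rd_atomic). If the budget is exhausted, restore the counter
 * and make the requester back off until a response releases a slot.
 */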
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

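/* Allocate the skb for one request packet and build its headers: the
 * BTH plus whichever of RETH, IMMDT, IETH, ATMETH and DETH the opcode
 * requires. paylen covers everything from the start of the BTH to the
 * end of the ICRC, including the pad that rounds the payload up to a
 * multiple of four bytes.
 */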
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct rxe_port		*port = &rxe->port;
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
	struct rxe_av		*av;
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u16			pkey;
	u32			qp_num;
	int			ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * function
	 */
	pkt->opcode	= opcode;
	pkt->qp		= qp;
	pkt->psn	= qp->req.psn;
	pkt->mask	= rxe_opcode[opcode].mask;
	pkt->paylen	= paylen;
	pkt->offset	= 0;
	pkt->wqe	= wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = (qp_type(qp) == IB_QPT_GSI) ?
		 port->pkey_tbl[ibwr->wr.ud.pkey_index] :
		 port->pkey_tbl[qp->attr.pkey_index];

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.length);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
		       int paylen)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 crc = 0;
	u32 *p;
	int err;

	err = rxe_prepare(rxe, pkt, skb, &crc);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			crc = rxe_crc32(rxe, crc, tmp, paylen);
			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(rxe, qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					&crc);
			if (err)
				return err;
		}
	}
	p = payload_addr(pkt) + paylen + bth_pad(pkt);

	*p = ~crc;

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

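/* Assign the PSN range consumed by this WQE. num_pkt counts the
 * packets still to be sent including the current one; e.g. with an MTU
 * of 1024 and resid + payload == 2500, num_pkt == 3. For RDMA reads
 * the PSN is advanced past the expected response packets; otherwise it
 * advances by one per request packet.
 */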
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

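/* save_state()/rollback_state() snapshot and restore the WQE state and
 * the request PSN around rxe_xmit_packet(), so a failed transmit does
 * not leave the requester ahead of what was actually sent.
 */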
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state     = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn  = wqe->last_psn;
	*rollback_psn           = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state     = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn  = rollback_wqe->last_psn;
	qp->req.psn    = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

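/* Main requester work function, run from the QP's request task. Each
 * pass takes the next send WQE, handles local operations (MR
 * registration and invalidation) without emitting packets, and
 * otherwise builds, fills and transmits one request packet before
 * looping back to next_wqe.
 */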
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		qp->req.wqe_index = next_index(qp->sq.queue,
						qp->req.wqe_index);
		goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto exit;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		goto err;
	}

	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			kfree_skb(skb);
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	kfree_skb(skb);
	wqe->status = IB_WC_LOC_PROT_ERR;
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);