// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
/* The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Two cases matter for releasing the Reply's pages:
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
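/* A rough sketch of this transmit path (chunk encoding and error
 * handling are elided here; the functions below are authoritative):
 *
 *	svc_rdma_sendto()
 *	  svc_rdma_send_ctxt_get()	reserve a send_ctxt
 *	  svc_rdma_send_write_chunk()	post Write WRs for chunk payloads
 *	  svc_rdma_send_reply_msg()
 *	    svc_rdma_map_reply_msg()	DMA-map the inline message
 *	    svc_rdma_save_io_pages()	move pages out of the svc_rqst
 *	    svc_rdma_send()		post the Send WR
 *
 *	... later, when that Send WR completes:
 *	svc_rdma_wc_send()
 *	  svc_rdma_send_ctxt_put()	DMA-unmap SGEs, release pages
 */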
#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
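/* Return the first send_ctxt on @list without removing it, or NULL
 * if the list is empty.
 */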
static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}
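/* Allocate one send_ctxt: the ctxt itself, with one ib_sge for each
 * permitted Send SGE, plus a buffer for the transport header. That
 * buffer is DMA-mapped once here and remains mapped for the lifetime
 * of the ctxt.
 */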
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}
/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}
/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}
/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}
/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_xprt_put(&rdma->sc_xprt);
}
/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		trace_svcrdma_post_send(wr, ret);
		if (ret) {
			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
			svc_xprt_put(&rdma->sc_xprt);
			wake_up(&rdma->sc_send_wait);
		}
		break;
	}
	return ret;
}
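/* Return the number of pad bytes needed to bring @len up to the next
 * XDR quad (4-byte) boundary; for example, a 3-byte XDR item needs
 * one byte of padding, and a 4-byte item needs none.
 */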
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}
/* Returns length of transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
	unsigned int nsegs;
	__be32 *p;

	p = rdma_resp;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p += rpcrdma_fixed_maxsz + 1;

	/* Skip Write list. */
	while (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	/* Skip Reply chunk. */
	if (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	return (unsigned long)p - (unsigned long)rdma_resp;
}
/* One Write chunk is copied from Call transport header to Reply
 * transport header. Each segment's length field is updated to
 * reflect the number of bytes consumed in the segment.
 *
 * Returns the number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	u32 seg_len;

	/* Write list discriminator */
	*dst++ = *src++;

	/* number of segments in this chunk */
	nsegs = be32_to_cpup(src);
	*dst++ = *src++;

	for (i = nsegs; i; i--) {
		/* segment's RDMA handle */
		*dst++ = *src++;

		/* bytes returned in this segment */
		seg_len = be32_to_cpu(*src);
		if (remaining >= seg_len) {
			/* entire segment was consumed */
			*dst = *src;
			remaining -= seg_len;
		} else {
			/* segment only partly filled */
			*dst = cpu_to_be32(remaining);
			remaining = 0;
		}
		dst++;
		src++;

		/* segment's RDMA offset */
		*dst++ = *src++;
		*dst++ = *src++;
	}

	return nsegs;
}
/* The client provided a Write list in the Call message. Fill in
 * the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
					   unsigned int consumed)
{
	unsigned int nsegs;
	__be32 *p, *q;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	q = wr_ch;
	while (*q != xdr_zero) {
		nsegs = xdr_encode_write_chunk(p, q, consumed);
		q += 2 + nsegs * rpcrdma_segment_maxsz;
		p += 2 + nsegs * rpcrdma_segment_maxsz;
		consumed = 0;
	}

	/* Terminate Write list */
	*p++ = xdr_zero;

	/* Reply chunk discriminator; may be replaced later */
	*p = xdr_zero;
}
/* The client provided a Reply chunk in the Call message. Fill in
 * the segments in the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
					    unsigned int consumed)
{
	__be32 *p;

	/* Find the Reply chunk in the Reply's xprt header.
	 * RPC-over-RDMA V1 replies never have a Read list.
	 */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	/* Skip past Write list */
	while (*p++ != xdr_zero)
		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

	xdr_encode_write_chunk(p, rp_ch, consumed);
}
/* Parse the RPC Call's transport header.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
				      __be32 **write, __be32 **reply)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = p;
	else
		*reply = NULL;
}
/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply. Picks the
 * first R_key it finds in the chunk lists.
 *
 * Returns zero if RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(__be32 *rdma_argp,
				 __be32 *wr_lst, __be32 *rp_ch)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		p += 2;
	else if (wr_lst && be32_to_cpup(wr_lst + 1))
		p = wr_lst + 2;
	else if (rp_ch && be32_to_cpup(rp_ch + 1))
		p = rp_ch + 2;
	else
		return 0;
	return be32_to_cpup(p);
}
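/* DMA-map part of a page for sending, and record the mapping in the
 * next free SGE of @ctxt's Send WR. Returns zero on success, or a
 * negative errno if the mapping attempt fails.
 */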
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}
/* ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}
/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *ctxt,
			     unsigned int len)
{
	ctxt->sc_sges[0].length = len;
	ctxt->sc_send_wr.num_sge++;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr, len,
				      DMA_TO_DEVICE);
}
/**
 * svc_rdma_map_reply_msg - Map the buffer holding RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing RPC message
 * @wr_lst: pointer to Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *ctxt,
			   struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
		return -EIO;
	ret = svc_rdma_dma_map_buf(rdma, ctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (wr_lst) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_padsize(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
			return -EIO;
		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
			return -EIO;
		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}
/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the ctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   __be32 *rdma_argp,
				   struct svc_rqst *rqstp,
				   __be32 *wr_lst, __be32 *rp_ch)
{
	int ret;

	if (!rp_ch) {
		ret = svc_rdma_map_reply_msg(rdma, ctxt,
					     &rqstp->rq_res, wr_lst);
		if (ret < 0)
			return ret;
	}

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	if (rdma->sc_snd_w_inv) {
		ctxt->sc_send_wr.ex.invalidate_rkey =
			svc_rdma_get_inv_rkey(rdma_argp, wr_lst, rp_ch);
		if (ctxt->sc_send_wr.ex.invalidate_rkey)
			ctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
	}
	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
		ctxt->sc_send_wr.num_sge);
	return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}
/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	__be32 *p;
	int ret;

	p = ctxt->sc_xprt_buf;
	trace_svcrdma_err_chunk(*p);
	p += 3;
	*p++ = rdma_error;
	*p   = err_chunk;
	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
	if (ret) {
		svc_rdma_send_ctxt_put(rdma, ctxt);
		return ret;
	}

	return 0;
}
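/* The transport header for the reply is built in the send_ctxt's
 * sc_xprt_buf rather than in rq_res, so no header space needs to be
 * reserved in the xdr_buf here.
 */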
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	rdma_argp = rctxt->rc_recv_buf;
	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;
	rdma_resp = sctxt->sc_xprt_buf;

	p = rdma_resp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ch ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p   = xdr_zero;

	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rdma_argp, rqstp,
				      wr_lst, rp_ch);
	if (ret < 0)
		goto err1;
	ret = 0;

out:
	rqstp->rq_xprt_ctxt = NULL;
	svc_rdma_recv_ctxt_put(rdma, rctxt);
	return ret;

err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	ret = 0;
	goto out;

err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	ret = -ENOTCONN;
	goto out;
}