// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 *      Redistribution and use in source and binary forms, with or without
 *      modification, are permitted provided that the following conditions
 *      are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
                                        rc_list);
}
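
/* Allocate a Receive context: a receive buffer sized to the transport's
 * inline threshold is allocated and DMA-mapped, and the Receive WR and
 * SGE embedded in the context are initialized to point at it.
 */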
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;

        ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                goto fail0;
        buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

        ctxt->rc_recv_wr.next = NULL;
        ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
        ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
        ctxt->rc_recv_wr.num_sge = 1;
        ctxt->rc_cqe.done = svc_rdma_wc_receive;
        ctxt->rc_recv_sge.addr = addr;
        ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
        ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
        ctxt->rc_recv_buf = buffer;
        ctxt->rc_temp = false;
        return ctxt;

fail2:
        kfree(buffer);
fail1:
        kfree(ctxt);
fail0:
        return NULL;
}
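
/* Unmap and free the receive buffer, then free the context itself. */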
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
                                       struct svc_rdma_recv_ctxt *ctxt)
{
        ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
                            ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
        kfree(ctxt->rc_recv_buf);
        kfree(ctxt);
}
/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_destroy(rdma, ctxt);
        }
}
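
/* Take a free Receive context off the transport's free list, or
 * allocate a fresh one when the list is empty.
 */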
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        spin_lock(&rdma->sc_recv_lock);
        ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
        if (!ctxt)
                goto out_empty;
        list_del(&ctxt->rc_list);
        spin_unlock(&rdma->sc_recv_lock);

out:
        ctxt->rc_page_count = 0;
        return ctxt;

out_empty:
        spin_unlock(&rdma->sc_recv_lock);

        ctxt = svc_rdma_recv_ctxt_alloc(rdma);
        if (!ctxt)
                return NULL;
        goto out;
}
/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                            struct svc_rdma_recv_ctxt *ctxt)
{
        unsigned int i;

        for (i = 0; i < ctxt->rc_page_count; i++)
                put_page(ctxt->rc_pages[i]);

        if (!ctxt->rc_temp) {
                spin_lock(&rdma->sc_recv_lock);
                list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
                spin_unlock(&rdma->sc_recv_lock);
        } else
                svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}
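
/* Post one Receive WR. A reference on the svc_xprt is taken for every
 * posted Receive and is dropped by the completion handler; if posting
 * fails, the context and the reference are released here instead.
 */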
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
                                struct svc_rdma_recv_ctxt *ctxt)
{
        int ret;

        svc_xprt_get(&rdma->sc_xprt);
        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
        trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
        if (ret)
                goto err_post;
        return 0;

err_post:
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        svc_xprt_put(&rdma->sc_xprt);
        return ret;
}
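
/* Replenish a single Receive WR using a context from the free list. */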
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        ctxt = svc_rdma_recv_ctxt_get(rdma);
        if (!ctxt)
                return -ENOMEM;
        return __svc_rdma_post_recv(rdma, ctxt);
}
/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;
        unsigned int i;
        int ret;

        for (i = 0; i < rdma->sc_max_requests; i++) {
                ctxt = svc_rdma_recv_ctxt_get(rdma);
                if (!ctxt)
                        return false;
                ctxt->rc_temp = true;
                ret = __svc_rdma_post_recv(rdma, ctxt);
                if (ret) {
                        pr_err("svcrdma: failure posting recv buffers: %d\n",
                               ret);
                        return false;
                }
        }
        return true;
}
/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_recv_ctxt *ctxt;

        trace_svcrdma_wc_receive(wc);

        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

        if (wc->status != IB_WC_SUCCESS)
                goto flushed;

        if (svc_rdma_post_recv(rdma))
                goto post_err;

        /* All wc fields are now known to be valid */
        ctxt->rc_byte_len = wc->byte_len;
        ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
                                   ctxt->rc_recv_sge.addr,
                                   wc->byte_len, DMA_FROM_DEVICE);

        spin_lock(&rdma->sc_rq_dto_lock);
        list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
        spin_unlock(&rdma->sc_rq_dto_lock);
        set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
        if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
                svc_xprt_enqueue(&rdma->sc_xprt);
        goto out;

flushed:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
post_err:
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&rdma->sc_xprt);
out:
        svc_xprt_put(&rdma->sc_xprt);
}
/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_put(rdma, ctxt);
        }
        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_put(rdma, ctxt);
        }
}
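
/* Map the just-received message into rqstp->rq_arg: the head iovec
 * points directly into the Receive buffer, so no data is copied.
 */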
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
                                   struct svc_rdma_recv_ctxt *ctxt)
{
        struct xdr_buf *arg = &rqstp->rq_arg;

        arg->head[0].iov_base = ctxt->rc_recv_buf;
        arg->head[0].iov_len = ctxt->rc_byte_len;
        arg->tail[0].iov_base = NULL;
        arg->tail[0].iov_len = 0;
        arg->page_len = 0;
        arg->page_base = 0;
        arg->buflen = ctxt->rc_byte_len;
        arg->len = ctxt->rc_byte_len;
}
/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
        u32 position;
        bool first;

        first = true;
        while (*p++ != xdr_zero) {
                if (first) {
                        position = be32_to_cpup(p++);
                        first = false;
                } else if (be32_to_cpup(p++) != position) {
                        return NULL;
                }
                p++;    /* handle */
                if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
                        return NULL;
                p += 2; /* offset */

                if (p > end)
                        return NULL;
        }
        return p;
}
/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
                                     u32 maxlen)
{
        u32 i, segcount;

        segcount = be32_to_cpup(p++);
        for (i = 0; i < segcount; i++) {
                p++;    /* handle */
                if (be32_to_cpup(p++) > maxlen)
                        return NULL;
                p += 2; /* offset */

                if (p > end)
                        return NULL;
        }

        return p;
}
/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
        u32 chcount;

        chcount = 0;
        while (*p++ != xdr_zero) {
                p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
                if (!p)
                        return NULL;
                if (chcount++ > 1)
                        return NULL;
        }
        return p;
}
/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
        if (*p++ != xdr_zero) {
                p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
                if (!p)
                        return NULL;
        }
        return p;
}
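
/* For reference: an RPC-over-RDMA transport header (RFC 8166) begins
 * with four fixed 32-bit fields -- XID, version, credit value, and
 * message type -- followed by the Read list, the Write list, and the
 * Reply chunk. Each list is a sequence of XDR optional items, so an
 * xdr_zero discriminator terminates it; the parsers above key on that
 * terminator.
 */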
/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
        __be32 *p, *end, *rdma_argp;
        unsigned int hdr_len;

        /* Verify that there's enough bytes for header + something */
        if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
                goto out_short;

        rdma_argp = rq_arg->head[0].iov_base;
        if (*(rdma_argp + 1) != rpcrdma_version)
                goto out_version;

        switch (*(rdma_argp + 3)) {
        case rdma_msg:
                break;
        case rdma_nomsg:
                break;

        case rdma_done:
                goto out_drop;

        case rdma_error:
                goto out_drop;

        default:
                goto out_proc;
        }

        end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
        p = xdr_check_read_list(rdma_argp + 4, end);
        if (!p)
                goto out_inval;
        p = xdr_check_write_list(p, end);
        if (!p)
                goto out_inval;
        p = xdr_check_reply_chunk(p, end);
        if (!p)
                goto out_inval;
        if (p > end)
                goto out_inval;

        rq_arg->head[0].iov_base = p;
        hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
        rq_arg->head[0].iov_len -= hdr_len;
        rq_arg->len -= hdr_len;
        trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
        return hdr_len;

out_short:
        trace_svcrdma_decode_short(rq_arg->len);
        return -EINVAL;

out_version:
        trace_svcrdma_decode_badvers(rdma_argp);
        return -EPROTONOSUPPORT;

out_drop:
        trace_svcrdma_decode_drop(rdma_argp);
        return 0;

out_proc:
        trace_svcrdma_decode_badproc(rdma_argp);
        return -EINVAL;

out_inval:
        trace_svcrdma_decode_parse(rdma_argp);
        return -EINVAL;
}
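
/* Finish assembling an RPC Call once the RDMA Reads for its Read chunk
 * have completed: the sink pages and the saved xdr_buf fields are moved
 * from the svc_rdma_recv_ctxt into the svc_rqst used for this call.
 */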
static void rdma_read_complete(struct svc_rqst *rqstp,
                               struct svc_rdma_recv_ctxt *head)
{
        int page_no;

        /* Move Read chunk pages to rqstp so that they will be released
         * when svc_process is done with them.
         */
        for (page_no = 0; page_no < head->rc_page_count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->rc_pages[page_no];
        }
        head->rc_page_count = 0;

        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
        rqstp->rq_arg.page_len = head->rc_arg.page_len;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->rc_arg.head[0];
        rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
        rqstp->rq_arg.len = head->rc_arg.len;
        rqstp->rq_arg.buflen = head->rc_arg.buflen;
}
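
/* Construct and post an RDMA_ERROR reply: ERR_VERS for a version
 * mismatch, otherwise ERR_CHUNK for a transport header that could
 * not be parsed.
 */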
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
                                __be32 *rdma_argp, int status)
{
        struct svc_rdma_send_ctxt *ctxt;
        unsigned int length;
        __be32 *p;
        int ret;

        ctxt = svc_rdma_send_ctxt_get(xprt);
        if (!ctxt)
                return;

        p = ctxt->sc_xprt_buf;
        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = xprt->sc_fc_credits;
        *p++ = rdma_error;
        switch (status) {
        case -EPROTONOSUPPORT:
                *p++ = err_vers;
                *p++ = rpcrdma_version;
                *p++ = rpcrdma_version;
                trace_svcrdma_err_vers(*rdma_argp);
                break;
        default:
                *p++ = err_chunk;
                trace_svcrdma_err_chunk(*rdma_argp);
        }
        length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
        svc_rdma_sync_reply_hdr(xprt, ctxt, length);

        ctxt->sc_send_wr.opcode = IB_WR_SEND;
        ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
        if (ret)
                svc_rdma_send_ctxt_put(xprt, ctxt);
}
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
                                          __be32 *rdma_resp)
{
        __be32 *p;

        if (!xprt->xpt_bc_xprt)
                return false;

        p = rdma_resp + 3;
        if (*p++ != rdma_msg)
                return false;

        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* XID sanity */
        if (*p++ != *rdma_resp)
                return false;
        /* call direction */
        if (*p == cpu_to_be32(RPC_CALL))
                return false;

        return true;
}
/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *ctxt;
        __be32 *p;
        int ret;

        spin_lock(&rdma_xprt->sc_rq_dto_lock);
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
        if (ctxt) {
                list_del(&ctxt->rc_list);
                spin_unlock(&rdma_xprt->sc_rq_dto_lock);
                rdma_read_complete(rqstp, ctxt);
                goto complete;
        }
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
        if (!ctxt) {
                /* No new incoming requests, terminate the loop */
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                spin_unlock(&rdma_xprt->sc_rq_dto_lock);
                return 0;
        }
        list_del(&ctxt->rc_list);
        spin_unlock(&rdma_xprt->sc_rq_dto_lock);

        atomic_inc(&rdma_stat_recv);

        svc_rdma_build_arg_xdr(rqstp, ctxt);

        /* Prevent svc_xprt_release from releasing pages in rq_pages
         * if we return 0 or an error.
         */
        rqstp->rq_respages = rqstp->rq_pages;
        rqstp->rq_next_page = rqstp->rq_respages;

        p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
        ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
        if (ret < 0)
                goto out_err;
        if (ret == 0)
                goto out_drop;
        rqstp->rq_xprt_hlen = ret;

        if (svc_rdma_is_backchannel_reply(xprt, p)) {
                ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
                                               &rqstp->rq_arg);
                svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
                return ret;
        }

        p += rpcrdma_fixed_maxsz;
        if (*p != xdr_zero)
                goto out_readchunk;

complete:
        rqstp->rq_xprt_ctxt = ctxt;
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        return rqstp->rq_arg.len;

out_readchunk:
        ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
        if (ret < 0)
                goto out_postfail;
        return 0;

out_err:
        svc_rdma_send_error(rdma_xprt, p, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;

out_postfail:
        if (ret == -EINVAL)
                svc_rdma_send_error(rdma_xprt, p, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return ret;

out_drop:
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;
}