// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
 * the rq_pages array are set to NULL and are refilled after the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst.
 */

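/* In outline (simplified; not every step of the call chain is shown):
 *
 *	svc_recv
 *	   svc_rdma_recvfrom			(first call)
 *	      svc_rdma_process_read_list	(posts RDMA Reads, returns 0)
 *	...	Read completions raise "Data Ready" again	...
 *	svc_recv
 *	   svc_rdma_recvfrom			(second call)
 *	      svc_rdma_read_complete		(assembles rq_arg; recvfrom
 *						 then returns its length)
 */
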
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

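/* Allocate a recv_ctxt, allocate and DMA-map its Receive buffer, and
 * pre-build the Receive WR so that posting it later is inexpensive.
 * Returns NULL if an allocation or the DMA mapping fails.
 */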
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	svc_rdma_cc_init(rdma, &ctxt->rc_cc);
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		return NULL;

	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
	ctxt->rc_page_count = 0;
	return ctxt;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);

	/* @rc_page_count is normally zero here, but error flows
	 * can leave pages in @rc_pages.
	 */
	release_pages(ctxt->rc_pages, ctxt->rc_page_count);

	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
}

/**
 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
 * @xprt: the transport which owned the context
 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
{
	struct svc_rdma_recv_ctxt *ctxt = vctxt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

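/* Post up to @wanted Receive WRs as a single chain. Returns false if
 * the transport is closing or the Receive Queue could not be refilled,
 * in which case the connection cannot make further progress.
 */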
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return false;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(&ctxt->rc_cid);
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return false;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_free;
	return true;

err_free:
	trace_svcrdma_rq_post_err(rdma, ret);
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs. */
	return false;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Return values:
 *   %true: Receive Queue initialization successful
 *   %false: memory allocation or DMA error
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	unsigned int total;

	/* For each credit, allocate enough recv_ctxts for one
	 * posted Receive and one RPC in process.
	 */
	total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch;
	while (total--) {
		struct svc_rdma_recv_ctxt *ctxt;

		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
		if (!ctxt)
			return false;
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	}

	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;
	trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);

	/* If receive posting fails, the connection is about to be
	 * lost anyway. The server will not be able to send a reply
	 * for this RPC, and the client will retransmit this RPC
	 * anyway when it reconnects.
	 *
	 * Therefore we drop the Receive, even if status was SUCCESS
	 * to reduce the likelihood of replayed requests once the
	 * client reconnects.
	 */
	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
			goto dropped;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
	else
		trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
dropped:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

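/* Point rqstp::rq_arg at the inline content of the Receive buffer.
 * Nothing is copied: head[0] covers the entire received message, and
 * the page list and tail start out empty.
 */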
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *          to the first byte past the Read list. rc_read_pcl and
 *          rc_call_pcl cl_count fields are set to the number of
 *          Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *          unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

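/* Decode one ingress Write chunk: read its segment count, range-check
 * the count before using it in arithmetic, then consume that many
 * segments from the transport header. Returns false if the chunk is
 * malformed or overruns the Receive buffer.
 */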
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* Before trusting the segcount value enough to use it in
	 * a computation, perform a simple range check. This is an
	 * arbitrary but sensible limit (ie, not architectural).
	 */
	if (unlikely(segcount > RPCSVC_MAXPAGES))
		return false;

	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Write list, and
 *          the number of Write chunks is in rc_write_pcl.cl_count.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *          in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Write list.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Reply chunk.
 *  %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}

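/* Reminder: the fixed portion of every RPC-over-RDMA transport header
 * is four XDR words (rdma_xid, rdma_vers, rdma_credit, and rdma_proc,
 * the message type), followed by the Read list, the Write list, and
 * the Reply chunk (see RFC 8166). svc_rdma_xdr_decode_req() below
 * walks exactly that layout.
 */
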
/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

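/* Send an RDMA_ERROR message describing why the Call could not be
 * processed. If no send_ctxt is available, the error is silently
 * dropped.
 */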
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* RPC call direction */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with a single Read chunk (only the upper layer data payload
 * was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_one(struct svc_rqst *rqstp,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_chunk *chunk = pcl_first_chunk(&ctxt->rc_read_pcl);
	struct xdr_buf *buf = &rqstp->rq_arg;
	unsigned int length;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
	buf->head[0].iov_len = chunk->ch_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	buf->pages = &rqstp->rq_pages[0];
	length = xdr_align_size(chunk->ch_length);
	buf->page_len = length;
	buf->len += length;
	buf->buflen += length;
}

/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with payload in multiple Read chunks and no PZRC.
 */
static void svc_rdma_read_complete_multiple(struct svc_rqst *rqstp,
					    struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *buf = &rqstp->rq_arg;

	buf->len += ctxt->rc_readbytes;
	buf->buflen += ctxt->rc_readbytes;

	buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
	buf->pages = &rqstp->rq_pages[1];
	buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
}

/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_NOMSG type message
 * (the RPC message body was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_pzrc(struct svc_rqst *rqstp,
					struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *buf = &rqstp->rq_arg;

	buf->len += ctxt->rc_readbytes;
	buf->buflen += ctxt->rc_readbytes;

	buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
	buf->pages = &rqstp->rq_pages[1];
	buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
}

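/* Called on the second svc_rdma_recvfrom pass, after the RDMA Reads
 * for @ctxt have completed: adopt the Read sink pages into @rqstp,
 * restore the saved rq_arg, and finish assembling the Call message
 * according to which chunks the client provided.
 */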
static noinline void svc_rdma_read_complete(struct svc_rqst *rqstp,
					    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	/* Transfer the Read chunk pages into @rqstp.rq_pages, replacing
	 * the rq_pages that were already allocated for this rqstp.
	 */
	release_pages(rqstp->rq_respages, ctxt->rc_page_count);
	for (i = 0; i < ctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = ctxt->rc_pages[i];

	/* Update @rqstp's result send buffer to start after the
	 * last page in the RDMA Read payload.
	 */
	rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Prevent svc_rdma_recv_ctxt_put() from releasing the
	 * pages in ctxt::rc_pages a second time.
	 */
	ctxt->rc_page_count = 0;

	/* Finish constructing the RPC Call message. The exact
	 * procedure for that depends on what kind of RPC/RDMA
	 * chunks were provided by the client.
	 */
	rqstp->rq_arg = ctxt->rc_saved_arg;
	if (pcl_is_empty(&ctxt->rc_call_pcl)) {
		if (ctxt->rc_read_pcl.cl_count == 1)
			svc_rdma_read_complete_one(rqstp, ctxt);
		else
			svc_rdma_read_complete_multiple(rqstp, ctxt);
	} else {
		svc_rdma_read_complete_pzrc(rqstp, ctxt);
	}

	trace_svcrdma_read_finished(&ctxt->rc_cid);
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload. When the Read WRs complete, build the
 *     full message and return the number of bytes in it.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	/* Prevent svc_xprt_release() from releasing pages in rq_pages
	 * when returning 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		svc_xprt_received(xprt);
		svc_rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (ctxt)
		list_del(&ctxt->rc_list);
	else
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	/* Unblock the transport for the next receive */
	svc_xprt_received(xprt);
	if (!ctxt)
		return 0;

	percpu_counter_inc(&svcrdma_stat_recv);
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl))
		goto out_readlist;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	set_bit(RQ_SECURE, &rqstp->rq_flags);
	return rqstp->rq_arg.len;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readlist:
	/* This @rqstp is about to be recycled. Save the work
	 * already done constructing the Call message in rq_arg
	 * so it can be restored when the RDMA Reads have
	 * completed.
	 */
	ctxt->rc_saved_arg = rqstp->rq_arg;

	ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
	if (ret < 0) {
		if (ret == -EINVAL)
			svc_rdma_send_error(rdma_xprt, ctxt, ret);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		svc_xprt_deferred_close(xprt);
		return ret;
	}
	return 0;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}