// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

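/* Peek at the first recv_ctxt on @list, or return NULL if @list is
 * empty. The ctxt is not removed from the list.
 */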
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

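/* Set up a completion ID for a Receive: the Receive CQ's resource ID
 * plus a sequence number that is unique on this transport.
 */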
static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

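/* Allocate a recv_ctxt along with a DMA-mapped inline receive buffer,
 * and initialize its Receive WR so the ctxt can be posted immediately.
 * Returns NULL if allocation or DMA mapping fails.
 */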
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

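/* Unmap and release a recv_ctxt's receive buffer, then free the ctxt
 * itself. The inverse of svc_rdma_recv_ctxt_alloc().
 */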
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

/**
 * svc_rdma_release_rqst - Release transport-specific per-rqst resources
 * @rqstp: svc_rqst being released
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	rqstp->rq_xprt_ctxt = NULL;
	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

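/* Post one Receive WR on the transport's QP. On failure, the ctxt is
 * released and a negative errno is returned.
 */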
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	trace_svcrdma_post_recv(ctxt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	if (ret)
		goto err_post;
	return 0;

err_post:
	trace_svcrdma_rq_post_err(rdma, ret);
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	return ret;
}

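/* Replenish a single Receive WR, unless the transport is being torn
 * down. Invoked from the Receive completion handler to keep the
 * Receive queue populated.
 */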
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return 0;
	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret)
			return false;
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

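/* Construct rqstp->rq_arg so that its head iovec covers the entire
 * inline portion of the Receive buffer; the page vector and tail
 * iovec start out empty.
 */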
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *          to the first byte past the Read list. rc_read_pcl and
 *          rc_call_pcl cl_count fields are set to the number of
 *          Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *          unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

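/* Decode one ingress Write chunk's segment count and verify that the
 * whole chunk fits within the Receive buffer.
 */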
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* A bogus segcount causes this buffer overflow check to fail. */
	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Write list, and
 *          the number of Write chunks is in rc_write_pcl.cl_count.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *          in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Write list.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Reply chunk.
 *  %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}

/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

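/* Finish assembling the RPC Call message in rqstp::rq_arg, using state
 * carried in @head across the two svc_rdma_recvfrom calls (see the
 * Page Management notes at the top of this file).
 */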
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

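/* Send a transport-level error response (RDMA_ERROR) reflecting
 * @status. If no send_ctxt is available, the error reply is dropped
 * silently.
 */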
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* RPC call direction */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl))
		goto out_readlist;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readlist:
	ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
	if (ret < 0)
		goto out_readfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}