/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Read chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size = sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		if (unlikely(!*ppages)) {
			/* XXX: Certain upper layer operations do
			 *	not provide receive buffer pages.
			 */
			*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
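/* For example, a Read chunk built from two registered segments, both
 * at XDR position 20, appears on the wire as
 *
 *    1, 20, H1, L1, O1, 1, 20, H2, L2, O2, 0
 *
 * where the trailing 0 discriminator terminates the Read list.
 */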
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
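/* For example, a single Write chunk made up of two registered
 * segments appears on the wire as
 *
 *    1, 2, H1, L1, O1, H2, L2, O2, 0
 *
 * where 2 is the segment count and the trailing 0 terminates the
 * Write list.
 */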
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
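/* Because the Reply chunk conveys the entire RPC reply, the whole
 * rq_rcv_buf is converted below starting at XDR position 0, and no
 * trailing discriminator follows the counted array.
 */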
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = sc->sc_sges;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -ENOBUFS;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
 *	%-ENOBUFS if no MRs are available to register chunks,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
						RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_defer_recovery(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	} else {
		ret = encode_item_not_present(xdr);
		if (ret)
			goto out_err;
	}
	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	} else {
		ret = encode_item_not_present(xdr);
		if (ret)
			goto out_err;
	}
	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	if (ret != -ENOBUFS) {
		pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
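/* Each transport header begins with four fixed XDR words:
 *
 *	__be32 rdma_xid;	- matches the XID of the RPC message
 *	__be32 rdma_vers;	- always rpcrdma_version
 *	__be32 rdma_credit;	- credits granted by the responder
 *	__be32 rdma_proc;	- rdma_msg, rdma_nomsg, or rdma_error
 */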
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)));
		break;
	case err_chunk:
		dprintk("RPC: %5u: %s: server reports header decoding error\n",
			rqst->rq_task->tk_pid, __func__);
		break;
	default:
		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
		break;
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	unsigned long cwnd;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->recv_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->recv_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	status = -EIO;
	goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
						    &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	if (rep->rr_hdrbuf.head[0].iov_len == 0)
		goto out_badstatus;

	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);

	/* Fixed transport header fields */
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->recv_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	buf->rb_credits = credits;

	spin_unlock(&xprt->recv_lock);

	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto repost;

/* The RPC transaction has already been terminated, or the header
 * is corrupt.
 */
out_norqst:
	spin_unlock(&xprt->recv_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto repost;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}