/*
 * Copyright (c) 2016 Oracle. All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
        int                     rw_nents;
        struct sg_table         rw_sg_table;
        struct scatterlist      rw_first_sgl[0];
};
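/* Note: rw_first_sgl[] is the inline first scatterlist chunk handed to
 * sg_alloc_table_chained() in svc_rdma_get_rw_ctxt() below; requests
 * needing more than SG_CHUNK_SIZE entries chain further chunks onto it.
 * This description is inferred from that call site.
 */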
static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
                                        rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
        struct svc_rdma_rw_ctxt *ctxt;

        spin_lock(&rdma->sc_rw_ctxt_lock);

        ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
        if (ctxt) {
                list_del(&ctxt->rw_list);
                spin_unlock(&rdma->sc_rw_ctxt_lock);
        } else {
                spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(sizeof(*ctxt) +
                               SG_CHUNK_SIZE * sizeof(struct scatterlist),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out;
                INIT_LIST_HEAD(&ctxt->rw_list);
        }

        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
                                   ctxt->rw_sg_table.sgl)) {
                kfree(ctxt);
                ctxt = NULL;
        }
out:
        return ctxt;
}
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_rw_ctxt *ctxt)
{
        sg_free_table_chained(&ctxt->rw_sg_table, true);

        spin_lock(&rdma->sc_rw_ctxt_lock);
        list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
        spin_unlock(&rdma->sc_rw_ctxt_lock);
}
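/* Rough sketch of the per-segment lifecycle of these contexts, using
 * the helpers defined later in this file:
 *
 *	ctxt = svc_rdma_get_rw_ctxt(rdma, sges);
 *	// fill ctxt->rw_sg_table.sgl and set ctxt->rw_nents
 *	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, ...);
 *	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 *	...
 *	// once the chunk completes, in svc_rdma_cc_release():
 *	rdma_rw_ctx_destroy(&ctxt->rw_ctx, ...);
 *	svc_rdma_put_rw_ctxt(rdma, ctxt);
 */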
/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
                list_del(&ctxt->rw_list);
                kfree(ctxt);
        }
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
        struct ib_cqe           cc_cqe;
        struct svcxprt_rdma     *cc_rdma;
        struct list_head        cc_rwctxts;
        int                     cc_sqecount;
};
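/* cc_sqecount accumulates the number of Send Queue entries consumed by
 * the chained Read or Write WRs (one per WR, as reported by
 * rdma_rw_ctx_init). svc_rdma_post_chunk_ctxt() charges this count
 * against sc_sq_avail before posting, and the completion handlers
 * credit it back.
 */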
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
{
        cc->cc_rdma = rdma;
        svc_xprt_get(&rdma->sc_xprt);

        INIT_LIST_HEAD(&cc->cc_rwctxts);
        cc->cc_sqecount = 0;
}
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);

                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
                svc_rdma_put_rw_ctxt(rdma, ctxt);
        }
        svc_xprt_put(&rdma->sc_xprt);
}
/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
        /* write state of this chunk */
        unsigned int            wi_seg_off;
        unsigned int            wi_seg_no;
        unsigned int            wi_nsegs;
        __be32                  *wi_segs;

        /* SGL constructor arguments */
        struct xdr_buf          *wi_xdr;
        unsigned char           *wi_base;
        unsigned int            wi_next_off;

        struct svc_rdma_chunk_ctxt      wi_cc;
};
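/* The SGL constructor arguments above are filled in by the send
 * helpers below: svc_rdma_send_xdr_kvec() points wi_base at a kvec,
 * while svc_rdma_send_xdr_pagelist() sets wi_xdr and wi_next_off so
 * that svc_rdma_pagelist_to_sg() can walk the xdr_buf's page list.
 */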
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
        struct svc_rdma_write_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        info->wi_seg_off = 0;
        info->wi_seg_no = 0;
        info->wi_nsegs = be32_to_cpup(++chunk);
        info->wi_segs = ++chunk;
        svc_rdma_cc_init(rdma, &info->wi_cc);
        info->wi_cc.cc_cqe.done = svc_rdma_write_done;
        return info;
}
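/* For reference, a rough sketch of the XDR words the constructor above
 * steps over, assuming @chunk points at a Write list entry (or the
 * Reply chunk) whose presence has already been verified by the caller:
 *
 *	chunk[0]	list discriminator (non-zero)
 *	chunk[1]	segment count, saved in wi_nsegs
 *	chunk[2..]	first segment (handle, length, 2-word offset),
 *			then one rpcrdma_segment_maxsz group per
 *			remaining segment; wi_segs points here
 */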
static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
        svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
        kfree(info);
}
/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
                               ib_wc_status_msg(wc->status),
                               wc->status, wc->vendor_err);
        }

        svc_rdma_write_info_free(info);
}
/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
        struct svc_rdma_op_ctxt         *ri_readctxt;
        unsigned int                    ri_position;
        unsigned int                    ri_pageno;
        unsigned int                    ri_pageoff;
        unsigned int                    ri_chunklen;

        struct svc_rdma_chunk_ctxt      ri_cc;
};
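/* ri_pageno and ri_pageoff track the current fill point in the
 * rqstp->rq_pages array while the Read sink SGLs are being built;
 * see svc_rdma_build_read_segment() below.
 */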
static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_read_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        svc_rdma_cc_init(rdma, &info->ri_cc);
        info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
        return info;
}
static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
        svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
                               ib_wc_status_msg(wc->status),
                               wc->status, wc->vendor_err);
                svc_rdma_put_context(info->ri_readctxt, 1);
        } else {
                spin_lock(&rdma->sc_rq_dto_lock);
                list_add_tail(&info->ri_readctxt->list,
                              &rdma->sc_read_complete_q);
                spin_unlock(&rdma->sc_rq_dto_lock);

                set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
                svc_xprt_enqueue(&rdma->sc_xprt);
        }

        svc_rdma_read_info_free(info);
}
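/* On success the op_ctxt is parked on sc_read_complete_q rather than
 * completed here; the transport's receive path is expected to pick it
 * up once svc_xprt_enqueue() reschedules the svc_xprt (deferral point
 * inferred from the queueing above).
 */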
/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_xprt *xprt = &rdma->sc_xprt;
        struct ib_send_wr *first_wr, *bad_wr;
        struct list_head *tmp;
        struct ib_cqe *cqe;
        int ret;

        if (cc->cc_sqecount > rdma->sc_sq_depth)
                return -EINVAL;

        first_wr = NULL;
        cqe = &cc->cc_cqe;
        list_for_each(tmp, &cc->cc_rwctxts) {
                struct svc_rdma_rw_ctxt *ctxt;

                ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
                first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                           rdma->sc_port_num, cqe, first_wr);
                cqe = NULL;
        }

        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
                        return 0;
                }

                atomic_inc(&rdma_stat_sq_starve);
                atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
        } while (1);

        pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);

        /* If even one was posted, there will be a completion. */
        if (bad_wr != first_wr)
                return 0;

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
        return -ENOTCONN;
}
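/* Flow-control arithmetic, by way of an illustrative example: if
 * sc_sq_avail is 8 and the chunk needs cc_sqecount == 10 SQEs,
 * atomic_sub_return() yields -2, so the credits are handed back and
 * the caller sleeps on sc_send_wait until completions push
 * sc_sq_avail above 10. The figures are made up for illustration.
 */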
/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
                               unsigned int len,
                               struct svc_rdma_rw_ctxt *ctxt)
{
        struct scatterlist *sg = ctxt->rw_sg_table.sgl;

        sg_set_buf(&sg[0], info->wi_base, len);
        info->wi_base += len;

        ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
                                    unsigned int remaining,
                                    struct svc_rdma_rw_ctxt *ctxt)
{
        unsigned int sge_no, sge_bytes, page_off, page_no;
        struct xdr_buf *xdr = info->wi_xdr;
        struct scatterlist *sg;
        struct page **page;

        page_off = info->wi_next_off + xdr->page_base;
        page_no = page_off >> PAGE_SHIFT;
        page_off = offset_in_page(page_off);
        page = xdr->pages + page_no;
        info->wi_next_off += remaining;
        sg = ctxt->rw_sg_table.sgl;
        sge_no = 0;
        do {
                sge_bytes = min_t(unsigned int, remaining,
                                  PAGE_SIZE - page_off);
                sg_set_page(sg, *page, sge_bytes, page_off);

                remaining -= sge_bytes;
                sg = sg_next(sg);
                page_off = 0;
                page++;
                sge_no++;
        } while (remaining);

        ctxt->rw_nents = sge_no;
}
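/* Worked example (illustrative figures, assuming a 4KB PAGE_SIZE):
 * remaining == 6000 bytes starting at page_off 3000 produces three
 * SGEs of 1096, 4096, and 808 bytes, so rw_nents ends up as 3.
 */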
/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
                      void (*constructor)(struct svc_rdma_write_info *info,
                                          unsigned int len,
                                          struct svc_rdma_rw_ctxt *ctxt),
                      unsigned int remaining)
{
        struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;
        __be32 *seg;
        int ret;

        seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
        do {
                unsigned int write_len;
                u32 seg_length, seg_handle;
                u64 seg_offset;

                if (info->wi_seg_no >= info->wi_nsegs)
                        goto out_overflow;

                seg_handle = be32_to_cpup(seg);
                seg_length = be32_to_cpup(seg + 1);
                xdr_decode_hyper(seg + 2, &seg_offset);
                seg_offset += info->wi_seg_off;

                write_len = min(remaining, seg_length - info->wi_seg_off);
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
                        goto out_noctx;

                constructor(info, write_len, ctxt);
                ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
                                       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                       ctxt->rw_nents, 0, seg_offset,
                                       seg_handle, DMA_TO_DEVICE);
                if (ret < 0)
                        goto out_initerr;

                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
                if (write_len == seg_length - info->wi_seg_off) {
                        seg += 4;
                        info->wi_seg_no++;
                        info->wi_seg_off = 0;
                } else {
                        info->wi_seg_off += write_len;
                }

                remaining -= write_len;
        } while (remaining);

        return 0;

out_overflow:
        dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
                info->wi_nsegs);
        return -E2BIG;

out_noctx:
        dprintk("svcrdma: no R/W ctxs available\n");
        return -ENOMEM;

out_initerr:
        svc_rdma_put_rw_ctxt(rdma, ctxt);
        pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
        return -EIO;
}
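/* Note on the SGE estimate above: a write_len byte span starting at an
 * arbitrary page offset can touch at most (write_len >> PAGE_SHIFT) + 2
 * pages (one extra for a partial first page and one for a partial last
 * page), which is why svc_rdma_get_rw_ctxt() is asked for that many
 * scatterlist entries.
 */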
/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
                                  struct kvec *vec)
{
        info->wi_base = vec->iov_base;
        return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
                                     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
                                      struct xdr_buf *xdr)
{
        info->wi_xdr = xdr;
        info->wi_next_off = 0;
        return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
                                     xdr->page_len);
}
/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
                              struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int ret;

        if (!xdr->page_len)
                return 0;

        info = svc_rdma_write_info_alloc(rdma, wr_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_pagelist(info, xdr);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;
        return xdr->page_len;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}
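/* A rough sketch of how a send-path caller might consume this API
 * (the real call site is outside this file; names are illustrative):
 *
 *	if (wr_lst) {
 *		ret = svc_rdma_send_write_chunk(rdma, wr_lst, &rqstp->rq_res);
 *		if (ret < 0)
 *			goto err;
 *		// ret bytes of rq_res were moved via RDMA Write
 *	}
 */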
/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
                              bool writelist, struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int consumed, ret;

        info = svc_rdma_write_info_alloc(rdma, rp_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
        if (ret < 0)
                goto out_err;
        consumed = xdr->head[0].iov_len;

        /* Send the page list in the Reply chunk only if the
         * client did not provide Write chunks.
         */
        if (!writelist && xdr->page_len) {
                ret = svc_rdma_send_xdr_pagelist(info, xdr);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->page_len;
        }

        if (xdr->tail[0].iov_len) {
                ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->tail[0].iov_len;
        }

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;
        return consumed;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                                       struct svc_rqst *rqstp,
                                       u32 rkey, u32 len, u64 offset)
{
        struct svc_rdma_op_ctxt *head = info->ri_readctxt;
        struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
        struct svc_rdma_rw_ctxt *ctxt;
        unsigned int sge_no, seg_len;
        struct scatterlist *sg;
        int ret;

        sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
        ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
        if (!ctxt)
                goto out_noctx;
        ctxt->rw_nents = sge_no;

        dprintk("svcrdma: reading segment %u@0x%016llx:0x%08x (%u sges)\n",
                len, offset, rkey, sge_no);

        sg = ctxt->rw_sg_table.sgl;
        for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
                seg_len = min_t(unsigned int, len,
                                PAGE_SIZE - info->ri_pageoff);

                head->arg.pages[info->ri_pageno] =
                        rqstp->rq_pages[info->ri_pageno];
                if (!info->ri_pageoff)
                        head->count++;

                sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
                            seg_len, info->ri_pageoff);
                sg = sg_next(sg);

                info->ri_pageoff += seg_len;
                if (info->ri_pageoff == PAGE_SIZE) {
                        info->ri_pageno++;
                        info->ri_pageoff = 0;
                }
                len -= seg_len;

                if (len &&
                    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
                        goto out_overrun;
        }

        ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
                               cc->cc_rdma->sc_port_num,
                               ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                               0, offset, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                goto out_initerr;

        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
        return 0;

out_noctx:
        dprintk("svcrdma: no R/W ctxs available\n");
        return -ENOMEM;

out_overrun:
        dprintk("svcrdma: request overruns rq_pages\n");
        return -EINVAL;

out_initerr:
        svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
        pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
        return -EIO;
}
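/* Sizing example for the sge_no computation above (illustrative,
 * assuming 4KB pages): ri_pageoff of 100 plus a segment length of
 * 8000 gives PAGE_ALIGN(8100) == 12288, i.e. three sink pages and
 * three SGEs.
 */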
/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
{
        int ret;

        ret = -EINVAL;
        info->ri_chunklen = 0;
        while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
                u32 rs_handle, rs_length;
                u64 rs_offset;

                rs_handle = be32_to_cpup(p++);
                rs_length = be32_to_cpup(p++);
                p = xdr_decode_hyper(p, &rs_offset);

                ret = svc_rdma_build_read_segment(info, rqstp,
                                                  rs_handle, rs_length,
                                                  rs_offset);
                if (ret < 0)
                        break;

                info->ri_chunklen += rs_length;
        }

        return ret;
}
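/* Read list layout walked by the loop above, as a rough sketch: each
 * entry is { discriminator (non-zero), position, handle, length,
 * offset (2 words) }, and the list ends with an xdr_zero
 * discriminator. An entry whose position differs from ri_position
 * also terminates the walk.
 */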
/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->arg.pages.
 *
 * Currently NFSD does not look at the head->arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
                                            struct svc_rdma_read_info *info,
                                            __be32 *p)
{
        struct svc_rdma_op_ctxt *head = info->ri_readctxt;
        int ret;

        dprintk("svcrdma: Reading Read chunk at position %u\n",
                info->ri_position);

        info->ri_pageno = head->hdr_count;
        info->ri_pageoff = 0;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        /* Split the Receive buffer between the head and tail
         * buffers at Read chunk's position. XDR roundup of the
         * chunk is not included in either the pagelist or in
         * the tail.
         */
        head->arg.tail[0].iov_base =
                head->arg.head[0].iov_base + info->ri_position;
        head->arg.tail[0].iov_len =
                head->arg.head[0].iov_len - info->ri_position;
        head->arg.head[0].iov_len = info->ri_position;

        /* Read chunk may need XDR roundup (see RFC 5666, s. 3.7).
         *
         * NFSv2/3 write decoders need the length of the tail to
         * contain the size of the roundup padding.
         */
        if (info->ri_chunklen & 3)
                head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3);
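        /* Roundup arithmetic, for illustration: a 13-byte chunk leaves
         * 4 - (13 & 3) == 3 pad bytes, so the tail grows by 3; a
         * 16-byte chunk needs no padding and the tail is left alone.
         */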

        head->arg.page_len = info->ri_chunklen;
        head->arg.len += info->ri_chunklen;
        head->arg.buflen += info->ri_chunklen;

out:
        return ret;
}
/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
                                        struct svc_rdma_read_info *info,
                                        __be32 *p)
{
        struct svc_rdma_op_ctxt *head = info->ri_readctxt;
        int ret;

        dprintk("svcrdma: Reading Position Zero Read chunk\n");

        info->ri_pageno = head->hdr_count - 1;
        info->ri_pageoff = offset_in_page(head->byte_len);

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        head->arg.len += info->ri_chunklen;
        head->arg.buflen += info->ri_chunklen;

        if (head->arg.buflen <= head->sge[0].length) {
                /* Transport header and RPC message fit entirely
                 * in page where head iovec resides.
                 */
                head->arg.head[0].iov_len = info->ri_chunklen;
        } else {
                /* Transport header and part of RPC message reside
                 * in the head iovec's page.
                 */
                head->arg.head[0].iov_len =
                        head->sge[0].length - head->byte_len;
                head->arg.page_len =
                        info->ri_chunklen - head->arg.head[0].iov_len;
        }

out:
        return ret;
}
/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 *	- All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
                             struct svc_rdma_op_ctxt *head, __be32 *p)
{
        struct svc_rdma_read_info *info;
        struct page **page;
        int ret;

        /* The request (with page list) is constructed in
         * head->arg. Pages involved with RDMA Read I/O are
         * transferred there.
         */
        head->hdr_count = head->count;
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.pages = head->pages;
        head->arg.page_base = 0;
        head->arg.page_len = 0;
        head->arg.len = rqstp->rq_arg.len;
        head->arg.buflen = rqstp->rq_arg.buflen;

        info = svc_rdma_read_info_alloc(rdma);
        if (!info)
                return -ENOMEM;
        info->ri_readctxt = head;

        info->ri_position = be32_to_cpup(p + 1);
        if (info->ri_position)
                ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
        else
                ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);

        /* Mark the start of the pages that can be used for the reply */
        if (info->ri_pageoff > 0)
                info->ri_pageno++;
        rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        if (ret < 0)
                goto out;

        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);

out:
        /* Read sink pages have been moved from rqstp->rq_pages to
         * head->arg.pages. Force svc_recv to refill those slots
         * with fresh pages.
         */
        for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
                *page = NULL;

        if (ret < 0)
                svc_rdma_read_info_free(info);
        return ret;
}
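/* Note on lifetime, as read from the completion path above: when the
 * chunk contexts post successfully, @info is released later by
 * svc_rdma_wc_read_done(); the error path here frees it immediately
 * because no Read completion will fire for it.
 */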