/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
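
/* map_xdr - build a scatter list over an xdr_buf
 *
 * Walks the head, page list, and tail of @xdr and records one iovec
 * per contiguous region in @vec. sge[0] is left unused here because
 * it is reserved for the RPC-over-RDMA transport header.
 *
 * Returns 0 on success, or -EIO if the xdr_buf length fields are
 * inconsistent.
 */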
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: map_xdr: XDR buffer length error\n");
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}
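
/* dma_map_xdr - DMA-map one sge's worth of an xdr_buf
 *
 * Translates @xdr_off into the page backing the head, page list, or
 * tail of @xdr, then maps at most one page starting at that offset
 * for the given direction. The caller must check the returned
 * address with ib_dma_mapping_error().
 */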
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;

	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}
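
/* The RPC-over-RDMA header body carries three XDR-encoded lists: a
 * Read list, a Write list, and a Reply chunk. Each begins with a
 * one-word discriminator, where xdr_zero marks an absent list. The
 * helpers below locate these lists within rm_body.rm_chunks[].
 */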
/* Returns the address of the first read chunk or <nul> if no read chunk
 * is present
 */
struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == xdr_zero)
		return NULL;
	return ch;
}
/* Returns the address of the first write array element or <nul>
 * if no write array list is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
		return NULL;
	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}
/* Returns the address of the first reply array element or <nul> if no
 * reply array is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *wr_ary;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply chunk may occur with read list
	 *	and/or write list.
	 */
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim != xdr_zero)
			rch++;

		/* The reply chunk follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	wr_ary = svc_rdma_get_write_array(rmsgp);
	if (wr_ary) {
		int chunk = be32_to_cpu(wr_ary->wc_nchunks);

		rp_ary = (struct rpcrdma_write_array *)
			 &wr_ary->wc_array[chunk].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == xdr_zero)
		return NULL;
	return rp_ary;
}
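
/* send_write - post one RDMA Write carrying part of the reply
 *
 * Maps up to sc_max_sge sges starting at @xdr_off in the response
 * xdr_buf and posts a single RDMA_WRITE to the client memory region
 * named by (@rmr, @to). Returns the number of bytes actually posted,
 * or a negative errno; callers loop until the whole chunk is written.
 */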
/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
				  bc, vec->sge[xdr_sge_no].iov_len - sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr.wr_id = (unsigned long)ctxt;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}
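
/* send_write_chunks - push the pagelist and tail via the Write list
 *
 * For each Write chunk the client provided in @rdma_argp, RDMA Writes
 * as much of the remaining response payload as that chunk can hold,
 * then encodes the actually-written lengths into the Write list of
 * @rdma_resp. Returns the number of bytes sent via the Write list,
 * or a negative errno on failure.
 */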
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}
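
/* send_reply_chunks - push the whole reply via the Reply chunk
 *
 * Like send_write_chunks(), but the transfer starts at xdr offset
 * zero and covers the entire response message, for the RDMA_NOMSG
 * case. Returns the number of bytes sent, or a negative errno on
 * failure.
 */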
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list
	 * and/or write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;

		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}
/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
				ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no + 1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/* If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no + 1 >= sge_no)
			ctxt->sge[page_no + 1].length = 0;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* The loop above bumps sc_dma_used for each sge. The
	 * xdr_buf.tail gets a separate sge, but resides in the
	 * same page as xdr_buf.head. Don't count it twice.
	 */
	if (sge_no > ctxt->count)
		atomic_dec(&rdma->sc_dma_used);

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}
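
/* svc_rdma_prep_reply_hdr - prepare the transport reply header
 *
 * Nothing to do here: unlike stream transports, RPC-over-RDMA builds
 * its transport header in a separate buffer, in svc_rdma_sendto().
 */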
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
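
/* svc_rdma_sendto - transmit an RPC reply over RDMA
 *
 * Sends Write-list and Reply-chunk payload via RDMA Write, then
 * transmits the transport header (and any inline remainder) with a
 * single RDMA Send. Returns 0 on success, or a negative errno on
 * failure.
 */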
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}