/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 */
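/*
 * Illustrative example (editor's sketch, not from the original source):
 * assuming PAGE_SIZE is 4096, an xdr_buf with head[0].iov_len = 120,
 * page_len = 8192 starting at page_base = 0, and tail[0].iov_len = 8
 * (so xdr->len = 8320) would map to:
 *
 *   SGE[0]  RPCRDMA header (filled in later by send_reply)
 *   SGE[1]  120 bytes from head[0].iov_base
 *   SGE[2]  4096 bytes from pages[0]
 *   SGE[3]  4096 bytes from pages[1]
 *   SGE[4]  8 bytes from tail[0].iov_base
 *
 * giving *sge_count = 5.
 */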
static struct ib_sge *xdr_to_sge(struct svcxprt_rdma *xprt,
				 struct xdr_buf *xdr,
				 struct ib_sge *sge,
				 int *sge_count)
{
	/* Max we need is the length of the XDR / pagesize + one for
	 * head + one for tail + one for RPCRDMA header
	 */
	int sge_max = (xdr->len+PAGE_SIZE-1) / PAGE_SIZE + 3;
	int sge_no;
	u32 byte_count = xdr->len;
	u32 sge_bytes;
	u32 page_bytes;
	int page_off;
	int page_no;
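	/*
	 * Sizing example (editor's sketch, assuming PAGE_SIZE == 4096):
	 * an xdr->len of 8320 bytes gives
	 *   sge_max = (8320 + 4095) / 4096 + 3 = 3 + 3 = 6,
	 * i.e. room for the RPCRDMA header, the head, the tail, and a
	 * conservative worst-case count for the page list.
	 */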
	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	sge[sge_no].addr = ib_dma_map_single(xprt->sc_cm_id->device,
					     xdr->head[0].iov_base,
					     xdr->head[0].iov_len,
					     DMA_TO_DEVICE);
	sge_bytes = min_t(u32, byte_count, xdr->head[0].iov_len);
	byte_count -= sge_bytes;
	sge[sge_no].length = sge_bytes;
	sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
	sge_no++;
	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (byte_count && page_bytes) {
		sge_bytes = min_t(u32, byte_count, (PAGE_SIZE-page_off));
		sge[sge_no].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					xdr->pages[page_no], page_off,
					sge_bytes, DMA_TO_DEVICE);
		sge_bytes = min(sge_bytes, page_bytes);
		byte_count -= sge_bytes;
		page_bytes -= sge_bytes;
		sge[sge_no].length = sge_bytes;
		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}
	/* Tail SGE */
	if (byte_count && xdr->tail[0].iov_len) {
		sge[sge_no].addr =
			ib_dma_map_single(xprt->sc_cm_id->device,
					  xdr->tail[0].iov_base,
					  xdr->tail[0].iov_len,
					  DMA_TO_DEVICE);
		sge_bytes = min_t(u32, byte_count, xdr->tail[0].iov_len);
		byte_count -= sge_bytes;
		sge[sge_no].length = sge_bytes;
		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		sge_no++;
	}

	BUG_ON(sge_no > sge_max);
	BUG_ON(byte_count != 0);

	*sge_count = sge_no;
	return sge;
}
/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct ib_sge *xdr_sge, int sge_count)
{
	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;
	int ret = 0;

	BUG_ON(sge_count > RPCSVC_MAXPAGES);
160 dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
161 "write_len=%d, xdr_sge=%p, sge_count=%d\n",
162 rmr
, (unsigned long long)to
, xdr_off
,
163 write_len
, xdr_sge
, sge_count
);
165 ctxt
= svc_rdma_get_context(xprt
);
167 tmp_sge_ctxt
= svc_rdma_get_context(xprt
);
168 sge
= tmp_sge_ctxt
->sge
;
	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < sge_count;
	     xdr_sge_no++) {
		if (xdr_sge[xdr_sge_no].length > bc)
			break;
		bc -= xdr_sge[xdr_sge_no].length;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;
	/* Copy the remaining SGE */
	while (bc != 0 && xdr_sge_no < sge_count) {
		sge[sge_no].addr = xdr_sge[xdr_sge_no].addr + sge_off;
		sge[sge_no].lkey = xdr_sge[xdr_sge_no].lkey;
		sge_bytes = min((size_t)bc,
				(size_t)(xdr_sge[xdr_sge_no].length-sge_off));
		sge[sge_no].length = sge_bytes;

		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		bc -= sge_bytes;
	}
	BUG_ON(xdr_sge_no > sge_count);
	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr)) {
		svc_rdma_put_context(ctxt, 1);
		/* Fatal error, close transport */
		ret = -EIO;
	}
	svc_rdma_put_context(tmp_sge_ctxt, 0);
	return ret;
}
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct ib_sge *sge,
			     int sge_count)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	max_write = xprt->sc_max_sge * PAGE_SIZE;
	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, arg_ch->rs_length);

		/* Prepare the response chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(arg_ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 arg_ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 sge,
					 sge_count);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}
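/*
 * Illustrative example (editor's sketch, not from the original source):
 * assuming sc_max_sge = 4 and PAGE_SIZE = 4096, max_write is 16384.  A
 * single 40000-byte write chunk advertised by the client would then be
 * carved into three RDMA_WRITEs of 16384, 16384, and 7232 bytes, with
 * chunk_off and xdr_off advancing by this_write after each send.
 */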
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct ib_sge *sge,
			     int sge_count)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list and or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	max_write = xprt->sc_max_sge * PAGE_SIZE;
	/* xdr offset starts at RPC message */
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		u64 rs_offset;

		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ch->rs_length);

		/* Prepare the reply chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 sge,
					 sge_count);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}
/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND.
 */
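/*
 * For illustration (editor's note derived from svc_rdma_sendto() below,
 * not part of the original comment): the "inline" RDMA_MSG case applies
 * when the client supplied no reply array, so the RPC reply rides
 * entirely in this RDMA_SEND; when svc_rdma_get_reply_array() finds a
 * reply chunk, the header is marked RDMA_NOMSG and the reply body has
 * already been pushed with RDMA_WRITEs by send_reply_chunks().
 */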
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      int sge_count,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	int sge_no;
	int sge_bytes;
	int page_no;
	int ret;
	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device,
				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].lkey = rdma->sc_phys_mr->lkey;
	/* Determine how many of our SGE are to be transmitted */
	for (sge_no = 1; byte_count && sge_no < sge_count; sge_no++) {
		sge_bytes = min((size_t)ctxt->sge[sge_no].length,
				(size_t)byte_count);
		byte_count -= sge_bytes;
	}
	BUG_ON(byte_count != 0);
	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
	}

	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		svc_rdma_put_context(ctxt, 1);

	return ret;
}
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
/*
 * Return the start of an xdr buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}
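/*
 * Illustrative example (editor's sketch, not from the original source):
 * if rq_arg has len = 224, head[0].iov_len = 96, page_len = 100 and
 * tail[0].iov_len = 0, then 224 - 100 - 0 - 96 = 28 bytes precede the
 * head iovec, so xdr_start() returns head[0].iov_base - 28, which is
 * the start of the received RPC-over-RDMA transport header.
 */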
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct ib_sge *sge;
	int sge_count = 0;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build an SGE for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	sge = xdr_to_sge(rdma, &rqstp->rq_res, ctxt->sge, &sge_count);

	inline_bytes = rqstp->rq_res.len;
	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);
	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, sge, sge_count);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto error;
	}
	inline_bytes -= ret;
	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, sge, sge_count);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto error;
	}
	inline_bytes -= ret;
	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, sge_count,
			 inline_bytes);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;
 error:
	svc_rdma_put_context(ctxt, 0);
	put_page(res_page);
	return ret;
}