/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 *      Redistribution and use in source and binary forms, with or without
 *      modification, are permitted provided that the following conditions
 *      are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
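
/* xdr_padsize() returns the number of bytes needed to pad an XDR
 * pagelist out to a four-byte boundary. For example, a 5-byte
 * pagelist needs 3 pad bytes, while an 8-byte pagelist needs none.
 */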
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}
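
/* Build a request map describing the reply's xdr_buf as an iovec
 * array: sge entry 0 is reserved for the RPC-over-RDMA transport
 * header, and the xdr_buf's head, pagelist pages, and tail each get
 * their own entry. Returns 0 on success, or -EIO if the xdr_buf's
 * component lengths do not add up to xdr->len.
 */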
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec,
		     bool write_chunk_present)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		unsigned char *base = xdr->tail[0].iov_base;
		size_t len = xdr->tail[0].iov_len;
		u32 xdr_pad = xdr_padsize(xdr->page_len);

		if (write_chunk_present && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		if (len) {
			vec->sge[sge_no].iov_base = base;
			vec->sge[sge_no].iov_len = len;
			sge_no++;
		}
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}
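
/* DMA map at most one page of the outgoing xdr_buf, starting at byte
 * offset xdr_off. The offset may land in the buffer's head, pagelist,
 * or tail; the matching page and in-page offset are located before
 * mapping the page for device access.
 */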
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;

	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}
/* Returns the address of the first read chunk or <nul> if no read chunk
 * is present
 */
struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == xdr_zero)
		return NULL;
	return ch;
}
/* Returns the address of the first read write array element or <nul>
 * if no write array list is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
		return NULL;
	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}
/* Returns the address of the first reply array element or <nul> if no
 * reply array is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
			 struct rpcrdma_write_array *wr_ary)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply chunk may occur with read list
	 *	and/or write list.
	 */
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim != xdr_zero)
			rch++;

		/* The reply chunk follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	if (wr_ary) {
		int chunk = be32_to_cpu(wr_ary->wc_nchunks);

		rp_ary = (struct rpcrdma_write_array *)
			 &wr_ary->wc_array[chunk].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == xdr_zero)
		return NULL;
	return rp_ary;
}
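
/* send_write() posts a single RDMA Write Work Request that pushes up
 * to sc_max_sge entries of the response xdr_buf, starting at xdr_off,
 * into the client memory region identified by rmr/to. It returns the
 * number of bytes actually posted, so callers loop until the whole
 * chunk has been written.
 */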
/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->cqe.done = svc_rdma_wc_write;
	write_wr.wr.wr_cqe = &ctxt->cqe;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return -EIO;
}
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}
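
/* send_reply_chunks() moves the entire RPC reply into the
 * client-provided Reply chunk via RDMA Write and encodes the reply
 * array in the response header. Returns rq_res.len on success, or a
 * negative value on error.
 */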
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when reply lists occur with read-list and or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;

		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}
/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	ret = svc_rdma_repost_recv(rdma, GFP_KERNEL);
	if (ret) {
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* The loop above bumps sc_dma_used for each sge. The
	 * xdr_buf.tail gets a separate sge, but resides in the
	 * same page as xdr_buf.head. Don't count it twice.
	 */
	if (sge_no > ctxt->count)
		atomic_dec(&rdma->sc_dma_used);

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	pr_err("svcrdma: failed to send reply, rc=%d\n", ret);
	return -EIO;
}
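
/* The RPC-over-RDMA transport constructs its reply header in
 * svc_rdma_sendto(), so there is nothing to prepare here.
 */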
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
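
/* svc_rdma_sendto() is the transport's sendto entry point. It parses
 * the write and reply chunk lists from the client's request header,
 * maps rq_res, RDMA Writes any chunk payloads, and finally posts a
 * Send carrying the transport header plus the remaining inline bytes.
 * On error the connection is marked for close and -ENOTCONN is
 * returned.
 */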
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	wr_ary = svc_rdma_get_write_array(rdma_argp);
	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);

	/* Build an req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);
	if (rp_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret + xdr_padsize(ret);
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	if (ret < 0)
		goto err1;

	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(rdma, vec);
	svc_rdma_put_context(ctxt, 0);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	return -ENOTCONN;
}
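
/* svc_rdma_send_error() builds and posts an ERR_VERS or ERR_CHUNK
 * error reply when an incoming transport header cannot be processed.
 * A receive buffer is reposted before the error reply is sent.
 */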
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 int status)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	enum rpcrdma_errcode err;
	__be32 *va;
	int length;
	int ret;

	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
	if (ret)
		return;

	p = alloc_page(GFP_KERNEL);
	if (!p)
		return;
	va = page_address(p);

	/* XDR encode an error reply */
	err = ERR_CHUNK;
	if (status == -EPROTONOSUPPORT)
		err = ERR_VERS;
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = length;
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		dprintk("svcrdma: Error mapping buffer for protocol error\n");
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof(err_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	err_wr.wr_cqe = &ctxt->cqe;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}