/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

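/*
 * Overview: an incoming RPC-over-RDMA call arrives in a posted RECV buffer.
 * If the call also carries a Read list, the remaining argument data is
 * pulled from the client with RDMA READ work requests through the
 * transport's sc_reader method (rdma_read_chunk_lcl or rdma_read_chunk_frmr
 * below), and the request is deferred until those READs complete.
 */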
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *ctxt,
                               u32 byte_count)
{
        struct rpcrdma_msg *rmsgp;
        struct page *page;
        u32 bc;
        int sge_no;

        /* Swap the page in the SGE with the page in argpages */
        page = ctxt->pages[0];
        put_page(rqstp->rq_pages[0]);
        rqstp->rq_pages[0] = page;

        /* Set up the XDR head */
        rqstp->rq_arg.head[0].iov_base = page_address(page);
        rqstp->rq_arg.head[0].iov_len =
                min_t(size_t, byte_count, ctxt->sge[0].length);
        rqstp->rq_arg.len = byte_count;
        rqstp->rq_arg.buflen = byte_count;

        /* Compute bytes past head in the SGL */
        bc = byte_count - rqstp->rq_arg.head[0].iov_len;

        /* If data remains, store it in the pagelist */
        rqstp->rq_arg.page_len = bc;
        rqstp->rq_arg.page_base = 0;

        /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
        rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
        if (rmsgp->rm_type == rdma_nomsg)
                rqstp->rq_arg.pages = &rqstp->rq_pages[0];
        else
                rqstp->rq_arg.pages = &rqstp->rq_pages[1];

        sge_no = 1;
        while (bc && sge_no < ctxt->count) {
                page = ctxt->pages[sge_no];
                put_page(rqstp->rq_pages[sge_no]);
                rqstp->rq_pages[sge_no] = page;
                bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
                rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
                sge_no++;
        }
        rqstp->rq_respages = &rqstp->rq_pages[sge_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* If not all pages were used from the SGL, free the remaining ones */
        bc = sge_no;
        while (sge_no < ctxt->count) {
                page = ctxt->pages[sge_no++];
                put_page(page);
        }
        ctxt->count = bc;

        /* Set up tail */
        rqstp->rq_arg.tail[0].iov_base = NULL;
        rqstp->rq_arg.tail[0].iov_len = 0;
}

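/* Both chunk readers below implement the sc_reader calling convention:
 * *page_no and *page_offset track the current position in head->arg.pages
 * and are updated before returning, and the return value is the number of
 * bytes of RDMA READ posted for this chunk segment, or a negative errno.
 */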
/* Issue an RDMA_READ using the local lkey to map the data sink */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
                        struct svc_rqst *rqstp,
                        struct svc_rdma_op_ctxt *head,
                        int *page_no,
                        u32 *page_offset,
                        u32 rs_handle,
                        u32 rs_length,
                        u64 rs_offset,
                        bool last)
{
        struct ib_rdma_wr read_wr;
        int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
        struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
        int ret, read, pno;
        u32 pg_off = *page_offset;
        u32 pg_no = *page_no;

        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->read_hdr = head;
        pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
        read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
                     rs_length);

        for (pno = 0; pno < pages_needed; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

                head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
                head->arg.page_len += len;
                head->arg.len += len;
                if (!pg_off)
                        head->count++;
                rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
                rqstp->rq_next_page = rqstp->rq_respages + 1;
                ctxt->sge[pno].addr =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        head->arg.pages[pg_no], pg_off,
                                        PAGE_SIZE - pg_off,
                                        DMA_FROM_DEVICE);
                ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
                                           ctxt->sge[pno].addr);
                if (ret)
                        goto err;
                atomic_inc(&xprt->sc_dma_used);

                /* The lkey here is either a local dma lkey or a dma_mr lkey */
                ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
                ctxt->sge[pno].length = len;
                ctxt->count++;

                /* adjust offset and wrap to next page if needed */
                pg_off += len;
                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        pg_no++;
                }
                rs_length -= len;
        }

        if (last && rs_length == 0)
                set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

        memset(&read_wr, 0, sizeof(read_wr));
        read_wr.wr.wr_id = (unsigned long)ctxt;
        read_wr.wr.opcode = IB_WR_RDMA_READ;
        ctxt->wr_op = read_wr.wr.opcode;
        read_wr.wr.send_flags = IB_SEND_SIGNALED;
        read_wr.rkey = rs_handle;
        read_wr.remote_addr = rs_offset;
        read_wr.wr.sg_list = ctxt->sge;
        read_wr.wr.num_sge = pages_needed;

        ret = svc_rdma_send(xprt, &read_wr.wr);
        if (ret) {
                pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                goto err;
        }

        /* return current location in page array */
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
        atomic_inc(&rdma_stat_read);
        return ret;
 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}

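/* Unlike the lcl reader above, the FRMR path first registers the sink pages
 * with a fast-registration MR, then posts the REG_MR and RDMA_READ work
 * requests as a chain, adding a LOCAL_INV when the device cannot do
 * READ_WITH_INV.
 */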
/* Issue an RDMA_READ using an FRMR to map the data sink */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
                         struct svc_rqst *rqstp,
                         struct svc_rdma_op_ctxt *head,
                         int *page_no,
                         u32 *page_offset,
                         u32 rs_handle,
                         u32 rs_length,
                         u64 rs_offset,
                         bool last)
{
        struct ib_rdma_wr read_wr;
        struct ib_send_wr inv_wr;
        struct ib_reg_wr reg_wr;
        u8 key;
        int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
        struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
        struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
        int ret, read, pno, dma_nents, n;
        u32 pg_off = *page_offset;
        u32 pg_no = *page_no;

        if (IS_ERR(frmr))
                return -ENOMEM;

        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = frmr;
        nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
        read = min_t(int, (nents << PAGE_SHIFT) - *page_offset, rs_length);

        frmr->direction = DMA_FROM_DEVICE;
        frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
        frmr->sg_nents = nents;

        for (pno = 0; pno < nents; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

                head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
                head->arg.page_len += len;
                head->arg.len += len;
                if (!pg_off)
                        head->count++;

                sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
                            len, pg_off);

                rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
                rqstp->rq_next_page = rqstp->rq_respages + 1;

                /* adjust offset and wrap to next page if needed */
                pg_off += len;
                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        pg_no++;
                }
                rs_length -= len;
        }

        if (last && rs_length == 0)
                set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

        dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
                                  frmr->sg, frmr->sg_nents,
                                  frmr->direction);
        if (!dma_nents) {
                pr_err("svcrdma: failed to dma map sg %p\n",
                       frmr->sg);
                return -ENOMEM;
        }
        atomic_inc(&xprt->sc_dma_used);

        n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
        if (unlikely(n != frmr->sg_nents)) {
                pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
                       frmr->mr, n, frmr->sg_nents);
                return n < 0 ? n : -EINVAL;
        }

        /* Bump the key */
        key = (u8)(frmr->mr->lkey & 0x000000FF);
        ib_update_fast_reg_key(frmr->mr, ++key);

        ctxt->sge[0].addr = frmr->mr->iova;
        ctxt->sge[0].lkey = frmr->mr->lkey;
        ctxt->sge[0].length = frmr->mr->length;
        ctxt->count = 1;
        ctxt->read_hdr = head;

        /* Prepare REG WR */
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.wr_id = 0;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;
        reg_wr.wr.num_sge = 0;
        reg_wr.mr = frmr->mr;
        reg_wr.key = frmr->mr->lkey;
        reg_wr.access = frmr->access_flags;
        reg_wr.wr.next = &read_wr.wr;

        /* Prepare RDMA_READ */
        memset(&read_wr, 0, sizeof(read_wr));
        read_wr.wr.send_flags = IB_SEND_SIGNALED;
        read_wr.rkey = rs_handle;
        read_wr.remote_addr = rs_offset;
        read_wr.wr.sg_list = ctxt->sge;
        read_wr.wr.num_sge = 1;
        if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
                read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                read_wr.wr.wr_id = (unsigned long)ctxt;
                read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
        } else {
                read_wr.wr.opcode = IB_WR_RDMA_READ;
                read_wr.wr.next = &inv_wr;
                /* Prepare invalidate */
                memset(&inv_wr, 0, sizeof(inv_wr));
                inv_wr.wr_id = (unsigned long)ctxt;
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
                inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
        }
        ctxt->wr_op = read_wr.wr.opcode;

        /* Post the chain */
        ret = svc_rdma_send(xprt, &reg_wr.wr);
        if (ret) {
                pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                goto err;
        }

        /* return current location in page array */
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
        atomic_inc(&rdma_stat_read);
        return ret;
 err:
        ib_dma_unmap_sg(xprt->sc_cm_id->device,
                        frmr->sg, frmr->sg_nents, frmr->direction);
        svc_rdma_put_context(ctxt, 0);
        svc_rdma_put_frmr(xprt, frmr);
        return ret;
}

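/* Count the chunk segments in the Read list; the list is terminated by an
 * entry whose discriminator is xdr_zero.
 */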
static unsigned int
rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
{
        unsigned int count;

        for (count = 0; ch->rc_discrim != xdr_zero; ch++)
                count++;
        return count;
}

/* If there was additional inline content, append it to the end of arg.pages.
 * Tail copy has to be done after the reader function has determined how many
 * pages are needed for RDMA READ.
 */
static int
rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
               u32 position, u32 byte_count, u32 page_offset, int page_no)
{
        char *srcp, *destp;

        srcp = head->arg.head[0].iov_base + position;
        byte_count = head->arg.head[0].iov_len - position;
        if (byte_count > PAGE_SIZE) {
                dprintk("svcrdma: large tail unsupported\n");
                return 0;
        }

        /* Fit as much of the tail on the current page as possible */
        if (page_offset != PAGE_SIZE) {
                destp = page_address(rqstp->rq_arg.pages[page_no]);
                destp += page_offset;
                while (byte_count--) {
                        *destp++ = *srcp++;
                        page_offset++;
                        if (page_offset == PAGE_SIZE && byte_count)
                                goto more;
                }
                goto done;
        }

more:
        /* Fit the rest on the next page */
        page_no++;
        destp = page_address(rqstp->rq_arg.pages[page_no]);
        while (byte_count--)
                *destp++ = *srcp++;

        rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

done:
        byte_count = head->arg.head[0].iov_len - position;
        head->arg.page_len += byte_count;
        head->arg.len += byte_count;
        head->arg.buflen += byte_count;
        return 1;
}

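/* Walk the Read list in the RPC-over-RDMA header and post an RDMA READ for
 * each chunk segment via the transport's sc_reader method. A return value
 * greater than zero means READs were posted and the caller must defer the
 * request until they complete; zero means there was no Read list to pull;
 * a negative errno means posting failed.
 */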
static int rdma_read_chunks(struct svcxprt_rdma *xprt,
                            struct rpcrdma_msg *rmsgp,
                            struct svc_rqst *rqstp,
                            struct svc_rdma_op_ctxt *head)
{
        int page_no, ret;
        struct rpcrdma_read_chunk *ch;
        u32 handle, page_offset, byte_count;
        u32 position;
        u64 rs_offset;
        bool last;

        /* If no read list is present, return 0 */
        ch = svc_rdma_get_read_chunk(rmsgp);
        if (!ch)
                return 0;

        if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
                return -EINVAL;

        /* The request is completed when the RDMA_READs complete. The
         * head context keeps all the pages that comprise the
         * request.
         */
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->hdr_count = head->count;
        head->arg.page_base = 0;
        head->arg.page_len = 0;
        head->arg.len = rqstp->rq_arg.len;
        head->arg.buflen = rqstp->rq_arg.buflen;

        ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
        position = be32_to_cpu(ch->rc_position);

        /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
        if (position == 0) {
                head->arg.pages = &head->pages[0];
                page_offset = head->byte_len;
        } else {
                head->arg.pages = &head->pages[head->count];
                page_offset = 0;
        }

        ret = 0;
        page_no = 0;
        for (; ch->rc_discrim != xdr_zero; ch++) {
                if (be32_to_cpu(ch->rc_position) != position)
                        goto err;

                handle = be32_to_cpu(ch->rc_target.rs_handle);
                byte_count = be32_to_cpu(ch->rc_target.rs_length);
                xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
                                 &rs_offset);

                while (byte_count > 0) {
                        last = (ch + 1)->rc_discrim == xdr_zero;
                        ret = xprt->sc_reader(xprt, rqstp, head,
                                              &page_no, &page_offset,
                                              handle, byte_count,
                                              rs_offset, last);
                        if (ret < 0)
                                goto err;
                        byte_count -= ret;
                        rs_offset += ret;
                        head->arg.buflen += ret;
                }
        }

        /* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
        if (page_offset & 3) {
                u32 pad = 4 - (page_offset & 3);

                head->arg.page_len += pad;
                head->arg.len += pad;
                head->arg.buflen += pad;
                page_offset += pad;
        }

        ret = 1;
        if (position && position < head->arg.head[0].iov_len)
                ret = rdma_copy_tail(rqstp, head, position,
                                     byte_count, page_offset, page_no);
        head->arg.head[0].iov_len = position;
        head->position = position;

 err:
        /* Detach arg pages. svc_recv will replenish them */
        for (page_no = 0;
             &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
                rqstp->rq_pages[page_no] = NULL;

        return ret;
}

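/* Complete a request whose RDMA READs have finished: move the pages held in
 * the head context back into rqstp and rebuild rq_arg so the RPC layer can
 * process the call.
 */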
static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
        int page_no;
        int ret;

        /* Copy RPC pages */
        for (page_no = 0; page_no < head->count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->pages[page_no];
        }

        /* Adjustments made for RDMA_NOMSG type requests */
        if (head->position == 0) {
                if (head->arg.len <= head->sge[0].length) {
                        head->arg.head[0].iov_len = head->arg.len -
                                                        head->byte_len;
                        head->arg.page_len = 0;
                } else {
                        head->arg.head[0].iov_len = head->sge[0].length -
                                                        head->byte_len;
                        head->arg.page_len = head->arg.len -
                                                head->sge[0].length;
                }
        }

        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
        rqstp->rq_arg.page_len = head->arg.page_len;
        rqstp->rq_arg.page_base = head->arg.page_base;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->arg.head[0];
        rqstp->rq_arg.tail[0] = head->arg.tail[0];
        rqstp->rq_arg.len = head->arg.len;
        rqstp->rq_arg.buflen = head->arg.buflen;

        /* Free the context */
        svc_rdma_put_context(head, 0);

        /* XXX: What should this be? */
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
                ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);

        return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct rpcrdma_msg *rmsgp;
        int ret = 0;
        int len;

        dprintk("svcrdma: rqstp=%p\n", rqstp);

        spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
                ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
                return rdma_read_complete(rqstp, ctxt);
        } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
                ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        } else {
                atomic_inc(&rdma_stat_rq_starve);
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                ctxt = NULL;
        }
        spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!ctxt) {
                /* This is the EAGAIN path. The svc_recv routine will
                 * return -EAGAIN, the nfsd thread will go to call into
                 * svc_recv again and we shouldn't be on the active
                 * transport list
                 */
                if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
                        goto close_out;

                goto out;
        }
        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
                ctxt, rdma_xprt, rqstp, ctxt->wc_status);
        atomic_inc(&rdma_stat_recv);

        /* Build up the XDR from the receive buffers. */
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

        /* Decode the RDMA header. */
        len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
        rqstp->rq_xprt_hlen = len;

        /* If the request is invalid, reply with an error */
        if (len < 0) {
                if (len == -ENOSYS)
                        svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
                goto close_out;
        }

        /* Read read-list data. */
        ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
        if (ret > 0) {
                /* read-list posted, defer until data received from client. */
                goto defer;
        } else if (ret < 0) {
                /* Post of read-list failed, free context. */
                svc_rdma_put_context(ctxt, 1);
                return 0;
        }

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        svc_rdma_put_context(ctxt, 0);
 out:
        dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
                ret, rqstp->rq_arg.len,
                rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        return ret;

 close_out:
        if (ctxt)
                svc_rdma_put_context(ctxt, 1);
        dprintk("svcrdma: transport %p is closing\n", xprt);
        /*
         * Set the close bit and enqueue it. svc_recv will see the
         * close bit and call svc_xprt_delete
         */
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
 defer:
        return 0;
}