/*
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");
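
/*
 * Tunables. Defaults are set here; the sysctl table below exposes
 * them for runtime adjustment under /proc/sys/sunrpc.
 */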
71 static unsigned int xprt_rdma_slot_table_entries
= RPCRDMA_DEF_SLOT_TABLE
;
72 static unsigned int xprt_rdma_max_inline_read
= RPCRDMA_DEF_INLINE
;
73 static unsigned int xprt_rdma_max_inline_write
= RPCRDMA_DEF_INLINE
;
74 static unsigned int xprt_rdma_inline_write_padding
;
75 static unsigned int xprt_rdma_memreg_strategy
= RPCRDMA_FRMR
;
76 int xprt_rdma_pad_optimize
= 0;
80 static unsigned int min_slot_table_size
= RPCRDMA_MIN_SLOT_TABLE
;
81 static unsigned int max_slot_table_size
= RPCRDMA_MAX_SLOT_TABLE
;
82 static unsigned int zero
;
83 static unsigned int max_padding
= PAGE_SIZE
;
84 static unsigned int min_memreg
= RPCRDMA_BOUNCEBUFFERS
;
85 static unsigned int max_memreg
= RPCRDMA_LAST
- 1;
87 static struct ctl_table_header
*sunrpc_table_header
;
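
/*
 * sysctl entries for the tunables above. Bounded values use
 * proc_dointvec_minmax with extra1/extra2 as the permitted range;
 * unbounded ones use plain proc_dointvec.
 */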
static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};
static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */
static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}
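
/*
 * RPC_DISPLAY_PROTO and RPC_DISPLAY_NETID above point at static
 * string literals rather than kstrdup'd copies, so they are skipped
 * when the address strings are freed.
 */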
static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}
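
/*
 * Connect worker, run from the delayed work scheduled by
 * xprt_rdma_connect(). PF_FSTRANS is set for the duration so that
 * memory allocations made while connecting do not recurse into
 * filesystem writeback.
 */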
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	current->flags |= PF_FSTRANS;
	xprt_clear_connected(xprt);

	dprintk("RPC: %s: %sconnect\n", __func__,
			r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
	current->flags &= ~PF_FSTRANS;
}
/*
 * xprt_rdma_destroy
 *
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rdma_connect);

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		dprintk("RPC: %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};
/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
	xprt->ops = &xprt_rdma_procs;
	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC: %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;
	/*
	 * Create new transport instance, which includes initialized
	 * ia, endpoint, and buffers.
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	(void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}
/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	(void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}
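
/*
 * Kick off a (re)connect. A reconnect is delayed by the current
 * reestablish_timeout, which is then doubled and clamped to the
 * 5..30 second range; an initial connect is scheduled immediately
 * and, for synchronous tasks, waited on.
 */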
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		schedule_delayed_work(&r_xprt->rdma_connect,
			xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > (30 * HZ))
			xprt->reestablish_timeout = (30 * HZ);
		else if (xprt->reestablish_timeout < (5 * HZ))
			xprt->reestablish_timeout = (5 * HZ);
	} else {
		schedule_delayed_work(&r_xprt->rdma_connect, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rdma_connect);
	}
}
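
/*
 * RPC/RDMA flow control is credit-based: rb_credits reflects the
 * number of requests the server has granted, and the congestion
 * window is scaled so that no more than that many requests are
 * outstanding at once.
 */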
static int
xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int credits = atomic_read(&r_xprt->rx_buf.rb_credits);

	/* == RPC_CWNDSCALE @ init, but *after* setup */
	if (r_xprt->rx_buf.rb_cwndscale == 0UL) {
		r_xprt->rx_buf.rb_cwndscale = xprt->cwnd;
		dprintk("RPC: %s: cwndscale %lu\n", __func__,
			r_xprt->rx_buf.rb_cwndscale);
		BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
	}
	xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
	return xprt_reserve_xprt_cong(xprt, task);
}
/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);

	if (size > req->rl_size) {
		dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
		if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy ==
				RPCRDMA_BOUNCEBUFFERS) {
			/* forced to "pure inline" */
			dprintk("RPC: %s: too much data (%zd) for inline "
					"(r/w max %d/%d)\n", __func__, size,
					rpcx_to_rdmad(xprt).inline_rsize,
					rpcx_to_rdmad(xprt).inline_wsize);
			rpc_exit(task, -EIO);		/* fail the operation */
			rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
			goto out;
		}
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
out:
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}
/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				      struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC: %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. When using mw bind, this was
	 * begun in rpcrdma_reply_handler(). In all other modes, we
	 * do it here, in thread context. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt, NULL);
	}

	if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
		rep->rr_func = NULL;	/* abandon the callback */
		req->rl_reply = NULL;
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
						   req->rl_handle,
						   &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}
/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1. Marshal the RPC request into an RPC RDMA request, which means
 *     putting a header in front of data, and creating IOVs for RDMA
 *     from those in the request.
 *  2. In marshaling, detect opportunities for RDMA, and use them.
 *  3. Post a recv message to set up asynch completion, then send
 *     the request (rpcrdma_ep_post).
 *  4. No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */
static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	/* marshal the send itself */
	if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) {
		r_xprt->rx_stats.failed_marshal_count++;
		dprintk("RPC: %s: rpcrdma_marshal_req failed\n",
			__func__);
		return -EIO;
	}

	if (req->rl_reply == NULL)		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}
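
/*
 * Emit a transport statistics line for the generic RPC iostats code:
 * the generic xprt counters first, then the RPC/RDMA chunk and
 * registration counters.
 */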
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}
/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_rdma_reserve_xprt,
	.release_xprt		= xprt_release_xprt_cong,	/* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,	/* ditto */
	.rpcbind		= rpcb_getport_async,		/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};
static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
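
/*
 * Module load/unload: register the transport with the RPC client and
 * set up the sysctl table on init; tear both down on exit.
 */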
static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);
}
static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);

	return 0;
}

module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);