/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");
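
/*
 * Tunables. The values below are module defaults; when RPC_DEBUG is
 * enabled they are also exposed as sysctls (see xr_tunables_table
 * below) and can be changed at runtime.
 */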
static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
		int xprt_rdma_pad_optimize = 0;
#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;
static ctl_table xr_tunables_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name = 0,
	},
};

static ctl_table sunrpc_table[] = {
	{
		.ctl_name	= CTL_SUNRPC,
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{
		.ctl_name = 0,
	},
};

#endif
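
/*
 * With RPC_DEBUG enabled, the tunables above are registered under the
 * "sunrpc" sysctl directory, so they can be inspected and set from
 * userspace, e.g. (value illustrative):
 *
 *	cat /proc/sys/sunrpc/rdma_slot_table_entries
 *	sysctl -w sunrpc.rdma_slot_table_entries=64
 */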
static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */
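
/*
 * Populate xprt->address_strings, the human-readable forms of the
 * server address that the generic RPC layer uses for display.
 */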
static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	(void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
			NIPQUAD(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}
static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}
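
/*
 * Connect worker, run from the global workqueue. It performs the
 * actual (re)connect outside the caller's context and then wakes
 * any tasks waiting on the transport.
 */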
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	if (!xprt->shutdown) {
		xprt_clear_connected(xprt);

		dprintk("RPC: %s: %sconnect\n", __func__,
			r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
		rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
		if (rc)
			goto out;
	}
	goto out_clear;

out:
	xprt_wake_pending_tasks(xprt, rc);

out_clear:
	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}
/*
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work(&r_xprt->rdma_connect);
	flush_scheduled_work();

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		dprintk("RPC: %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	kfree(xprt->slot);
	xprt->slot = NULL;
	kfree(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};
/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;
	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}
	xprt = kzalloc(sizeof(struct rpcrdma_xprt), GFP_KERNEL);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}
	xprt->max_reqs = xprt_rdma_slot_table_entries;
	xprt->slot = kcalloc(xprt->max_reqs,
			sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL) {
		dprintk("RPC: %s: couldn't allocate %d slots\n",
			__func__, xprt->max_reqs);
		kfree(xprt);
		return ERR_PTR(-ENOMEM);
	}
	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
	xprt->connect_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
	xprt->ops = &xprt_rdma_procs;
	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC: %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));
	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;
	/*
	 * Create new transport instance, which includes initialized
	 * ia, endpoint, and buffers.
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;
	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;
	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;
	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	(void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(rc);
}
/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	(void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}
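
/*
 * Set the port in both the RPC-visible server address and the
 * RDMA connect address held in the transport private data.
 */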
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}
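
/*
 * Schedule a connect. Reconnects are delayed with exponential
 * backoff, clamped to the range [5, 30] seconds; a first-time
 * connect is scheduled immediately.
 */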
static void
xprt_rdma_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (!xprt_test_and_set_connecting(xprt)) {
		if (r_xprt->rx_ep.rep_connected != 0) {
			/* Reconnect */
			schedule_delayed_work(&r_xprt->rdma_connect,
				xprt->reestablish_timeout);
			xprt->reestablish_timeout <<= 1;
			if (xprt->reestablish_timeout > (30 * HZ))
				xprt->reestablish_timeout = (30 * HZ);
			else if (xprt->reestablish_timeout < (5 * HZ))
				xprt->reestablish_timeout = (5 * HZ);
		} else {
			schedule_delayed_work(&r_xprt->rdma_connect, 0);
			if (!RPC_IS_ASYNC(task))
				flush_scheduled_work();
		}
	}
}
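
/*
 * Reserve the transport for a new request. The RPC congestion
 * window is rescaled to the number of credits currently granted
 * by the server: cwnd = credits * cwndscale.
 */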
static int
xprt_rdma_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int credits = atomic_read(&r_xprt->rx_buf.rb_credits);

	/* == RPC_CWNDSCALE @ init, but *after* setup */
	if (r_xprt->rx_buf.rb_cwndscale == 0UL) {
		r_xprt->rx_buf.rb_cwndscale = xprt->cwnd;
		dprintk("RPC: %s: cwndscale %lu\n", __func__,
			r_xprt->rx_buf.rb_cwndscale);
		BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
	}
	xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
	return xprt_reserve_xprt_cong(task);
}
/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
	BUG_ON(NULL == req);

	if (size > req->rl_size) {
		dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
		if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy ==
				RPCRDMA_BOUNCEBUFFERS) {
			/* forced to "pure inline" */
			dprintk("RPC: %s: too much data (%zd) for inline "
				"(r/w max %d/%d)\n", __func__, size,
				rpcx_to_rdmad(xprt).inline_rsize,
				rpcx_to_rdmad(xprt).inline_wsize);
			size = req->rl_size;
			rpc_exit(task, -EIO);		/* fail the operation */
			rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
			goto out;
		}
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
out:
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}
/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC: %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. When using mw bind, this was
	 * begun in rpcrdma_reply_handler(). In all other modes, we
	 * do it here, in thread context. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt, NULL);
	}

	if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
		rep->rr_func = NULL;	/* abandon the callback */
		req->rl_reply = NULL;
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
						req->rl_handle, &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}
/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up asynch completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */
static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	/* marshal the send itself */
	if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) {
		r_xprt->rx_stats.failed_marshal_count++;
		dprintk("RPC: %s: rpcrdma_marshal_req failed\n",
			__func__);
		return -EIO;
	}

	if (req->rl_reply == NULL)		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	task->tk_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}
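
/*
 * Emit the generic xprt counters followed by the RPC/RDMA-specific
 * counters on a single "xprt:" line; this is the output that
 * typically surfaces in /proc/self/mountstats.
 */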
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}
/*
 * Plumbing for rpc transport switch and kernel module
 */
static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_rdma_reserve_xprt,
	.release_xprt		= xprt_release_xprt_cong,	/* sunrpc/xprt.c */
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,	/* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};
static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);
}
static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}
module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);