/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
                int xprt_rdma_pad_optimize = 1;

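/* When CONFIG_SUNRPC_DEBUG is enabled, the tunables above are exposed
 * read-write through the "sunrpc" sysctl directory via the
 * xr_tunables_table below.
 */
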
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
        {
                .procname       = "rdma_slot_table_entries",
                .data           = &xprt_rdma_slot_table_entries,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_slot_table_size,
                .extra2         = &max_slot_table_size
        },
        {
                .procname       = "rdma_max_inline_read",
                .data           = &xprt_rdma_max_inline_read,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "rdma_max_inline_write",
                .data           = &xprt_rdma_max_inline_write,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "rdma_inline_write_padding",
                .data           = &xprt_rdma_inline_write_padding,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero,
                .extra2         = &max_padding,
        },
        {
                .procname       = "rdma_memreg_strategy",
                .data           = &xprt_rdma_memreg_strategy,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_memreg,
                .extra2         = &max_memreg,
        },
        {
                .procname       = "rdma_pad_optimize",
                .data           = &xprt_rdma_pad_optimize,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { },
};

static struct ctl_table sunrpc_table[] = {
        {
                .procname       = "sunrpc",
                .mode           = 0555,
                .child          = xr_tunables_table
        },
        { },
};

#endif

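/* Transport timeouts, in jiffies. xprt_rdma_connect() doubles the
 * reestablish timeout after each failed attempt and clamps it to the
 * range [RPCRDMA_INIT_REEST_TO, RPCRDMA_MAX_REEST_TO].
 */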
#define RPCRDMA_BIND_TO         (60U * HZ)
#define RPCRDMA_INIT_REEST_TO   (5U * HZ)
#define RPCRDMA_MAX_REEST_TO    (30U * HZ)
#define RPCRDMA_IDLE_DISC_TO    (5U * 60 * HZ)

static struct rpc_xprt_ops xprt_rdma_procs;     /* forward reference */

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)sap;
        char buf[20];

        snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
        xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

        xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
        char buf[40];

        snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
        xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

        xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        char buf[128];

        switch (sap->sa_family) {
        case AF_INET:
                xprt_rdma_format_addresses4(xprt, sap);
                break;
        case AF_INET6:
                xprt_rdma_format_addresses6(xprt, sap);
                break;
        default:
                pr_err("rpcrdma: Unrecognized address family\n");
                return;
        }

        (void)rpc_ntop(sap, buf, sizeof(buf));
        xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

        xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
        unsigned int i;

        for (i = 0; i < RPC_DISPLAY_MAX; i++)
                switch (i) {
                case RPC_DISPLAY_PROTO:
                case RPC_DISPLAY_NETID:
                        continue;
                default:
                        kfree(xprt->address_strings[i]);
                }
}

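/* Worker that establishes (or re-establishes) a connection. It runs
 * from the transport's delayed workqueue so connection setup happens
 * in process context; pending RPC tasks are woken if the attempt fails.
 */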
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
        struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
                                                   rx_connect_worker.work);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        int rc = 0;

        xprt_clear_connected(xprt);

        dprintk("RPC: %s: %sconnect\n", __func__,
                r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
        rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
        if (rc)
                xprt_wake_pending_tasks(xprt, rc);

        dprintk("RPC: %s: exit\n", __func__);
        xprt_clear_connecting(xprt);
}

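/* Fault-injection hook: forcibly disconnect the underlying RDMA
 * connection so that connection-recovery paths can be exercised.
 */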
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
                                                   rx_xprt);

        pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
        rdma_disconnect(r_xprt->rx_ia.ri_id);
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        dprintk("RPC: %s: called\n", __func__);

        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

        xprt_clear_connected(xprt);

        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
        rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);

        xprt_rdma_free_addresses(xprt);
        xprt_free(xprt);

        dprintk("RPC: %s: returning\n", __func__);

        module_put(THIS_MODULE);
}

static const struct rpc_timeout xprt_rdma_default_timeout = {
        .to_initval = 60 * HZ,
        .to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
        struct rpcrdma_create_data_internal cdata;
        struct rpc_xprt *xprt;
        struct rpcrdma_xprt *new_xprt;
        struct rpcrdma_ep *new_ep;
        struct sockaddr *sap;
        int rc;

        if (args->addrlen > sizeof(xprt->addr)) {
                dprintk("RPC: %s: address too large\n", __func__);
                return ERR_PTR(-EBADF);
        }

        xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
                          xprt_rdma_slot_table_entries,
                          xprt_rdma_slot_table_entries);
        if (xprt == NULL) {
                dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
                        __func__);
                return ERR_PTR(-ENOMEM);
        }

        /* 60 second timeout, no retries */
        xprt->timeout = &xprt_rdma_default_timeout;
        xprt->bind_timeout = RPCRDMA_BIND_TO;
        xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

        xprt->resvport = 0;             /* privileged port not needed */
        xprt->tsh_size = 0;             /* RPC-RDMA handles framing */
        xprt->ops = &xprt_rdma_procs;

        /*
         * Set up RDMA-specific connect data.
         */

        sap = (struct sockaddr *)&cdata.addr;
        memcpy(sap, args->dstaddr, args->addrlen);

        /* Ensure xprt->addr holds valid server TCP (not RDMA)
         * address, for any side protocols which peek at it */
        xprt->prot = IPPROTO_TCP;
        xprt->addrlen = args->addrlen;
        memcpy(&xprt->addr, sap, xprt->addrlen);

        if (rpc_get_port(sap))
                xprt_set_bound(xprt);

        cdata.max_requests = xprt->max_reqs;

        cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
        cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

        cdata.inline_wsize = xprt_rdma_max_inline_write;
        if (cdata.inline_wsize > cdata.wsize)
                cdata.inline_wsize = cdata.wsize;

        cdata.inline_rsize = xprt_rdma_max_inline_read;
        if (cdata.inline_rsize > cdata.rsize)
                cdata.inline_rsize = cdata.rsize;

        cdata.padding = xprt_rdma_inline_write_padding;

        /*
         * Create new transport instance, which includes initialized
         *  o ia
         *  o endpoint
         *  o buffers
         */

        new_xprt = rpcx_to_rdmax(xprt);

        rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy);
        if (rc)
                goto out1;

        /*
         * initialize and create ep
         */
        new_xprt->rx_data = cdata;
        new_ep = &new_xprt->rx_ep;
        new_ep->rep_remote_addr = cdata.addr;

        rc = rpcrdma_ep_create(&new_xprt->rx_ep,
                               &new_xprt->rx_ia, &new_xprt->rx_data);
        if (rc)
                goto out2;

        /*
         * Allocate pre-registered send and receive buffers for headers and
         * any inline data. Also specify any padding which will be provided
         * from a preregistered zero buffer.
         */
        rc = rpcrdma_buffer_create(new_xprt);
        if (rc)
                goto out3;

        /*
         * Register a callback for connection events. This is necessary because
         * connection loss notification is async. We also catch connection loss
         * when reaping receives.
         */
        INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
                          xprt_rdma_connect_worker);

        xprt_rdma_format_addresses(xprt, sap);
        xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
        if (xprt->max_payload == 0)
                goto out4;
        xprt->max_payload <<= PAGE_SHIFT;
        dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
                __func__, xprt->max_payload);

        if (!try_module_get(THIS_MODULE))
                goto out4;

        dprintk("RPC: %s: %s:%s\n", __func__,
                xprt->address_strings[RPC_DISPLAY_ADDR],
                xprt->address_strings[RPC_DISPLAY_PORT]);
        return xprt;

out4:
        xprt_rdma_free_addresses(xprt);
        rc = -EINVAL;
out3:
        rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
        rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
        xprt_free(xprt);
        return ERR_PTR(rc);
}

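/* Note: instances are normally created by the RPC client core, which
 * dispatches to .setup (xprt_setup_rdma above) through the xprt_class
 * registered in xprt_rdma_init() when a consumer such as NFS requests
 * XPRT_TRANSPORT_RDMA (e.g. a mount with proto=rdma).
 */
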
/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        dprintk("RPC: %s: closing\n", __func__);
        if (r_xprt->rx_ep.rep_connected > 0)
                xprt->reestablish_timeout = 0;
        xprt_disconnect_done(xprt);
        rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

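/* Update the port number in both the generic transport address and the
 * RDMA connect address. Invoked via rpcbind once the server's actual
 * port is known.
 */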
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
        struct sockaddr_in *sap;

        sap = (struct sockaddr_in *)&xprt->addr;
        sap->sin_port = htons(port);
        sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
        sap->sin_port = htons(port);
        dprintk("RPC: %s: %u\n", __func__, port);
}

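/* Initiate a connect (or reconnect). Reconnects are delayed by an
 * exponentially growing backoff; a first connect is scheduled
 * immediately, and synchronous tasks wait for it to complete.
 */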
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        if (r_xprt->rx_ep.rep_connected != 0) {
                /* Reconnect */
                schedule_delayed_work(&r_xprt->rx_connect_worker,
                                      xprt->reestablish_timeout);
                xprt->reestablish_timeout <<= 1;
                if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
                        xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
                else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
                        xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        } else {
                schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
                if (!RPC_IS_ASYNC(task))
                        flush_delayed_work(&r_xprt->rx_connect_worker);
        }
}

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence.
 *
 * The RPC layer allocates both send and receive buffers in the same call
 * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
 * We may register rq_rcv_buf when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_regbuf *rb;
        struct rpcrdma_req *req;
        size_t min_size;
        gfp_t flags;

        req = rpcrdma_buffer_get(&r_xprt->rx_buf);
        if (req == NULL)
                return NULL;

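        /* Pick allocation flags that cannot recurse into I/O: ordinary
         * requests may sleep but must not start I/O (GFP_NOIO), while a
         * task swapping over this transport must not sleep at all and
         * may dip into reserves (__GFP_MEMALLOC | GFP_NOWAIT).
         */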
        flags = GFP_NOIO | __GFP_NOWARN;
        if (RPC_IS_SWAPPER(task))
                flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

        if (req->rl_rdmabuf == NULL)
                goto out_rdmabuf;
        if (req->rl_sendbuf == NULL)
                goto out_sendbuf;
        if (size > req->rl_sendbuf->rg_size)
                goto out_sendbuf;

out:
        dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
        req->rl_connect_cookie = 0;     /* our reserved value */
        return req->rl_sendbuf->rg_base;

out_rdmabuf:
        min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
        rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
        if (IS_ERR(rb))
                goto out_fail;
        req->rl_rdmabuf = rb;

out_sendbuf:
        /* XDR encoding and RPC/RDMA marshaling of this request has not
         * yet occurred. Thus a lower bound is needed to prevent buffer
         * overrun during marshaling.
         *
         * RPC/RDMA marshaling may choose to send payload bearing ops
         * inline, if the result is smaller than the inline threshold.
         * The value of the "size" argument accounts for header
         * requirements but not for the payload in these cases.
         *
         * Likewise, allocate enough space to receive a reply up to the
         * size of the inline threshold.
         *
         * It's unlikely that both the send header and the received
         * reply will be large, but slush is provided here to allow
         * flexibility when marshaling.
         */
        min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
        min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
        if (size < min_size)
                size = min_size;

        rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
        if (IS_ERR(rb))
                goto out_fail;
        rb->rg_owner = req;

        r_xprt->rx_stats.hardway_register_count += size;
        rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
        req->rl_sendbuf = rb;
        goto out;

out_fail:
        rpcrdma_buffer_put(req);
        r_xprt->rx_stats.failed_marshal_count++;
        return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
        struct rpcrdma_req *req;
        struct rpcrdma_xprt *r_xprt;
        struct rpcrdma_regbuf *rb;
        int i;

        if (buffer == NULL)
                return;

        rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
        req = rb->rg_owner;
        r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);

        dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);

        for (i = 0; req->rl_nchunks;) {
                --req->rl_nchunks;
                i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
                                                    &req->rl_segments[i]);
        }

        rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *      putting a header in front of data, and creating IOVs for RDMA
 *      from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up asynch completion, then send
 *      the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */

static int
xprt_rdma_send_request(struct rpc_task *task)
{
        struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        int rc = 0;

        rc = rpcrdma_marshal_req(rqst);
        if (rc < 0)
                goto failed_marshal;

        if (req->rl_reply == NULL)              /* e.g. reconnection */
                rpcrdma_recv_buffer_get(req);

        /* Must suppress retransmit to maintain credits */
        if (req->rl_connect_cookie == xprt->connect_cookie)
                goto drop_connection;
        req->rl_connect_cookie = xprt->connect_cookie;

        if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
                goto drop_connection;

        rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
        rqst->rq_bytes_sent = 0;
        return 0;

failed_marshal:
        r_xprt->rx_stats.failed_marshal_count++;
        dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n",
                __func__, rc);
        if (rc == -EIO)
                return -EIO;
drop_connection:
        xprt_disconnect_done(xprt);
        return -ENOTCONN;       /* implies disconnect */
}

static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        long idle_time = 0;

        if (xprt_connected(xprt))
                idle_time = (long)(jiffies - xprt->last_used) / HZ;

        seq_puts(seq, "\txprt:\trdma ");
        seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
                   0,   /* need a local port? */
                   xprt->stat.bind_count,
                   xprt->stat.connect_count,
                   xprt->stat.connect_time,
                   idle_time,
                   xprt->stat.sends,
                   xprt->stat.recvs,
                   xprt->stat.bad_xids,
                   xprt->stat.req_u,
                   xprt->stat.bklog_u);
        seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n",
                   r_xprt->rx_stats.read_chunk_count,
                   r_xprt->rx_stats.write_chunk_count,
                   r_xprt->rx_stats.reply_chunk_count,
                   r_xprt->rx_stats.total_rdma_request,
                   r_xprt->rx_stats.total_rdma_reply,
                   r_xprt->rx_stats.pullup_copy_count,
                   r_xprt->rx_stats.fixup_copy_count,
                   r_xprt->rx_stats.hardway_register_count,
                   r_xprt->rx_stats.failed_marshal_count,
                   r_xprt->rx_stats.bad_reply_count,
                   r_xprt->rx_stats.nomsg_call_count);
}

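/* Swap is always permitted on this transport; the enable/disable
 * hooks below are intentionally no-ops.
 */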
static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
        return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong, /* sunrpc/xprt.c */
        .alloc_slot             = xprt_alloc_slot,
        .release_request        = xprt_release_rqst_cong,       /* ditto */
        .set_retrans_timeout    = xprt_set_retrans_timeout_def, /* ditto */
        .rpcbind                = rpcb_getport_async,   /* sunrpc/rpcb_clnt.c */
        .set_port               = xprt_rdma_set_port,
        .connect                = xprt_rdma_connect,
        .buf_alloc              = xprt_rdma_allocate,
        .buf_free               = xprt_rdma_free,
        .send_request           = xprt_rdma_send_request,
        .close                  = xprt_rdma_close,
        .destroy                = xprt_rdma_destroy,
        .print_stats            = xprt_rdma_print_stats,
        .enable_swap            = xprt_rdma_enable_swap,
        .disable_swap           = xprt_rdma_disable_swap,
        .inject_disconnect      = xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        .bc_setup               = xprt_rdma_bc_setup,
        .bc_up                  = xprt_rdma_bc_up,
        .bc_free_rqst           = xprt_rdma_bc_free_rqst,
        .bc_destroy             = xprt_rdma_bc_destroy,
#endif
};

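/* Registering this class (see xprt_rdma_init below) is what makes the
 * XPRT_TRANSPORT_RDMA ident resolvable by RPC consumers.
 */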
static struct xprt_class xprt_rdma = {
        .list                   = LIST_HEAD_INIT(xprt_rdma.list),
        .name                   = "rdma",
        .owner                  = THIS_MODULE,
        .ident                  = XPRT_TRANSPORT_RDMA,
        .setup                  = xprt_setup_rdma,
};

void xprt_rdma_cleanup(void)
{
        int rc;

        dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        if (sunrpc_table_header) {
                unregister_sysctl_table(sunrpc_table_header);
                sunrpc_table_header = NULL;
        }
#endif
        rc = xprt_unregister_transport(&xprt_rdma);
        if (rc)
                dprintk("RPC: %s: xprt_unregister returned %i\n",
                        __func__, rc);

        rpcrdma_destroy_wq();
        frwr_destroy_recovery_wq();
}

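/* Module init: bring up the workqueues before registering the
 * transport, so that a transport created immediately after
 * registration finds everything it depends on already in place;
 * xprt_rdma_cleanup() above tears down in the reverse order.
 */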
int xprt_rdma_init(void)
{
        int rc;

        rc = frwr_alloc_recovery_wq();
        if (rc)
                return rc;

        rc = rpcrdma_alloc_wq();
        if (rc) {
                frwr_destroy_recovery_wq();
                return rc;
        }

        rc = xprt_register_transport(&xprt_rdma);
        if (rc) {
                rpcrdma_destroy_wq();
                frwr_destroy_recovery_wq();
                return rc;
        }

        dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");

        dprintk("Defaults:\n");
        dprintk("\tSlots %d\n"
                "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
                xprt_rdma_slot_table_entries,
                xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
        dprintk("\tPadding %d\n\tMemreg %d\n",
                xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        if (!sunrpc_table_header)
                sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
        return 0;
}