/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
41 #include <linux/module.h>
42 #include <linux/init.h>
43 #include <linux/slab.h>
45 #include <linux/sysctl.h>
46 #include <linux/workqueue.h>
47 #include <linux/sunrpc/clnt.h>
48 #include <linux/sunrpc/sched.h>
49 #include <linux/sunrpc/svc_rdma.h>
50 #include "xprt_rdma.h"
52 #define RPCDBG_FACILITY RPCDBG_SVCXPRT
54 /* RPC/RDMA parameters */
55 unsigned int svcrdma_ord
= RPCRDMA_ORD
;
56 static unsigned int min_ord
= 1;
57 static unsigned int max_ord
= 4096;
58 unsigned int svcrdma_max_requests
= RPCRDMA_MAX_REQUESTS
;
59 static unsigned int min_max_requests
= 4;
60 static unsigned int max_max_requests
= 16384;
61 unsigned int svcrdma_max_req_size
= RPCRDMA_MAX_REQ_SIZE
;
62 static unsigned int min_max_inline
= 4096;
63 static unsigned int max_max_inline
= 65536;
65 atomic_t rdma_stat_recv
;
66 atomic_t rdma_stat_read
;
67 atomic_t rdma_stat_write
;
68 atomic_t rdma_stat_sq_starve
;
69 atomic_t rdma_stat_rq_starve
;
70 atomic_t rdma_stat_rq_poll
;
71 atomic_t rdma_stat_rq_prod
;
72 atomic_t rdma_stat_sq_poll
;
73 atomic_t rdma_stat_sq_prod
;
/* Temporary NFS request map and context caches, created in
 * svc_rdma_init() and torn down in svc_rdma_cleanup(). */
struct kmem_cache *svc_rdma_map_cachep;
struct kmem_cache *svc_rdma_ctxt_cachep;

/* Workqueue for deferred transport work */
struct workqueue_struct *svc_rdma_wq;
82 * This function implements reading and resetting an atomic_t stat
83 * variable through read/write to a proc file. Any write to the file
84 * resets the associated statistic to zero. Any read returns it's
87 static int read_reset_stat(struct ctl_table
*table
, int write
,
88 void __user
*buffer
, size_t *lenp
,
91 atomic_t
*stat
= (atomic_t
*)table
->data
;
101 int len
= snprintf(str_buf
, 32, "%d\n", atomic_read(stat
));
104 len
= strlen(str_buf
);
109 data
= &str_buf
[*ppos
];
113 if (len
&& copy_to_user(buffer
, str_buf
, len
))
121 static struct ctl_table_header
*svcrdma_table_header
;
122 static struct ctl_table svcrdma_parm_table
[] = {
124 .procname
= "max_requests",
125 .data
= &svcrdma_max_requests
,
126 .maxlen
= sizeof(unsigned int),
128 .proc_handler
= proc_dointvec_minmax
,
129 .extra1
= &min_max_requests
,
130 .extra2
= &max_max_requests
133 .procname
= "max_req_size",
134 .data
= &svcrdma_max_req_size
,
135 .maxlen
= sizeof(unsigned int),
137 .proc_handler
= proc_dointvec_minmax
,
138 .extra1
= &min_max_inline
,
139 .extra2
= &max_max_inline
142 .procname
= "max_outbound_read_requests",
143 .data
= &svcrdma_ord
,
144 .maxlen
= sizeof(unsigned int),
146 .proc_handler
= proc_dointvec_minmax
,
152 .procname
= "rdma_stat_read",
153 .data
= &rdma_stat_read
,
154 .maxlen
= sizeof(atomic_t
),
156 .proc_handler
= read_reset_stat
,
159 .procname
= "rdma_stat_recv",
160 .data
= &rdma_stat_recv
,
161 .maxlen
= sizeof(atomic_t
),
163 .proc_handler
= read_reset_stat
,
166 .procname
= "rdma_stat_write",
167 .data
= &rdma_stat_write
,
168 .maxlen
= sizeof(atomic_t
),
170 .proc_handler
= read_reset_stat
,
173 .procname
= "rdma_stat_sq_starve",
174 .data
= &rdma_stat_sq_starve
,
175 .maxlen
= sizeof(atomic_t
),
177 .proc_handler
= read_reset_stat
,
180 .procname
= "rdma_stat_rq_starve",
181 .data
= &rdma_stat_rq_starve
,
182 .maxlen
= sizeof(atomic_t
),
184 .proc_handler
= read_reset_stat
,
187 .procname
= "rdma_stat_rq_poll",
188 .data
= &rdma_stat_rq_poll
,
189 .maxlen
= sizeof(atomic_t
),
191 .proc_handler
= read_reset_stat
,
194 .procname
= "rdma_stat_rq_prod",
195 .data
= &rdma_stat_rq_prod
,
196 .maxlen
= sizeof(atomic_t
),
198 .proc_handler
= read_reset_stat
,
201 .procname
= "rdma_stat_sq_poll",
202 .data
= &rdma_stat_sq_poll
,
203 .maxlen
= sizeof(atomic_t
),
205 .proc_handler
= read_reset_stat
,
208 .procname
= "rdma_stat_sq_prod",
209 .data
= &rdma_stat_sq_prod
,
210 .maxlen
= sizeof(atomic_t
),
212 .proc_handler
= read_reset_stat
,
217 static struct ctl_table svcrdma_table
[] = {
219 .procname
= "svc_rdma",
221 .child
= svcrdma_parm_table
226 static struct ctl_table svcrdma_root_table
[] = {
228 .procname
= "sunrpc",
230 .child
= svcrdma_table
235 void svc_rdma_cleanup(void)
237 dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
238 destroy_workqueue(svc_rdma_wq
);
239 if (svcrdma_table_header
) {
240 unregister_sysctl_table(svcrdma_table_header
);
241 svcrdma_table_header
= NULL
;
243 svc_unreg_xprt_class(&svc_rdma_class
);
244 kmem_cache_destroy(svc_rdma_map_cachep
);
245 kmem_cache_destroy(svc_rdma_ctxt_cachep
);
248 int svc_rdma_init(void)
250 dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
251 dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord
);
252 dprintk("\tmax_requests : %d\n", svcrdma_max_requests
);
253 dprintk("\tsq_depth : %d\n",
254 svcrdma_max_requests
* RPCRDMA_SQ_DEPTH_MULT
);
255 dprintk("\tmax_inline : %d\n", svcrdma_max_req_size
);
257 svc_rdma_wq
= alloc_workqueue("svc_rdma", 0, 0);
261 if (!svcrdma_table_header
)
262 svcrdma_table_header
=
263 register_sysctl_table(svcrdma_root_table
);
265 /* Create the temporary map cache */
266 svc_rdma_map_cachep
= kmem_cache_create("svc_rdma_map_cache",
267 sizeof(struct svc_rdma_req_map
),
271 if (!svc_rdma_map_cachep
) {
272 printk(KERN_INFO
"Could not allocate map cache.\n");
276 /* Create the temporary context cache */
277 svc_rdma_ctxt_cachep
=
278 kmem_cache_create("svc_rdma_ctxt_cache",
279 sizeof(struct svc_rdma_op_ctxt
),
283 if (!svc_rdma_ctxt_cachep
) {
284 printk(KERN_INFO
"Could not allocate WR ctxt cache.\n");
288 /* Register RDMA with the SVC transport switch */
289 svc_reg_xprt_class(&svc_rdma_class
);
292 kmem_cache_destroy(svc_rdma_map_cachep
);
294 unregister_sysctl_table(svcrdma_table_header
);
295 destroy_workqueue(svc_rdma_wq
);
298 MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
299 MODULE_DESCRIPTION("SVC RDMA Transport");
300 MODULE_LICENSE("Dual BSD/GPL");
301 module_init(svc_rdma_init
);
302 module_exit(svc_rdma_cleanup
);