/* net/rds/ib.c */
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"
unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
module_param(rds_ib_mr_1m_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
module_param(rds_ib_mr_8k_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
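
/*
 * All three parameters use mode 0444, so they can only be set at load
 * time, e.g. "modprobe rds_rdma rds_ib_retry_count=7", and are readable
 * afterwards under /sys/module/<module>/parameters/.  (The exact module
 * name depends on how net/rds is built; rds_rdma is the usual one for
 * this transport.)
 */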
/*
 * we have a clumsy combination of RCU and a rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;
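
/*
 * Concretely (a sketch of the conventions used in this file, not extra
 * locking): writers take down_write() and manipulate the list with
 * list_add_tail_rcu() / list_del_rcu(); readers that may block take
 * down_read(); the get_mr fast path walks the list under rcu_read_lock()
 * alone, relying on the synchronize_rcu() grace period in
 * rds_ib_remove_one() before any device is freed.
 */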
/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);
static void rds_ib_nodev_connect(void)
{
        struct rds_ib_connection *ic;

        spin_lock(&ib_nodev_conns_lock);
        list_for_each_entry(ic, &ib_nodev_conns, ib_node)
                rds_conn_connect_if_down(ic->conn);
        spin_unlock(&ib_nodev_conns_lock);
}
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_connection *ic;
        unsigned long flags;

        spin_lock_irqsave(&rds_ibdev->spinlock, flags);
        list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
                rds_conn_drop(ic->conn);
        spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
        struct rds_ib_ipaddr *i_ipaddr, *i_next;
        struct rds_ib_device *rds_ibdev = container_of(work,
                                        struct rds_ib_device, free_work);

        if (rds_ibdev->mr_8k_pool)
                rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
        if (rds_ibdev->mr_1m_pool)
                rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
        if (rds_ibdev->pd)
                ib_dealloc_pd(rds_ibdev->pd);

        list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
                list_del(&i_ipaddr->list);
                kfree(i_ipaddr);
        }

        kfree(rds_ibdev);
}
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
        BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
        if (atomic_dec_and_test(&rds_ibdev->refcount))
                queue_work(rds_wq, &rds_ibdev->free_work);
}
static void rds_ib_add_one(struct ib_device *device)
{
        struct rds_ib_device *rds_ibdev;

        /* Only handle IB (no iWARP) devices */
        if (device->node_type != RDMA_NODE_IB_CA)
                return;

        rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
                                 ibdev_to_node(device));
        if (!rds_ibdev)
                return;

        spin_lock_init(&rds_ibdev->spinlock);
        atomic_set(&rds_ibdev->refcount, 1);
        INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

        rds_ibdev->max_wrs = device->attrs.max_qp_wr;
        rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);

        rds_ibdev->has_fr = (device->attrs.device_cap_flags &
                             IB_DEVICE_MEM_MGT_EXTENSIONS);
        rds_ibdev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
                              device->map_phys_fmr && device->unmap_fmr);
        rds_ibdev->use_fastreg = (rds_ibdev->has_fr && !rds_ibdev->has_fmr);

        rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr ?: 32;
        rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
                min_t(unsigned int, (device->attrs.max_mr / 2),
                      rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;

        rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
                min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE),
                      rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size;

        rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
        rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;

        rds_ibdev->dev = device;
        rds_ibdev->pd = ib_alloc_pd(device, 0);
        if (IS_ERR(rds_ibdev->pd)) {
                rds_ibdev->pd = NULL;
                goto put_dev;
        }

        rds_ibdev->mr_1m_pool =
                rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
        if (IS_ERR(rds_ibdev->mr_1m_pool)) {
                rds_ibdev->mr_1m_pool = NULL;
                goto put_dev;
        }

        rds_ibdev->mr_8k_pool =
                rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
        if (IS_ERR(rds_ibdev->mr_8k_pool)) {
                rds_ibdev->mr_8k_pool = NULL;
                goto put_dev;
        }

        rdsdebug("RDS/IB: max_fmr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
                 device->attrs.max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
                 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
                 rds_ibdev->max_8k_mrs);

        pr_info("RDS/IB: %s: %s supported and preferred\n",
                device->name,
                rds_ibdev->use_fastreg ? "FRMR" : "FMR");

        INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
        INIT_LIST_HEAD(&rds_ibdev->conn_list);

        down_write(&rds_ib_devices_lock);
        list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
        up_write(&rds_ib_devices_lock);
        atomic_inc(&rds_ibdev->refcount);

        ib_set_client_data(device, &rds_ib_client, rds_ibdev);
        atomic_inc(&rds_ibdev->refcount);

        rds_ib_nodev_connect();
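
        /*
         * Refcount bookkeeping: the count is now 3 (the initial reference
         * from allocation, one for the rds_ib_devices list and one for
         * client_data).  Both success and error paths fall through to
         * put_dev, which drops the initial reference, so a live device
         * settles at 2 plus one reference per connection that looks it
         * up via rds_ib_get_client_data().
         */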
put_dev:
        rds_ib_dev_put(rds_ibdev);
}
/*
 * New connections use this to find the device to associate with the
 * connection.  It's not in the fast path so we're not concerned about the
 * performance of the IB call.  (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period.  The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if it's arriving on a
 * device that is in the process of being removed.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
        struct rds_ib_device *rds_ibdev;

        rcu_read_lock();
        rds_ibdev = ib_get_client_data(device, &rds_ib_client);
        if (rds_ibdev)
                atomic_inc(&rds_ibdev->refcount);
        rcu_read_unlock();
        return rds_ibdev;
}
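
/*
 * Typical caller pattern, sketched here for illustration only (the real
 * callers live elsewhere in net/rds, and the error value is hypothetical):
 *
 *      struct rds_ib_device *rds_ibdev = rds_ib_get_client_data(dev);
 *
 *      if (!rds_ibdev)
 *              return -ENODEV;         (the device is mid-removal)
 *      ... use rds_ibdev ...
 *      rds_ib_dev_put(rds_ibdev);
 */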
/*
 * The IB stack is letting us know that a device is going away.  This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the pci function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
        struct rds_ib_device *rds_ibdev = client_data;

        if (!rds_ibdev)
                return;

        rds_ib_dev_shutdown(rds_ibdev);

        /* stop connection attempts from getting a reference to this device. */
        ib_set_client_data(device, &rds_ib_client, NULL);

        down_write(&rds_ib_devices_lock);
        list_del_rcu(&rds_ibdev->list);
        up_write(&rds_ib_devices_lock);

        /*
         * This synchronize_rcu() is waiting for readers of both the ib
         * client data and the devices list to finish before we drop
         * both of those references.
         */
        synchronize_rcu();
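        /* One put for the rds_ib_devices list reference, one for the
         * client_data reference taken in rds_ib_add_one().
         */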
        rds_ib_dev_put(rds_ibdev);
        rds_ib_dev_put(rds_ibdev);
}
struct ib_client rds_ib_client = {
        .name   = "rds_ib",
        .add    = rds_ib_add_one,
        .remove = rds_ib_remove_one
};
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
                                    void *buffer)
{
        struct rds_info_rdma_connection *iinfo = buffer;
        struct rds_ib_connection *ic;

        /* We will only ever look at IB transports */
        if (conn->c_trans != &rds_ib_transport)
                return 0;

        iinfo->src_addr = conn->c_laddr;
        iinfo->dst_addr = conn->c_faddr;

        memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
        memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
                struct rdma_dev_addr *dev_addr;

                ic = conn->c_transport_data;
                dev_addr = &ic->i_cm_id->route.addr.dev_addr;

                rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
                rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);

                rds_ibdev = ic->rds_ibdev;
                iinfo->max_send_wr = ic->i_send_ring.w_nr;
                iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
                iinfo->max_send_sge = rds_ibdev->max_sge;
                rds_ib_get_mr_info(rds_ibdev, iinfo);
        }
        return 1;
}
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
                           struct rds_info_iterator *iter,
                           struct rds_info_lengths *lens)
{
        rds_for_each_conn_info(sock, len, iter, lens,
                               rds_ib_conn_info_visitor,
                               sizeof(struct rds_info_rdma_connection));
}
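
/*
 * rds_ib_ic_info() is the handler behind RDS_INFO_IB_CONNECTIONS; it is
 * registered in rds_ib_init() below and reached from userspace through
 * the RDS info getsockopt interface (e.g. by the rds-info utility from
 * rds-tools).
 */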
/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible.  Sending and
 * receiving should be device-agnostic.  Transports would try and maintain
 * connections between peers who have messages queued.  Userspace would be
 * allowed to influence which paths have priority.  We could call userspace
 * asserting this policy "routing".
 */
static int rds_ib_laddr_check(struct net *net, __be32 addr)
{
        int ret;
        struct rdma_cm_id *cm_id;
        struct sockaddr_in sin;

        /* Create a CMA ID and try to bind it. This catches both
         * IB and iWARP capable NICs.
         */
        cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
                               NULL, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = addr;

        /* rdma_bind_addr will only succeed for IB & iWARP devices */
        ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
        /* due to this, we will claim to support iWARP devices unless we
         * check node_type.
         */
        if (ret || !cm_id->device ||
            cm_id->device->node_type != RDMA_NODE_IB_CA)
                ret = -EADDRNOTAVAIL;

        rdsdebug("addr %pI4 ret %d node type %d\n",
                 &addr, ret,
                 cm_id->device ? cm_id->device->node_type : -1);

        rdma_destroy_id(cm_id);

        return ret;
}
static void rds_ib_unregister_client(void)
{
        ib_unregister_client(&rds_ib_client);
        /* wait for rds_ib_dev_free() to complete */
        flush_workqueue(rds_wq);
}
void rds_ib_exit(void)
{
        rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
        rds_ib_unregister_client();
        rds_ib_destroy_nodev_conns();
        rds_ib_sysctl_exit();
        rds_ib_recv_exit();
        rds_trans_unregister(&rds_ib_transport);
        rds_ib_mr_exit();
}
struct rds_transport rds_ib_transport = {
        .laddr_check            = rds_ib_laddr_check,
        .xmit_path_complete     = rds_ib_xmit_path_complete,
        .xmit                   = rds_ib_xmit,
        .xmit_rdma              = rds_ib_xmit_rdma,
        .xmit_atomic            = rds_ib_xmit_atomic,
        .recv_path              = rds_ib_recv_path,
        .conn_alloc             = rds_ib_conn_alloc,
        .conn_free              = rds_ib_conn_free,
        .conn_path_connect      = rds_ib_conn_path_connect,
        .conn_path_shutdown     = rds_ib_conn_path_shutdown,
        .inc_copy_to_user       = rds_ib_inc_copy_to_user,
        .inc_free               = rds_ib_inc_free,
        .cm_initiate_connect    = rds_ib_cm_initiate_connect,
        .cm_handle_connect      = rds_ib_cm_handle_connect,
        .cm_connect_complete    = rds_ib_cm_connect_complete,
        .stats_info_copy        = rds_ib_stats_info_copy,
        .exit                   = rds_ib_exit,
        .get_mr                 = rds_ib_get_mr,
        .sync_mr                = rds_ib_sync_mr,
        .free_mr                = rds_ib_free_mr,
        .flush_mrs              = rds_ib_flush_mrs,
        .t_owner                = THIS_MODULE,
        .t_name                 = "infiniband",
        .t_type                 = RDS_TRANS_IB
};
int rds_ib_init(void)
{
        int ret;

        INIT_LIST_HEAD(&rds_ib_devices);

        ret = rds_ib_mr_init();
        if (ret)
                goto out;

        ret = ib_register_client(&rds_ib_client);
        if (ret)
                goto out_mr_exit;

        ret = rds_ib_sysctl_init();
        if (ret)
                goto out_ibreg;

        ret = rds_ib_recv_init();
        if (ret)
                goto out_sysctl;

        ret = rds_trans_register(&rds_ib_transport);
        if (ret)
                goto out_recv;

        rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);

        goto out;

out_recv:
        rds_ib_recv_exit();
out_sysctl:
        rds_ib_sysctl_exit();
out_ibreg:
        rds_ib_unregister_client();
out_mr_exit:
        rds_ib_mr_exit();
out:
        return ret;
}
452 MODULE_LICENSE("GPL");