/* drivers/infiniband/core/device.c */
1 /*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
34 #include <linux/module.h>
35 #include <linux/string.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/netdevice.h>
41 #include <net/net_namespace.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <linux/hashtable.h>
45 #include <rdma/rdma_netlink.h>
46 #include <rdma/ib_addr.h>
47 #include <rdma/ib_cache.h>
48 #include <rdma/rdma_counter.h>
50 #include "core_priv.h"
51 #include "restrack.h"
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("core kernel InfiniBand API");
55 MODULE_LICENSE("Dual BSD/GPL");
57 struct workqueue_struct *ib_comp_wq;
58 struct workqueue_struct *ib_comp_unbound_wq;
59 struct workqueue_struct *ib_wq;
60 EXPORT_SYMBOL_GPL(ib_wq);
61 static struct workqueue_struct *ib_unreg_wq;
64 * Each of the three rwsem locks (devices, clients, client_data) protects the
65 * xarray of the same name. Specifically it allows the caller to assert that
66 * the MARK will/will not be changing under the lock, and for devices and
67 * clients, that the value in the xarray is still a valid pointer. Change of
68 * the MARK is linked to the object state, so holding the lock and testing the
69 * MARK also asserts that the contained object is in a certain state.
71 * This is used to build a two stage register/unregister flow where objects
72 * can continue to be in the xarray even though they are still in progress to
73 * register/unregister.
75 * The xarray itself provides additional locking, and restartable iteration,
76 * which is also relied on.
78 * Locks should not be nested, with the exception of client_data, which is
79 * allowed to nest under the read side of the other two locks.
81 * The devices_rwsem also protects the device name list, any change or
82 * assignment of device name must also hold the write side to guarantee unique
83 * names.
87 * devices contains devices that have had their names assigned. The
88 * devices may not be registered. Users that care about the registration
89 * status need to call ib_device_try_get() on the device to ensure it is
90 * registered, and keep it registered, for the required duration.
93 static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
94 static DECLARE_RWSEM(devices_rwsem);
95 #define DEVICE_REGISTERED XA_MARK_1
97 static u32 highest_client_id;
98 #define CLIENT_REGISTERED XA_MARK_1
99 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
100 static DECLARE_RWSEM(clients_rwsem);
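/*
 * Illustrative sketch (not part of the original file): the read-side pattern
 * the locking comment above describes. Holding devices_rwsem for read
 * guarantees that DEVICE_REGISTERED cannot change and that every marked entry
 * is a valid ib_device pointer for the duration of the loop:
 *
 *	struct ib_device *dev;
 *	unsigned long index;
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked(&devices, index, dev, DEVICE_REGISTERED) {
 *		... dev is registered and cannot be unregistered here ...
 *	}
 *	up_read(&devices_rwsem);
 */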
102 static void ib_client_put(struct ib_client *client)
104 if (refcount_dec_and_test(&client->uses))
105 complete(&client->uses_zero);
109 * If client_data is registered then the corresponding client must also still
110 * be registered.
112 #define CLIENT_DATA_REGISTERED XA_MARK_1
114 unsigned int rdma_dev_net_id;
117 * A list of net namespaces is maintained in an xarray. This is necessary
118 * because we can't get the locking right using the existing net ns list. We
119 * would require an init_net callback after the list is updated.
121 static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
123 * rwsem to protect accessing the rdma_nets xarray entries.
125 static DECLARE_RWSEM(rdma_nets_rwsem);
127 bool ib_devices_shared_netns = true;
128 module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
129 MODULE_PARM_DESC(netns_mode,
130 "Share device among net namespaces; default=1 (shared)");
132 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
133 * from a specified net namespace or not.
134 * @dev: Pointer to rdma device which needs to be checked
135 * @net: Pointer to net namespace for which access is to be checked
137 * When the rdma device is in shared mode, it ignores the net namespace.
138 * When the rdma device is exclusive to a net namespace, the rdma device's
139 * net namespace is checked against the specified one.
141 bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
143 return (ib_devices_shared_netns ||
144 net_eq(read_pnet(&dev->coredev.rdma_net), net));
146 EXPORT_SYMBOL(rdma_dev_access_netns);
149 * xarray has this behavior where it won't iterate over NULL values stored in
150 * allocated arrays. So we need our own iterator to see all values stored in
151 * the array. This does the same thing as xa_for_each except that it also
152 * returns NULL valued entries if the array is allocating. Simplified to only
153 * work on simple xarrays.
155 static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
156 xa_mark_t filter)
158 XA_STATE(xas, xa, *indexp);
159 void *entry;
161 rcu_read_lock();
162 do {
163 entry = xas_find_marked(&xas, ULONG_MAX, filter);
164 if (xa_is_zero(entry))
165 break;
166 } while (xas_retry(&xas, entry));
167 rcu_read_unlock();
169 if (entry) {
170 *indexp = xas.xa_index;
171 if (xa_is_zero(entry))
172 return NULL;
173 return entry;
175 return XA_ERROR(-ENOENT);
177 #define xan_for_each_marked(xa, index, entry, filter) \
178 for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
179 !xa_is_err(entry); \
180 (index)++, entry = xan_find_marked(xa, &(index), filter))
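/*
 * A minimal usage sketch (mirrors the callers further down in this file):
 * walk every registered client_data slot of a device, including slots where
 * the client stored NULL:
 *
 *	unsigned long index;
 *	void *client_data;
 *
 *	xan_for_each_marked(&device->client_data, index, client_data,
 *			    CLIENT_DATA_REGISTERED) {
 *		struct ib_client *client = xa_load(&clients, index);
 *		...
 *	}
 */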
182 /* RCU hash table mapping netdevice pointers to struct ib_port_data */
183 static DEFINE_SPINLOCK(ndev_hash_lock);
184 static DECLARE_HASHTABLE(ndev_hash, 5);
186 static void free_netdevs(struct ib_device *ib_dev);
187 static void ib_unregister_work(struct work_struct *work);
188 static void __ib_unregister_device(struct ib_device *device);
189 static int ib_security_change(struct notifier_block *nb, unsigned long event,
190 void *lsm_data);
191 static void ib_policy_change_task(struct work_struct *work);
192 static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
194 static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
195 struct va_format *vaf)
197 if (ibdev && ibdev->dev.parent)
198 dev_printk_emit(level[1] - '0',
199 ibdev->dev.parent,
200 "%s %s %s: %pV",
201 dev_driver_string(ibdev->dev.parent),
202 dev_name(ibdev->dev.parent),
203 dev_name(&ibdev->dev),
204 vaf);
205 else if (ibdev)
206 printk("%s%s: %pV",
207 level, dev_name(&ibdev->dev), vaf);
208 else
209 printk("%s(NULL ib_device): %pV", level, vaf);
212 void ibdev_printk(const char *level, const struct ib_device *ibdev,
213 const char *format, ...)
215 struct va_format vaf;
216 va_list args;
218 va_start(args, format);
220 vaf.fmt = format;
221 vaf.va = &args;
223 __ibdev_printk(level, ibdev, &vaf);
225 va_end(args);
227 EXPORT_SYMBOL(ibdev_printk);
229 #define define_ibdev_printk_level(func, level) \
230 void func(const struct ib_device *ibdev, const char *fmt, ...) \
232 struct va_format vaf; \
233 va_list args; \
235 va_start(args, fmt); \
237 vaf.fmt = fmt; \
238 vaf.va = &args; \
240 __ibdev_printk(level, ibdev, &vaf); \
242 va_end(args); \
244 EXPORT_SYMBOL(func);
246 define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
247 define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
248 define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
249 define_ibdev_printk_level(ibdev_err, KERN_ERR);
250 define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
251 define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
252 define_ibdev_printk_level(ibdev_info, KERN_INFO);
254 static struct notifier_block ibdev_lsm_nb = {
255 .notifier_call = ib_security_change,
258 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
259 struct net *net);
261 /* Pointer to the RCU head at the start of the ib_port_data array */
262 struct ib_port_data_rcu {
263 struct rcu_head rcu_head;
264 struct ib_port_data pdata[];
267 static void ib_device_check_mandatory(struct ib_device *device)
269 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
270 static const struct {
271 size_t offset;
272 char *name;
273 } mandatory_table[] = {
274 IB_MANDATORY_FUNC(query_device),
275 IB_MANDATORY_FUNC(query_port),
276 IB_MANDATORY_FUNC(alloc_pd),
277 IB_MANDATORY_FUNC(dealloc_pd),
278 IB_MANDATORY_FUNC(create_qp),
279 IB_MANDATORY_FUNC(modify_qp),
280 IB_MANDATORY_FUNC(destroy_qp),
281 IB_MANDATORY_FUNC(post_send),
282 IB_MANDATORY_FUNC(post_recv),
283 IB_MANDATORY_FUNC(create_cq),
284 IB_MANDATORY_FUNC(destroy_cq),
285 IB_MANDATORY_FUNC(poll_cq),
286 IB_MANDATORY_FUNC(req_notify_cq),
287 IB_MANDATORY_FUNC(get_dma_mr),
288 IB_MANDATORY_FUNC(reg_user_mr),
289 IB_MANDATORY_FUNC(dereg_mr),
290 IB_MANDATORY_FUNC(get_port_immutable)
292 int i;
294 device->kverbs_provider = true;
295 for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
296 if (!*(void **) ((void *) &device->ops +
297 mandatory_table[i].offset)) {
298 device->kverbs_provider = false;
299 break;
305 * Caller must perform ib_device_put() to return the device reference count
306 * when ib_device_get_by_index() returns a valid device pointer.
308 struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
310 struct ib_device *device;
312 down_read(&devices_rwsem);
313 device = xa_load(&devices, index);
314 if (device) {
315 if (!rdma_dev_access_netns(device, net)) {
316 device = NULL;
317 goto out;
320 if (!ib_device_try_get(device))
321 device = NULL;
323 out:
324 up_read(&devices_rwsem);
325 return device;
329 * ib_device_put - Release IB device reference
330 * @device: device whose reference to be released
332 * ib_device_put() releases reference to the IB device to allow it to be
333 * unregistered and eventually freed.
335 void ib_device_put(struct ib_device *device)
337 if (refcount_dec_and_test(&device->refcount))
338 complete(&device->unreg_completion);
340 EXPORT_SYMBOL(ib_device_put);
342 static struct ib_device *__ib_device_get_by_name(const char *name)
344 struct ib_device *device;
345 unsigned long index;
347 xa_for_each (&devices, index, device)
348 if (!strcmp(name, dev_name(&device->dev)))
349 return device;
351 return NULL;
355 * ib_device_get_by_name - Find an IB device by name
356 * @name: The name to look for
357 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
359 * Find and hold an ib_device by its name. The caller must call
360 * ib_device_put() on the returned pointer.
362 struct ib_device *ib_device_get_by_name(const char *name,
363 enum rdma_driver_id driver_id)
365 struct ib_device *device;
367 down_read(&devices_rwsem);
368 device = __ib_device_get_by_name(name);
369 if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
370 device->ops.driver_id != driver_id)
371 device = NULL;
373 if (device) {
374 if (!ib_device_try_get(device))
375 device = NULL;
377 up_read(&devices_rwsem);
378 return device;
380 EXPORT_SYMBOL(ib_device_get_by_name);
382 static int rename_compat_devs(struct ib_device *device)
384 struct ib_core_device *cdev;
385 unsigned long index;
386 int ret = 0;
388 mutex_lock(&device->compat_devs_mutex);
389 xa_for_each (&device->compat_devs, index, cdev) {
390 ret = device_rename(&cdev->dev, dev_name(&device->dev));
391 if (ret) {
392 dev_warn(&cdev->dev,
393 "Fail to rename compatdev to new name %s\n",
394 dev_name(&device->dev));
395 break;
398 mutex_unlock(&device->compat_devs_mutex);
399 return ret;
402 int ib_device_rename(struct ib_device *ibdev, const char *name)
404 unsigned long index;
405 void *client_data;
406 int ret;
408 down_write(&devices_rwsem);
409 if (!strcmp(name, dev_name(&ibdev->dev))) {
410 up_write(&devices_rwsem);
411 return 0;
414 if (__ib_device_get_by_name(name)) {
415 up_write(&devices_rwsem);
416 return -EEXIST;
419 ret = device_rename(&ibdev->dev, name);
420 if (ret) {
421 up_write(&devices_rwsem);
422 return ret;
425 strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
426 ret = rename_compat_devs(ibdev);
428 downgrade_write(&devices_rwsem);
429 down_read(&ibdev->client_data_rwsem);
430 xan_for_each_marked(&ibdev->client_data, index, client_data,
431 CLIENT_DATA_REGISTERED) {
432 struct ib_client *client = xa_load(&clients, index);
434 if (!client || !client->rename)
435 continue;
437 client->rename(ibdev, client_data);
439 up_read(&ibdev->client_data_rwsem);
440 rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT);
441 up_read(&devices_rwsem);
442 return 0;
445 int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
447 if (use_dim > 1)
448 return -EINVAL;
449 ibdev->use_cq_dim = use_dim;
451 return 0;
454 static int alloc_name(struct ib_device *ibdev, const char *name)
456 struct ib_device *device;
457 unsigned long index;
458 struct ida inuse;
459 int rc;
460 int i;
462 lockdep_assert_held_write(&devices_rwsem);
463 ida_init(&inuse);
464 xa_for_each (&devices, index, device) {
465 char buf[IB_DEVICE_NAME_MAX];
467 if (sscanf(dev_name(&device->dev), name, &i) != 1)
468 continue;
469 if (i < 0 || i >= INT_MAX)
470 continue;
471 snprintf(buf, sizeof buf, name, i);
472 if (strcmp(buf, dev_name(&device->dev)) != 0)
473 continue;
475 rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
476 if (rc < 0)
477 goto out;
480 rc = ida_alloc(&inuse, GFP_KERNEL);
481 if (rc < 0)
482 goto out;
484 rc = dev_set_name(&ibdev->dev, name, rc);
485 out:
486 ida_destroy(&inuse);
487 return rc;
490 static void ib_device_release(struct device *device)
492 struct ib_device *dev = container_of(device, struct ib_device, dev);
494 free_netdevs(dev);
495 WARN_ON(refcount_read(&dev->refcount));
496 if (dev->hw_stats_data)
497 ib_device_release_hw_stats(dev->hw_stats_data);
498 if (dev->port_data) {
499 ib_cache_release_one(dev);
500 ib_security_release_port_pkey_list(dev);
501 rdma_counter_release(dev);
502 kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
503 pdata[0]),
504 rcu_head);
507 mutex_destroy(&dev->subdev_lock);
508 mutex_destroy(&dev->unregistration_lock);
509 mutex_destroy(&dev->compat_devs_mutex);
511 xa_destroy(&dev->compat_devs);
512 xa_destroy(&dev->client_data);
513 kfree_rcu(dev, rcu_head);
516 static int ib_device_uevent(const struct device *device,
517 struct kobj_uevent_env *env)
519 if (add_uevent_var(env, "NAME=%s", dev_name(device)))
520 return -ENOMEM;
523 * It would be nice to pass the node GUID with the event...
526 return 0;
529 static const void *net_namespace(const struct device *d)
531 const struct ib_core_device *coredev =
532 container_of(d, struct ib_core_device, dev);
534 return read_pnet(&coredev->rdma_net);
537 static struct class ib_class = {
538 .name = "infiniband",
539 .dev_release = ib_device_release,
540 .dev_uevent = ib_device_uevent,
541 .ns_type = &net_ns_type_operations,
542 .namespace = net_namespace,
545 static void rdma_init_coredev(struct ib_core_device *coredev,
546 struct ib_device *dev, struct net *net)
548 /* This BUILD_BUG_ON is intended to catch layout change
549 * of union of ib_core_device and device.
550 * dev must be the first element as ib_core and provider
551 * drivers use it. Adding anything in ib_core_device before
552 * device will break this assumption.
554 BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
555 offsetof(struct ib_device, dev));
557 coredev->dev.class = &ib_class;
558 coredev->dev.groups = dev->groups;
559 device_initialize(&coredev->dev);
560 coredev->owner = dev;
561 INIT_LIST_HEAD(&coredev->port_list);
562 write_pnet(&coredev->rdma_net, net);
566 * _ib_alloc_device - allocate an IB device struct
567 * @size:size of structure to allocate
569 * Low-level drivers should use ib_alloc_device() to allocate &struct
570 * ib_device. @size is the size of the structure to be allocated,
571 * including any private data used by the low-level driver.
572 * ib_dealloc_device() must be used to free structures allocated with
573 * ib_alloc_device().
575 struct ib_device *_ib_alloc_device(size_t size)
577 struct ib_device *device;
578 unsigned int i;
580 if (WARN_ON(size < sizeof(struct ib_device)))
581 return NULL;
583 device = kzalloc(size, GFP_KERNEL);
584 if (!device)
585 return NULL;
587 if (rdma_restrack_init(device)) {
588 kfree(device);
589 return NULL;
592 rdma_init_coredev(&device->coredev, device, &init_net);
594 INIT_LIST_HEAD(&device->event_handler_list);
595 spin_lock_init(&device->qp_open_list_lock);
596 init_rwsem(&device->event_handler_rwsem);
597 mutex_init(&device->unregistration_lock);
599 * client_data needs to be an allocating xarray because we don't want
600 * our mark to be destroyed if the user stores NULL in the client data.
602 xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
603 init_rwsem(&device->client_data_rwsem);
604 xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
605 mutex_init(&device->compat_devs_mutex);
606 init_completion(&device->unreg_completion);
607 INIT_WORK(&device->unregistration_work, ib_unregister_work);
609 spin_lock_init(&device->cq_pools_lock);
610 for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
611 INIT_LIST_HEAD(&device->cq_pools[i]);
613 rwlock_init(&device->cache_lock);
615 device->uverbs_cmd_mask =
616 BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
617 BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
618 BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
619 BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) |
620 BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
621 BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
622 BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
623 BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
624 BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) |
625 BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) |
626 BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
627 BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
628 BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
629 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
630 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
631 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
632 BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) |
633 BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) |
634 BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
635 BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
636 BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) |
637 BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) |
638 BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) |
639 BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
640 BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
641 BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
642 BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) |
643 BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
644 BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
645 BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);
647 mutex_init(&device->subdev_lock);
648 INIT_LIST_HEAD(&device->subdev_list_head);
649 INIT_LIST_HEAD(&device->subdev_list);
651 return device;
653 EXPORT_SYMBOL(_ib_alloc_device);
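/*
 * Illustrative sketch of the intended call pattern (names like my_driver_dev
 * are assumptions, not part of this file): providers embed struct ib_device
 * as the first member of their own device structure and allocate it through
 * the ib_alloc_device() wrapper macro:
 *
 *	struct my_driver_dev {
 *		struct ib_device ibdev;
 *		... driver private state ...
 *	};
 *
 *	struct my_driver_dev *mdev = ib_alloc_device(my_driver_dev, ibdev);
 *
 *	if (!mdev)
 *		return -ENOMEM;
 *	... on teardown or error unwind: ib_dealloc_device(&mdev->ibdev); ...
 */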
656 * ib_dealloc_device - free an IB device struct
657 * @device:structure to free
659 * Free a structure allocated with ib_alloc_device().
661 void ib_dealloc_device(struct ib_device *device)
663 if (device->ops.dealloc_driver)
664 device->ops.dealloc_driver(device);
667 * ib_unregister_driver() requires all devices to remain in the xarray
668 * while their ops are callable. The last op we call is dealloc_driver
669 * above. This is needed to create a fence on op callbacks prior to
670 * allowing the driver module to unload.
672 down_write(&devices_rwsem);
673 if (xa_load(&devices, device->index) == device)
674 xa_erase(&devices, device->index);
675 up_write(&devices_rwsem);
677 /* Expedite releasing netdev references */
678 free_netdevs(device);
680 WARN_ON(!xa_empty(&device->compat_devs));
681 WARN_ON(!xa_empty(&device->client_data));
682 WARN_ON(refcount_read(&device->refcount));
683 rdma_restrack_clean(device);
684 /* Balances with device_initialize */
685 put_device(&device->dev);
687 EXPORT_SYMBOL(ib_dealloc_device);
690 * add_client_context() and remove_client_context() must be safe against
691 * parallel calls on the same device - registration/unregistration of both the
692 * device and client can be occurring in parallel.
694 * The routines need to be a fence, any caller must not return until the add
695 * or remove is fully completed.
697 static int add_client_context(struct ib_device *device,
698 struct ib_client *client)
700 int ret = 0;
702 if (!device->kverbs_provider && !client->no_kverbs_req)
703 return 0;
705 down_write(&device->client_data_rwsem);
707 * So long as the client is registered hold both the client and device
708 * unregistration locks.
710 if (!refcount_inc_not_zero(&client->uses))
711 goto out_unlock;
712 refcount_inc(&device->refcount);
715 * Another caller to add_client_context got here first and has already
716 * completely initialized context.
718 if (xa_get_mark(&device->client_data, client->client_id,
719 CLIENT_DATA_REGISTERED))
720 goto out;
722 ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
723 GFP_KERNEL));
724 if (ret)
725 goto out;
726 downgrade_write(&device->client_data_rwsem);
727 if (client->add) {
728 if (client->add(device)) {
730 * If a client fails to add then the error code is
731 * ignored, but we won't call any more ops on this
732 * client.
734 xa_erase(&device->client_data, client->client_id);
735 up_read(&device->client_data_rwsem);
736 ib_device_put(device);
737 ib_client_put(client);
738 return 0;
742 /* Readers shall not see a client until add has been completed */
743 xa_set_mark(&device->client_data, client->client_id,
744 CLIENT_DATA_REGISTERED);
745 up_read(&device->client_data_rwsem);
746 return 0;
748 out:
749 ib_device_put(device);
750 ib_client_put(client);
751 out_unlock:
752 up_write(&device->client_data_rwsem);
753 return ret;
756 static void remove_client_context(struct ib_device *device,
757 unsigned int client_id)
759 struct ib_client *client;
760 void *client_data;
762 down_write(&device->client_data_rwsem);
763 if (!xa_get_mark(&device->client_data, client_id,
764 CLIENT_DATA_REGISTERED)) {
765 up_write(&device->client_data_rwsem);
766 return;
768 client_data = xa_load(&device->client_data, client_id);
769 xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
770 client = xa_load(&clients, client_id);
771 up_write(&device->client_data_rwsem);
774 * Notice we cannot be holding any exclusive locks when calling the
775 * remove callback as the remove callback can recurse back into any
776 * public functions in this module and thus try for any locks those
777 * functions take.
779 * For this reason clients and drivers should not call the
780 * unregistration functions while holding any locks.
782 if (client->remove)
783 client->remove(device, client_data);
785 xa_erase(&device->client_data, client_id);
786 ib_device_put(device);
787 ib_client_put(client);
790 static int alloc_port_data(struct ib_device *device)
792 struct ib_port_data_rcu *pdata_rcu;
793 u32 port;
795 if (device->port_data)
796 return 0;
798 /* This can only be called once the physical port range is defined */
799 if (WARN_ON(!device->phys_port_cnt))
800 return -EINVAL;
802 /* Reserve U32_MAX so the logic to go over all the ports is sane */
803 if (WARN_ON(device->phys_port_cnt == U32_MAX))
804 return -EINVAL;
807 * device->port_data is indexed directly by the port number to make
808 * access to this data as efficient as possible.
810 * Therefore port_data is declared as a 1 based array with potential
811 * empty slots at the beginning.
813 pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
814 size_add(rdma_end_port(device), 1)),
815 GFP_KERNEL);
816 if (!pdata_rcu)
817 return -ENOMEM;
819 * The rcu_head is put in front of the port data array and the stored
820 * pointer is adjusted since we never need to see that member until
821 * kfree_rcu.
823 device->port_data = pdata_rcu->pdata;
825 rdma_for_each_port (device, port) {
826 struct ib_port_data *pdata = &device->port_data[port];
828 pdata->ib_dev = device;
829 spin_lock_init(&pdata->pkey_list_lock);
830 INIT_LIST_HEAD(&pdata->pkey_list);
831 spin_lock_init(&pdata->netdev_lock);
832 INIT_HLIST_NODE(&pdata->ndev_hash_link);
834 return 0;
837 static int verify_immutable(const struct ib_device *dev, u32 port)
839 return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
840 rdma_max_mad_size(dev, port) != 0);
843 static int setup_port_data(struct ib_device *device)
845 u32 port;
846 int ret;
848 ret = alloc_port_data(device);
849 if (ret)
850 return ret;
852 rdma_for_each_port (device, port) {
853 struct ib_port_data *pdata = &device->port_data[port];
855 ret = device->ops.get_port_immutable(device, port,
856 &pdata->immutable);
857 if (ret)
858 return ret;
860 if (verify_immutable(device, port))
861 return -EINVAL;
863 return 0;
867 * ib_port_immutable_read() - Read rdma port's immutable data
868 * @dev: IB device
869 * @port: port number whose immutable data to read. It starts with index 1 and
870 * valid up to and including rdma_end_port().
872 const struct ib_port_immutable*
873 ib_port_immutable_read(struct ib_device *dev, unsigned int port)
875 WARN_ON(!rdma_is_port_valid(dev, port));
876 return &dev->port_data[port].immutable;
878 EXPORT_SYMBOL(ib_port_immutable_read);
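/*
 * Illustrative caller sketch (assumed caller, not part of this file):
 * checking a per-port capability through the immutable data:
 *
 *	const struct ib_port_immutable *immutable;
 *	u32 port;
 *
 *	rdma_for_each_port(dev, port) {
 *		immutable = ib_port_immutable_read(dev, port);
 *		if (immutable->core_cap_flags & RDMA_CORE_CAP_PROT_ROCE)
 *			...
 *	}
 */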
880 void ib_get_device_fw_str(struct ib_device *dev, char *str)
882 if (dev->ops.get_dev_fw_str)
883 dev->ops.get_dev_fw_str(dev, str);
884 else
885 str[0] = '\0';
887 EXPORT_SYMBOL(ib_get_device_fw_str);
889 static void ib_policy_change_task(struct work_struct *work)
891 struct ib_device *dev;
892 unsigned long index;
894 down_read(&devices_rwsem);
895 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
896 unsigned int i;
898 rdma_for_each_port (dev, i) {
899 u64 sp;
900 ib_get_cached_subnet_prefix(dev, i, &sp);
901 ib_security_cache_change(dev, i, sp);
904 up_read(&devices_rwsem);
907 static int ib_security_change(struct notifier_block *nb, unsigned long event,
908 void *lsm_data)
910 if (event != LSM_POLICY_CHANGE)
911 return NOTIFY_DONE;
913 schedule_work(&ib_policy_change_work);
914 ib_mad_agent_security_change();
916 return NOTIFY_OK;
919 static void compatdev_release(struct device *dev)
921 struct ib_core_device *cdev =
922 container_of(dev, struct ib_core_device, dev);
924 kfree(cdev);
927 static int add_one_compat_dev(struct ib_device *device,
928 struct rdma_dev_net *rnet)
930 struct ib_core_device *cdev;
931 int ret;
933 lockdep_assert_held(&rdma_nets_rwsem);
934 if (!ib_devices_shared_netns)
935 return 0;
938 * Create and add compat device in all namespaces other than where it
939 * is currently bound to.
941 if (net_eq(read_pnet(&rnet->net),
942 read_pnet(&device->coredev.rdma_net)))
943 return 0;
946 * The first of init_net() or ib_register_device() to take the
947 * compat_devs_mutex wins and gets to add the device. Others will wait
948 * for completion here.
950 mutex_lock(&device->compat_devs_mutex);
951 cdev = xa_load(&device->compat_devs, rnet->id);
952 if (cdev) {
953 ret = 0;
954 goto done;
956 ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
957 if (ret)
958 goto done;
960 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
961 if (!cdev) {
962 ret = -ENOMEM;
963 goto cdev_err;
966 cdev->dev.parent = device->dev.parent;
967 rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
968 cdev->dev.release = compatdev_release;
969 ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
970 if (ret)
971 goto add_err;
973 ret = device_add(&cdev->dev);
974 if (ret)
975 goto add_err;
976 ret = ib_setup_port_attrs(cdev);
977 if (ret)
978 goto port_err;
980 ret = xa_err(xa_store(&device->compat_devs, rnet->id,
981 cdev, GFP_KERNEL));
982 if (ret)
983 goto insert_err;
985 mutex_unlock(&device->compat_devs_mutex);
986 return 0;
988 insert_err:
989 ib_free_port_attrs(cdev);
990 port_err:
991 device_del(&cdev->dev);
992 add_err:
993 put_device(&cdev->dev);
994 cdev_err:
995 xa_release(&device->compat_devs, rnet->id);
996 done:
997 mutex_unlock(&device->compat_devs_mutex);
998 return ret;
1001 static void remove_one_compat_dev(struct ib_device *device, u32 id)
1003 struct ib_core_device *cdev;
1005 mutex_lock(&device->compat_devs_mutex);
1006 cdev = xa_erase(&device->compat_devs, id);
1007 mutex_unlock(&device->compat_devs_mutex);
1008 if (cdev) {
1009 ib_free_port_attrs(cdev);
1010 device_del(&cdev->dev);
1011 put_device(&cdev->dev);
1015 static void remove_compat_devs(struct ib_device *device)
1017 struct ib_core_device *cdev;
1018 unsigned long index;
1020 xa_for_each (&device->compat_devs, index, cdev)
1021 remove_one_compat_dev(device, index);
1024 static int add_compat_devs(struct ib_device *device)
1026 struct rdma_dev_net *rnet;
1027 unsigned long index;
1028 int ret = 0;
1030 lockdep_assert_held(&devices_rwsem);
1032 down_read(&rdma_nets_rwsem);
1033 xa_for_each (&rdma_nets, index, rnet) {
1034 ret = add_one_compat_dev(device, rnet);
1035 if (ret)
1036 break;
1038 up_read(&rdma_nets_rwsem);
1039 return ret;
1042 static void remove_all_compat_devs(void)
1044 struct ib_compat_device *cdev;
1045 struct ib_device *dev;
1046 unsigned long index;
1048 down_read(&devices_rwsem);
1049 xa_for_each (&devices, index, dev) {
1050 unsigned long c_index = 0;
1052 /* Hold nets_rwsem so that any other thread modifying this
1053 * system param can sync with this thread.
1055 down_read(&rdma_nets_rwsem);
1056 xa_for_each (&dev->compat_devs, c_index, cdev)
1057 remove_one_compat_dev(dev, c_index);
1058 up_read(&rdma_nets_rwsem);
1060 up_read(&devices_rwsem);
1063 static int add_all_compat_devs(void)
1065 struct rdma_dev_net *rnet;
1066 struct ib_device *dev;
1067 unsigned long index;
1068 int ret = 0;
1070 down_read(&devices_rwsem);
1071 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
1072 unsigned long net_index = 0;
1074 /* Hold nets_rwsem so that any other thread modifying this
1075 * system param can sync with this thread.
1077 down_read(&rdma_nets_rwsem);
1078 xa_for_each (&rdma_nets, net_index, rnet) {
1079 ret = add_one_compat_dev(dev, rnet);
1080 if (ret)
1081 break;
1083 up_read(&rdma_nets_rwsem);
1085 up_read(&devices_rwsem);
1086 if (ret)
1087 remove_all_compat_devs();
1088 return ret;
1091 int rdma_compatdev_set(u8 enable)
1093 struct rdma_dev_net *rnet;
1094 unsigned long index;
1095 int ret = 0;
1097 down_write(&rdma_nets_rwsem);
1098 if (ib_devices_shared_netns == enable) {
1099 up_write(&rdma_nets_rwsem);
1100 return 0;
1103 /* enable/disable of compat devices is not supported
1104 * when more than default init_net exists.
1106 xa_for_each (&rdma_nets, index, rnet) {
1107 ret++;
1108 break;
1110 if (!ret)
1111 ib_devices_shared_netns = enable;
1112 up_write(&rdma_nets_rwsem);
1113 if (ret)
1114 return -EBUSY;
1116 if (enable)
1117 ret = add_all_compat_devs();
1118 else
1119 remove_all_compat_devs();
1120 return ret;
1123 static void rdma_dev_exit_net(struct net *net)
1125 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
1126 struct ib_device *dev;
1127 unsigned long index;
1128 int ret;
1130 down_write(&rdma_nets_rwsem);
1132 * Prevent the ID from being re-used and hide the id from xa_for_each.
1134 ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
1135 WARN_ON(ret);
1136 up_write(&rdma_nets_rwsem);
1138 down_read(&devices_rwsem);
1139 xa_for_each (&devices, index, dev) {
1140 get_device(&dev->dev);
1142 * Release the devices_rwsem so that the potentially blocking
1143 * device_del doesn't hold the devices_rwsem for too long.
1145 up_read(&devices_rwsem);
1147 remove_one_compat_dev(dev, rnet->id);
1150 * If the real device is in the NS then move it back to init.
1152 rdma_dev_change_netns(dev, net, &init_net);
1154 put_device(&dev->dev);
1155 down_read(&devices_rwsem);
1157 up_read(&devices_rwsem);
1159 rdma_nl_net_exit(rnet);
1160 xa_erase(&rdma_nets, rnet->id);
1163 static __net_init int rdma_dev_init_net(struct net *net)
1165 struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
1166 unsigned long index;
1167 struct ib_device *dev;
1168 int ret;
1170 write_pnet(&rnet->net, net);
1172 ret = rdma_nl_net_init(rnet);
1173 if (ret)
1174 return ret;
1176 /* No need to create any compat devices in default init_net. */
1177 if (net_eq(net, &init_net))
1178 return 0;
1180 ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
1181 if (ret) {
1182 rdma_nl_net_exit(rnet);
1183 return ret;
1186 down_read(&devices_rwsem);
1187 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
1188 /* Hold nets_rwsem so that netlink command cannot change
1189 * system configuration for device sharing mode.
1191 down_read(&rdma_nets_rwsem);
1192 ret = add_one_compat_dev(dev, rnet);
1193 up_read(&rdma_nets_rwsem);
1194 if (ret)
1195 break;
1197 up_read(&devices_rwsem);
1199 if (ret)
1200 rdma_dev_exit_net(net);
1202 return ret;
1206 * Assign the unique string device name and the unique device index. This is
1207 * undone by ib_dealloc_device.
1209 static int assign_name(struct ib_device *device, const char *name)
1211 static u32 last_id;
1212 int ret;
1214 down_write(&devices_rwsem);
1215 /* Assign a unique name to the device */
1216 if (strchr(name, '%'))
1217 ret = alloc_name(device, name);
1218 else
1219 ret = dev_set_name(&device->dev, name);
1220 if (ret)
1221 goto out;
1223 if (__ib_device_get_by_name(dev_name(&device->dev))) {
1224 ret = -ENFILE;
1225 goto out;
1227 strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
1229 ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
1230 &last_id, GFP_KERNEL);
1231 if (ret > 0)
1232 ret = 0;
1234 out:
1235 up_write(&devices_rwsem);
1236 return ret;
1240 * setup_device() allocates memory and sets up data that requires calling the
1241 * device ops; this is the only reason these actions are not done during
1242 * ib_alloc_device. It is undone by ib_dealloc_device().
1244 static int setup_device(struct ib_device *device)
1246 struct ib_udata uhw = {.outlen = 0, .inlen = 0};
1247 int ret;
1249 ib_device_check_mandatory(device);
1251 ret = setup_port_data(device);
1252 if (ret) {
1253 dev_warn(&device->dev, "Couldn't create per-port data\n");
1254 return ret;
1257 memset(&device->attrs, 0, sizeof(device->attrs));
1258 ret = device->ops.query_device(device, &device->attrs, &uhw);
1259 if (ret) {
1260 dev_warn(&device->dev,
1261 "Couldn't query the device attributes\n");
1262 return ret;
1265 return 0;
1268 static void disable_device(struct ib_device *device)
1270 u32 cid;
1272 WARN_ON(!refcount_read(&device->refcount));
1274 down_write(&devices_rwsem);
1275 xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
1276 up_write(&devices_rwsem);
1279 * Remove clients in LIFO order, see assign_client_id. This could be
1280 * more efficient if xarray learns to reverse iterate. Since no new
1281 * clients can be added to this ib_device past this point we only need
1282 * the maximum possible client_id value here.
1284 down_read(&clients_rwsem);
1285 cid = highest_client_id;
1286 up_read(&clients_rwsem);
1287 while (cid) {
1288 cid--;
1289 remove_client_context(device, cid);
1292 ib_cq_pool_cleanup(device);
1294 /* Pairs with refcount_set in enable_device */
1295 ib_device_put(device);
1296 wait_for_completion(&device->unreg_completion);
1299 * compat devices must be removed after device refcount drops to zero.
1300 * Otherwise init_net() may add more compatdevs after removing compat
1301 * devices and before device is disabled.
1303 remove_compat_devs(device);
1307 * An enabled device is visible to all clients and to all the public facing
1308 * APIs that return a device pointer. This always returns with a new get, even
1309 * if it fails.
1311 static int enable_device_and_get(struct ib_device *device)
1313 struct ib_client *client;
1314 unsigned long index;
1315 int ret = 0;
1318 * One ref belongs to the xa and the other belongs to this
1319 * thread. This is needed to guard against parallel unregistration.
1321 refcount_set(&device->refcount, 2);
1322 down_write(&devices_rwsem);
1323 xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
1326 * By using downgrade_write() we ensure that no other thread can clear
1327 * DEVICE_REGISTERED while we are completing the client setup.
1329 downgrade_write(&devices_rwsem);
1331 if (device->ops.enable_driver) {
1332 ret = device->ops.enable_driver(device);
1333 if (ret)
1334 goto out;
1337 down_read(&clients_rwsem);
1338 xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1339 ret = add_client_context(device, client);
1340 if (ret)
1341 break;
1343 up_read(&clients_rwsem);
1344 if (!ret)
1345 ret = add_compat_devs(device);
1346 out:
1347 up_read(&devices_rwsem);
1348 return ret;
1351 static void prevent_dealloc_device(struct ib_device *ib_dev)
1355 static void ib_device_notify_register(struct ib_device *device)
1357 struct net_device *netdev;
1358 u32 port;
1359 int ret;
1361 ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
1362 if (ret)
1363 return;
1365 rdma_for_each_port(device, port) {
1366 netdev = ib_device_get_netdev(device, port);
1367 if (!netdev)
1368 continue;
1370 ret = rdma_nl_notify_event(device, port,
1371 RDMA_NETDEV_ATTACH_EVENT);
1372 dev_put(netdev);
1373 if (ret)
1374 return;
1379 * ib_register_device - Register an IB device with IB core
1380 * @device: Device to register
1381 * @name: unique string device name. This may include a '%' which will
1382 * cause a unique index to be added to the passed device name.
1383 * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
1384 * device will be used. In this case the caller should fully
1385 * set up the ibdev for DMA. This usually means using dma_virt_ops.
1387 * Low-level drivers use ib_register_device() to register their
1388 * devices with the IB core. All registered clients will receive a
1389 * callback for each device that is added. @device must be allocated
1390 * with ib_alloc_device().
1392 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
1393 * asynchronously then the device pointer may become freed as soon as this
1394 * function returns.
1396 int ib_register_device(struct ib_device *device, const char *name,
1397 struct device *dma_device)
1399 int ret;
1401 ret = assign_name(device, name);
1402 if (ret)
1403 return ret;
1406 * If the caller does not provide a DMA capable device then the IB core
1407 * will set up ib_sge and scatterlist structures that stash the kernel
1408 * virtual address into the address field.
1410 WARN_ON(dma_device && !dma_device->dma_parms);
1411 device->dma_device = dma_device;
1413 ret = setup_device(device);
1414 if (ret)
1415 return ret;
1417 ret = ib_cache_setup_one(device);
1418 if (ret) {
1419 dev_warn(&device->dev,
1420 "Couldn't set up InfiniBand P_Key/GID cache\n");
1421 return ret;
1424 device->groups[0] = &ib_dev_attr_group;
1425 device->groups[1] = device->ops.device_group;
1426 ret = ib_setup_device_attrs(device);
1427 if (ret)
1428 goto cache_cleanup;
1430 ib_device_register_rdmacg(device);
1432 rdma_counter_init(device);
1435 * Ensure that ADD uevent is not fired because it
1436 * is too early and the device is not initialized yet.
1438 dev_set_uevent_suppress(&device->dev, true);
1439 ret = device_add(&device->dev);
1440 if (ret)
1441 goto cg_cleanup;
1443 ret = ib_setup_port_attrs(&device->coredev);
1444 if (ret) {
1445 dev_warn(&device->dev,
1446 "Couldn't register device with driver model\n");
1447 goto dev_cleanup;
1450 ret = enable_device_and_get(device);
1451 if (ret) {
1452 void (*dealloc_fn)(struct ib_device *);
1455 * If we hit this error flow then we don't want to
1456 * automatically dealloc the device since the caller is
1457 * expected to call ib_dealloc_device() after
1458 * ib_register_device() fails. This is tricky due to the
1459 * possibility for a parallel unregistration along with this
1460 * error flow. Since we have a refcount here we know any
1461 * parallel flow is stopped in disable_device and will see the
1462 * special dealloc_driver pointer, causing the responsibility to
1463 * ib_dealloc_device() to revert back to this thread.
1465 dealloc_fn = device->ops.dealloc_driver;
1466 device->ops.dealloc_driver = prevent_dealloc_device;
1467 ib_device_put(device);
1468 __ib_unregister_device(device);
1469 device->ops.dealloc_driver = dealloc_fn;
1470 dev_set_uevent_suppress(&device->dev, false);
1471 return ret;
1473 dev_set_uevent_suppress(&device->dev, false);
1474 /* Mark for userspace that device is ready */
1475 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1477 ib_device_notify_register(device);
1478 ib_device_put(device);
1480 return 0;
1482 dev_cleanup:
1483 device_del(&device->dev);
1484 cg_cleanup:
1485 dev_set_uevent_suppress(&device->dev, false);
1486 ib_device_unregister_rdmacg(device);
1487 cache_cleanup:
1488 ib_cache_cleanup_one(device);
1489 return ret;
1491 EXPORT_SYMBOL(ib_register_device);
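/*
 * Illustrative registration sketch (mdev/pdev are assumed names): a provider
 * typically registers right after it has filled in its ib_device_ops and port
 * count. A '%d' in the name asks the core to pick a unique index:
 *
 *	ret = ib_register_device(&mdev->ibdev, "mydev%d", &pdev->dev);
 *	if (ret) {
 *		ib_dealloc_device(&mdev->ibdev);
 *		return ret;
 *	}
 */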
1493 /* Callers must hold a get on the device. */
1494 static void __ib_unregister_device(struct ib_device *ib_dev)
1496 struct ib_device *sub, *tmp;
1498 mutex_lock(&ib_dev->subdev_lock);
1499 list_for_each_entry_safe_reverse(sub, tmp,
1500 &ib_dev->subdev_list_head,
1501 subdev_list) {
1502 list_del(&sub->subdev_list);
1503 ib_dev->ops.del_sub_dev(sub);
1504 ib_device_put(ib_dev);
1506 mutex_unlock(&ib_dev->subdev_lock);
1509 * We have a registration lock so that all the calls to unregister are
1510 * fully fenced, once any unregister returns the device is truly
1511 * unregistered even if multiple callers are unregistering it at the
1512 * same time. This also interacts with the registration flow and
1513 * provides sane semantics if register and unregister are racing.
1515 mutex_lock(&ib_dev->unregistration_lock);
1516 if (!refcount_read(&ib_dev->refcount))
1517 goto out;
1519 disable_device(ib_dev);
1520 rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);
1522 /* Expedite removing unregistered pointers from the hash table */
1523 free_netdevs(ib_dev);
1525 ib_free_port_attrs(&ib_dev->coredev);
1526 device_del(&ib_dev->dev);
1527 ib_device_unregister_rdmacg(ib_dev);
1528 ib_cache_cleanup_one(ib_dev);
1531 * Drivers using the new flow may not call ib_dealloc_device except
1532 * in error unwind prior to registration success.
1534 if (ib_dev->ops.dealloc_driver &&
1535 ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
1536 WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
1537 ib_dealloc_device(ib_dev);
1539 out:
1540 mutex_unlock(&ib_dev->unregistration_lock);
1544 * ib_unregister_device - Unregister an IB device
1545 * @ib_dev: The device to unregister
1547 * Unregister an IB device. All clients will receive a remove callback.
1549 * Callers should call this routine only once, and protect against races with
1550 * registration. Typically it should only be called as part of a remove
1551 * callback in an implementation of driver core's struct device_driver and
1552 * related.
1554 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
1555 * this function.
1557 void ib_unregister_device(struct ib_device *ib_dev)
1559 get_device(&ib_dev->dev);
1560 __ib_unregister_device(ib_dev);
1561 put_device(&ib_dev->dev);
1563 EXPORT_SYMBOL(ib_unregister_device);
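/*
 * Illustrative sketch for a driver that does not set ops.dealloc_driver
 * (names are assumptions): unregister from the remove callback and then free
 * the structure explicitly:
 *
 *	static void my_driver_remove(struct my_driver_dev *mdev)
 *	{
 *		ib_unregister_device(&mdev->ibdev);
 *		ib_dealloc_device(&mdev->ibdev);
 *	}
 *
 * Drivers that do provide ops.dealloc_driver must not call ib_dealloc_device()
 * here, since the device is freed on return as noted above.
 */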
1566 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
1567 * @ib_dev: The device to unregister
1569 * This is the same as ib_unregister_device(), except it includes an internal
1570 * ib_device_put() that should match a 'get' obtained by the caller.
1572 * It is safe to call this routine concurrently from multiple threads while
1573 * holding the 'get'. When the function returns the device is fully
1574 * unregistered.
1576 * Drivers using this flow MUST use the dealloc_driver callback to clean up
1577 * their resources associated with the device and dealloc it.
1579 void ib_unregister_device_and_put(struct ib_device *ib_dev)
1581 WARN_ON(!ib_dev->ops.dealloc_driver);
1582 get_device(&ib_dev->dev);
1583 ib_device_put(ib_dev);
1584 __ib_unregister_device(ib_dev);
1585 put_device(&ib_dev->dev);
1587 EXPORT_SYMBOL(ib_unregister_device_and_put);
1590 * ib_unregister_driver - Unregister all IB devices for a driver
1591 * @driver_id: The driver to unregister
1593 * This implements a fence for device unregistration. It only returns once all
1594 * devices associated with the driver_id have fully completed their
1595 * unregistration and returned from ib_unregister_device*().
1597 * If devices are not yet unregistered, it goes ahead and starts unregistering
1598 * them.
1600 * This does not block creation of new devices with the given driver_id; that
1601 * is the responsibility of the caller.
1603 void ib_unregister_driver(enum rdma_driver_id driver_id)
1605 struct ib_device *ib_dev;
1606 unsigned long index;
1608 down_read(&devices_rwsem);
1609 xa_for_each (&devices, index, ib_dev) {
1610 if (ib_dev->ops.driver_id != driver_id)
1611 continue;
1613 get_device(&ib_dev->dev);
1614 up_read(&devices_rwsem);
1616 WARN_ON(!ib_dev->ops.dealloc_driver);
1617 __ib_unregister_device(ib_dev);
1619 put_device(&ib_dev->dev);
1620 down_read(&devices_rwsem);
1622 up_read(&devices_rwsem);
1624 EXPORT_SYMBOL(ib_unregister_driver);
1626 static void ib_unregister_work(struct work_struct *work)
1628 struct ib_device *ib_dev =
1629 container_of(work, struct ib_device, unregistration_work);
1631 __ib_unregister_device(ib_dev);
1632 put_device(&ib_dev->dev);
1636 * ib_unregister_device_queued - Unregister a device using a work queue
1637 * @ib_dev: The device to unregister
1639 * This schedules an asynchronous unregistration using a WQ for the device. A
1640 * driver should use this to avoid holding locks while doing unregistration,
1641 * such as holding the RTNL lock.
1643 * Drivers using this API must use ib_unregister_driver before module unload
1644 * to ensure that all scheduled unregistrations have completed.
1646 void ib_unregister_device_queued(struct ib_device *ib_dev)
1648 WARN_ON(!refcount_read(&ib_dev->refcount));
1649 WARN_ON(!ib_dev->ops.dealloc_driver);
1650 get_device(&ib_dev->dev);
1651 if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
1652 put_device(&ib_dev->dev);
1654 EXPORT_SYMBOL(ib_unregister_device_queued);
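/*
 * Illustrative sketch of the queued flow (RDMA_DRIVER_SIW is only an example
 * driver id): defer the unregister from a context that must not block, then
 * fence all pending unregistrations at module unload:
 *
 *	ib_unregister_device_queued(&mdev->ibdev);
 *	...
 *	static void __exit my_driver_exit(void)
 *	{
 *		ib_unregister_driver(RDMA_DRIVER_SIW);
 *	}
 */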
1657 * The caller must pass in a device that has the kref held and the refcount
1658 * released. If the device is in cur_net and still registered then it is moved
1659 * into net.
1661 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
1662 struct net *net)
1664 int ret2 = -EINVAL;
1665 int ret;
1667 mutex_lock(&device->unregistration_lock);
1670 * If a device is not held via ib_device_get(), or if the unregistration_lock
1671 * is not held, the namespace can be changed, or it can be unregistered.
1672 * Check again under the lock.
1674 if (refcount_read(&device->refcount) == 0 ||
1675 !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
1676 ret = -ENODEV;
1677 goto out;
1680 kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
1681 disable_device(device);
1684 * At this point no one can be using the device, so it is safe to
1685 * change the namespace.
1687 write_pnet(&device->coredev.rdma_net, net);
1689 down_read(&devices_rwsem);
1691 * Currently rdma devices are system wide unique. So the device name
1692 * is guaranteed free in the new namespace. Publish the new namespace
1693 * at the sysfs level.
1695 ret = device_rename(&device->dev, dev_name(&device->dev));
1696 up_read(&devices_rwsem);
1697 if (ret) {
1698 dev_warn(&device->dev,
1699 "%s: Couldn't rename device after namespace change\n",
1700 __func__);
1701 /* Try and put things back and re-enable the device */
1702 write_pnet(&device->coredev.rdma_net, cur_net);
1705 ret2 = enable_device_and_get(device);
1706 if (ret2) {
1708 * This shouldn't really happen, but if it does, let the user
1709 * retry at a later point. So don't disable the device.
1711 dev_warn(&device->dev,
1712 "%s: Couldn't re-enable device after namespace change\n",
1713 __func__);
1715 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1717 ib_device_put(device);
1718 out:
1719 mutex_unlock(&device->unregistration_lock);
1720 if (ret)
1721 return ret;
1722 return ret2;
1725 int ib_device_set_netns_put(struct sk_buff *skb,
1726 struct ib_device *dev, u32 ns_fd)
1728 struct net *net;
1729 int ret;
1731 net = get_net_ns_by_fd(ns_fd);
1732 if (IS_ERR(net)) {
1733 ret = PTR_ERR(net);
1734 goto net_err;
1737 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1738 ret = -EPERM;
1739 goto ns_err;
1743 * All the ib_clients, including uverbs, are reset when the namespace is
1744 * changed and this cannot be blocked waiting for userspace to do
1745 * something, so disassociation is mandatory.
1747 if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) {
1748 ret = -EOPNOTSUPP;
1749 goto ns_err;
1752 get_device(&dev->dev);
1753 ib_device_put(dev);
1754 ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
1755 put_device(&dev->dev);
1757 put_net(net);
1758 return ret;
1760 ns_err:
1761 put_net(net);
1762 net_err:
1763 ib_device_put(dev);
1764 return ret;
1767 static struct pernet_operations rdma_dev_net_ops = {
1768 .init = rdma_dev_init_net,
1769 .exit = rdma_dev_exit_net,
1770 .id = &rdma_dev_net_id,
1771 .size = sizeof(struct rdma_dev_net),
1774 static int assign_client_id(struct ib_client *client)
1776 int ret;
1778 lockdep_assert_held(&clients_rwsem);
1780 * The add/remove callbacks must be called in FIFO/LIFO order. To
1781 * achieve this we assign client_ids so they are sorted in
1782 * registration order.
1784 client->client_id = highest_client_id;
1785 ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1786 if (ret)
1787 return ret;
1789 highest_client_id++;
1790 xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1791 return 0;
1794 static void remove_client_id(struct ib_client *client)
1796 down_write(&clients_rwsem);
1797 xa_erase(&clients, client->client_id);
1798 for (; highest_client_id; highest_client_id--)
1799 if (xa_load(&clients, highest_client_id - 1))
1800 break;
1801 up_write(&clients_rwsem);
1805 * ib_register_client - Register an IB client
1806 * @client:Client to register
1808 * Upper level users of the IB drivers can use ib_register_client() to
1809 * register callbacks for IB device addition and removal. When an IB
1810 * device is added, each registered client's add method will be called
1811 * (in the order the clients were registered), and when a device is
1812 * removed, each client's remove method will be called (in the reverse
1813 * order that clients were registered). In addition, when
1814 * ib_register_client() is called, the client will receive an add
1815 * callback for all devices already registered.
1817 int ib_register_client(struct ib_client *client)
1819 struct ib_device *device;
1820 unsigned long index;
1821 bool need_unreg = false;
1822 int ret;
1824 refcount_set(&client->uses, 1);
1825 init_completion(&client->uses_zero);
1828 * The devices_rwsem is held in write mode to ensure that a racing
1829 * ib_register_device() sees a consistent view of clients and devices.
1831 down_write(&devices_rwsem);
1832 down_write(&clients_rwsem);
1833 ret = assign_client_id(client);
1834 if (ret)
1835 goto out;
1837 need_unreg = true;
1838 xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
1839 ret = add_client_context(device, client);
1840 if (ret)
1841 goto out;
1843 ret = 0;
1844 out:
1845 up_write(&clients_rwsem);
1846 up_write(&devices_rwsem);
1847 if (need_unreg && ret)
1848 ib_unregister_client(client);
1849 return ret;
1851 EXPORT_SYMBOL(ib_register_client);
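/*
 * Illustrative client sketch (my_client and its callbacks are assumed names):
 *
 *	static int my_add_device(struct ib_device *device)
 *	{
 *		... allocate per-device state, return a negative errno on failure ...
 *	}
 *
 *	static void my_remove_device(struct ib_device *device, void *client_data)
 *	{
 *		... free per-device state ...
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_device,
 *		.remove = my_remove_device,
 *	};
 *
 *	module init: return ib_register_client(&my_client);
 *	module exit: ib_unregister_client(&my_client);
 */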
1854 * ib_unregister_client - Unregister an IB client
1855 * @client:Client to unregister
1857 * Upper level users use ib_unregister_client() to remove their client
1858 * registration. When ib_unregister_client() is called, the client
1859 * will receive a remove callback for each IB device still registered.
1861 * This is a full fence; once it returns, no client callbacks will be called
1862 * or be running in another thread.
1864 void ib_unregister_client(struct ib_client *client)
1866 struct ib_device *device;
1867 unsigned long index;
1869 down_write(&clients_rwsem);
1870 ib_client_put(client);
1871 xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1872 up_write(&clients_rwsem);
1874 /* We do not want to have locks while calling client->remove() */
1875 rcu_read_lock();
1876 xa_for_each (&devices, index, device) {
1877 if (!ib_device_try_get(device))
1878 continue;
1879 rcu_read_unlock();
1881 remove_client_context(device, client->client_id);
1883 ib_device_put(device);
1884 rcu_read_lock();
1886 rcu_read_unlock();
1889 * remove_client_context() is not a fence, it can return even though a
1890 * removal is ongoing. Wait until all removals are completed.
1892 wait_for_completion(&client->uses_zero);
1893 remove_client_id(client);
1895 EXPORT_SYMBOL(ib_unregister_client);
1897 static int __ib_get_global_client_nl_info(const char *client_name,
1898 struct ib_client_nl_info *res)
1900 struct ib_client *client;
1901 unsigned long index;
1902 int ret = -ENOENT;
1904 down_read(&clients_rwsem);
1905 xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1906 if (strcmp(client->name, client_name) != 0)
1907 continue;
1908 if (!client->get_global_nl_info) {
1909 ret = -EOPNOTSUPP;
1910 break;
1912 ret = client->get_global_nl_info(res);
1913 if (WARN_ON(ret == -ENOENT))
1914 ret = -EINVAL;
1915 if (!ret && res->cdev)
1916 get_device(res->cdev);
1917 break;
1919 up_read(&clients_rwsem);
1920 return ret;
1923 static int __ib_get_client_nl_info(struct ib_device *ibdev,
1924 const char *client_name,
1925 struct ib_client_nl_info *res)
1927 unsigned long index;
1928 void *client_data;
1929 int ret = -ENOENT;
1931 down_read(&ibdev->client_data_rwsem);
1932 xan_for_each_marked (&ibdev->client_data, index, client_data,
1933 CLIENT_DATA_REGISTERED) {
1934 struct ib_client *client = xa_load(&clients, index);
1936 if (!client || strcmp(client->name, client_name) != 0)
1937 continue;
1938 if (!client->get_nl_info) {
1939 ret = -EOPNOTSUPP;
1940 break;
1942 ret = client->get_nl_info(ibdev, client_data, res);
1943 if (WARN_ON(ret == -ENOENT))
1944 ret = -EINVAL;
1947 * The cdev is guaranteed valid as long as we are inside the
1948 * client_data_rwsem as remove_one can't be called. Keep it
1949 * valid for the caller.
1951 if (!ret && res->cdev)
1952 get_device(res->cdev);
1953 break;
1955 up_read(&ibdev->client_data_rwsem);
1957 return ret;
1961 * ib_get_client_nl_info - Fetch the nl_info from a client
1962 * @ibdev: IB device
1963 * @client_name: Name of the client
1964 * @res: Result of the query
1966 int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
1967 struct ib_client_nl_info *res)
1969 int ret;
1971 if (ibdev)
1972 ret = __ib_get_client_nl_info(ibdev, client_name, res);
1973 else
1974 ret = __ib_get_global_client_nl_info(client_name, res);
1975 #ifdef CONFIG_MODULES
1976 if (ret == -ENOENT) {
1977 request_module("rdma-client-%s", client_name);
1978 if (ibdev)
1979 ret = __ib_get_client_nl_info(ibdev, client_name, res);
1980 else
1981 ret = __ib_get_global_client_nl_info(client_name, res);
1983 #endif
1984 if (ret) {
1985 if (ret == -ENOENT)
1986 return -EOPNOTSUPP;
1987 return ret;
1990 if (WARN_ON(!res->cdev))
1991 return -EINVAL;
1992 return 0;
1996 * ib_set_client_data - Set IB client context
1997 * @device:Device to set context for
1998 * @client:Client to set context for
1999 * @data:Context to set
2001 * ib_set_client_data() sets client context data that can be retrieved with
2002 * ib_get_client_data(). This can only be called while the client is
2003 * registered to the device, once the ib_client remove() callback returns this
2004 * cannot be called.
2006 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2007 void *data)
2009 void *rc;
2011 if (WARN_ON(IS_ERR(data)))
2012 data = NULL;
2014 rc = xa_store(&device->client_data, client->client_id, data,
2015 GFP_KERNEL);
2016 WARN_ON(xa_is_err(rc));
2018 EXPORT_SYMBOL(ib_set_client_data);
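/*
 * Illustrative sketch (assumed names): a client's add callback usually stores
 * its per-device context here, the remove callback receives it back, and
 * ib_get_client_data() retrieves it in between:
 *
 *	static int my_add_device(struct ib_device *device)
 *	{
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		ib_set_client_data(device, &my_client, ctx);
 *		return 0;
 *	}
 *
 *	static void my_remove_device(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 */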
2020 /**
2021 * ib_register_event_handler - Register an IB event handler
2022 * @event_handler:Handler to register
2023 *
2024 * ib_register_event_handler() registers an event handler that will be
2025 * called back when asynchronous IB events occur (as defined in
2026 * chapter 11 of the InfiniBand Architecture Specification). This
2027 * callback occurs in workqueue context.
2028 */
2029 void ib_register_event_handler(struct ib_event_handler *event_handler)
2031 down_write(&event_handler->device->event_handler_rwsem);
2032 list_add_tail(&event_handler->list,
2033 &event_handler->device->event_handler_list);
2034 up_write(&event_handler->device->event_handler_rwsem);
2036 EXPORT_SYMBOL(ib_register_event_handler);
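/*
 * Usage sketch (illustrative): a consumer fills the handler with
 * INIT_IB_EVENT_HANDLER() and registers it; the callback then runs in
 * workqueue context. "example_event_cb" is a hypothetical function.
 *
 *	static void example_event_cb(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %u is active\n",
 *				event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, example_event_cb);
 *	ib_register_event_handler(&handler);
 */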
2038 /**
2039 * ib_unregister_event_handler - Unregister an event handler
2040 * @event_handler:Handler to unregister
2041 *
2042 * Unregister an event handler registered with
2043 * ib_register_event_handler().
2044 */
2045 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
2047 down_write(&event_handler->device->event_handler_rwsem);
2048 list_del(&event_handler->list);
2049 up_write(&event_handler->device->event_handler_rwsem);
2051 EXPORT_SYMBOL(ib_unregister_event_handler);
2053 void ib_dispatch_event_clients(struct ib_event *event)
2055 struct ib_event_handler *handler;
2057 down_read(&event->device->event_handler_rwsem);
2059 list_for_each_entry(handler, &event->device->event_handler_list, list)
2060 handler->handler(handler, event);
2062 up_read(&event->device->event_handler_rwsem);
2065 static int iw_query_port(struct ib_device *device,
2066 u32 port_num,
2067 struct ib_port_attr *port_attr)
2069 struct in_device *inetdev;
2070 struct net_device *netdev;
2072 memset(port_attr, 0, sizeof(*port_attr));
2074 netdev = ib_device_get_netdev(device, port_num);
2075 if (!netdev)
2076 return -ENODEV;
2078 port_attr->max_mtu = IB_MTU_4096;
2079 port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
2081 if (!netif_carrier_ok(netdev)) {
2082 port_attr->state = IB_PORT_DOWN;
2083 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
2084 } else {
2085 rcu_read_lock();
2086 inetdev = __in_dev_get_rcu(netdev);
2088 if (inetdev && inetdev->ifa_list) {
2089 port_attr->state = IB_PORT_ACTIVE;
2090 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
2091 } else {
2092 port_attr->state = IB_PORT_INIT;
2093 port_attr->phys_state =
2094 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
2097 rcu_read_unlock();
2100 dev_put(netdev);
2101 return device->ops.query_port(device, port_num, port_attr);
2104 static int __ib_query_port(struct ib_device *device,
2105 u32 port_num,
2106 struct ib_port_attr *port_attr)
2108 int err;
2110 memset(port_attr, 0, sizeof(*port_attr));
2112 err = device->ops.query_port(device, port_num, port_attr);
2113 if (err || port_attr->subnet_prefix)
2114 return err;
2116 if (rdma_port_get_link_layer(device, port_num) !=
2117 IB_LINK_LAYER_INFINIBAND)
2118 return 0;
2120 ib_get_cached_subnet_prefix(device, port_num,
2121 &port_attr->subnet_prefix);
2122 return 0;
2125 /**
2126 * ib_query_port - Query IB port attributes
2127 * @device:Device to query
2128 * @port_num:Port number to query
2129 * @port_attr:Port attributes
2130 *
2131 * ib_query_port() returns the attributes of a port through the
2132 * @port_attr pointer.
2133 */
2134 int ib_query_port(struct ib_device *device,
2135 u32 port_num,
2136 struct ib_port_attr *port_attr)
2138 if (!rdma_is_port_valid(device, port_num))
2139 return -EINVAL;
2141 if (rdma_protocol_iwarp(device, port_num))
2142 return iw_query_port(device, port_num, port_attr);
2143 else
2144 return __ib_query_port(device, port_num, port_attr);
2146 EXPORT_SYMBOL(ib_query_port);
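/*
 * Usage sketch (illustrative): callers pass a stack-allocated
 * struct ib_port_attr and inspect fields such as state and active_mtu.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		pr_info("port %u active, active_mtu enum %d\n",
 *			port_num, attr.active_mtu);
 */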
2148 static void add_ndev_hash(struct ib_port_data *pdata)
2150 unsigned long flags;
2152 might_sleep();
2154 spin_lock_irqsave(&ndev_hash_lock, flags);
2155 if (hash_hashed(&pdata->ndev_hash_link)) {
2156 hash_del_rcu(&pdata->ndev_hash_link);
2157 spin_unlock_irqrestore(&ndev_hash_lock, flags);
2158 /*
2159 * We cannot do hash_add_rcu after a hash_del_rcu until the RCU
2160 * grace period has elapsed.
2161 */
2162 synchronize_rcu();
2163 spin_lock_irqsave(&ndev_hash_lock, flags);
2165 if (pdata->netdev)
2166 hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
2167 (uintptr_t)pdata->netdev);
2168 spin_unlock_irqrestore(&ndev_hash_lock, flags);
2171 /**
2172 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
2173 * @ib_dev: Device to modify
2174 * @ndev: net_device to affiliate, may be NULL
2175 * @port: IB port the net_device is connected to
2176 *
2177 * Drivers should use this to link the ib_device to a netdev so the netdev
2178 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
2179 * affiliated with any port.
2180 *
2181 * The caller must ensure that the given ndev is not unregistered or
2182 * unregistering, and that either the ib_device is unregistered or
2183 * ib_device_set_netdev() is called with NULL when the ndev sends a
2184 * NETDEV_UNREGISTER event.
2185 */
2186 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
2187 u32 port)
2189 enum rdma_nl_notify_event_type etype;
2190 struct net_device *old_ndev;
2191 struct ib_port_data *pdata;
2192 unsigned long flags;
2193 int ret;
2195 if (!rdma_is_port_valid(ib_dev, port))
2196 return -EINVAL;
2198 /*
2199 * Drivers wish to call this before ib_register_driver, so we have to
2200 * set up the port data early.
2201 */
2202 ret = alloc_port_data(ib_dev);
2203 if (ret)
2204 return ret;
2206 pdata = &ib_dev->port_data[port];
2207 spin_lock_irqsave(&pdata->netdev_lock, flags);
2208 old_ndev = rcu_dereference_protected(
2209 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2210 if (old_ndev == ndev) {
2211 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2212 return 0;
2215 rcu_assign_pointer(pdata->netdev, ndev);
2216 netdev_put(old_ndev, &pdata->netdev_tracker);
2217 netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
2218 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2220 add_ndev_hash(pdata);
2222 /* Make sure that the device is registered before we send events */
2223 if (xa_load(&devices, ib_dev->index) != ib_dev)
2224 return 0;
2226 etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
2227 rdma_nl_notify_event(ib_dev, port, etype);
2229 return 0;
2231 EXPORT_SYMBOL(ib_device_set_netdev);
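/*
 * Usage sketch (illustrative): a RoCE driver links its netdev while probing
 * and unlinks it again when the netdev goes away. "drv" is a hypothetical
 * driver-private structure embedding struct ib_device; RDMA port numbering
 * starts at 1.
 *
 *	ret = ib_device_set_netdev(&drv->ib_dev, netdev, 1);
 *	...
 *	ib_device_set_netdev(&drv->ib_dev, NULL, 1);
 */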
2233 static void free_netdevs(struct ib_device *ib_dev)
2235 unsigned long flags;
2236 u32 port;
2238 if (!ib_dev->port_data)
2239 return;
2241 rdma_for_each_port (ib_dev, port) {
2242 struct ib_port_data *pdata = &ib_dev->port_data[port];
2243 struct net_device *ndev;
2245 spin_lock_irqsave(&pdata->netdev_lock, flags);
2246 ndev = rcu_dereference_protected(
2247 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2248 if (ndev) {
2249 spin_lock(&ndev_hash_lock);
2250 hash_del_rcu(&pdata->ndev_hash_link);
2251 spin_unlock(&ndev_hash_lock);
2253 /*
2254 * If this is the last dev_put there is still a
2255 * synchronize_rcu before the netdev is kfreed, so we
2256 * can continue to rely on unlocked pointer
2257 * comparisons after the put
2258 */
2259 rcu_assign_pointer(pdata->netdev, NULL);
2260 netdev_put(ndev, &pdata->netdev_tracker);
2262 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2266 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
2267 u32 port)
2269 struct ib_port_data *pdata;
2270 struct net_device *res;
2272 if (!rdma_is_port_valid(ib_dev, port))
2273 return NULL;
2275 if (!ib_dev->port_data)
2276 return NULL;
2278 pdata = &ib_dev->port_data[port];
2280 /*
2281 * New drivers should use ib_device_set_netdev() not the legacy
2282 * get_netdev().
2283 */
2284 if (ib_dev->ops.get_netdev)
2285 res = ib_dev->ops.get_netdev(ib_dev, port);
2286 else {
2287 spin_lock(&pdata->netdev_lock);
2288 res = rcu_dereference_protected(
2289 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2290 dev_hold(res);
2291 spin_unlock(&pdata->netdev_lock);
2294 return res;
2296 EXPORT_SYMBOL(ib_device_get_netdev);
2298 /**
2299 * ib_device_get_by_netdev - Find an IB device associated with a netdev
2300 * @ndev: netdev to locate
2301 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
2302 *
2303 * Find and hold an ib_device that is associated with a netdev via
2304 * ib_device_set_netdev(). The caller must call ib_device_put() on the
2305 * returned pointer.
2306 */
2307 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
2308 enum rdma_driver_id driver_id)
2310 struct ib_device *res = NULL;
2311 struct ib_port_data *cur;
2313 rcu_read_lock();
2314 hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
2315 (uintptr_t)ndev) {
2316 if (rcu_access_pointer(cur->netdev) == ndev &&
2317 (driver_id == RDMA_DRIVER_UNKNOWN ||
2318 cur->ib_dev->ops.driver_id == driver_id) &&
2319 ib_device_try_get(cur->ib_dev)) {
2320 res = cur->ib_dev;
2321 break;
2324 rcu_read_unlock();
2326 return res;
2328 EXPORT_SYMBOL(ib_device_get_by_netdev);
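/*
 * Usage sketch (illustrative): resolve the ib_device behind a netdev and
 * drop the reference once done.
 *
 *	struct ib_device *ibdev;
 *
 *	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
 *	if (ibdev) {
 *		... use ibdev ...
 *		ib_device_put(ibdev);
 *	}
 */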
2330 /**
2331 * ib_enum_roce_netdev - enumerate all RoCE ports
2332 * @ib_dev : IB device we want to query
2333 * @filter: Should we call the callback?
2334 * @filter_cookie: Cookie passed to filter
2335 * @cb: Callback to call for each found RoCE port
2336 * @cookie: Cookie passed back to the callback
2337 *
2338 * Enumerates all of the physical RoCE ports of ib_dev that are
2339 * associated with a netdevice and calls cb() on each port for which
2340 * filter() returns non-zero.
2341 */
2342 void ib_enum_roce_netdev(struct ib_device *ib_dev,
2343 roce_netdev_filter filter,
2344 void *filter_cookie,
2345 roce_netdev_callback cb,
2346 void *cookie)
2348 u32 port;
2350 rdma_for_each_port (ib_dev, port)
2351 if (rdma_protocol_roce(ib_dev, port)) {
2352 struct net_device *idev =
2353 ib_device_get_netdev(ib_dev, port);
2355 if (filter(ib_dev, port, idev, filter_cookie))
2356 cb(ib_dev, port, idev, cookie);
2357 dev_put(idev);
2361 /**
2362 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
2363 * @filter: Should we call the callback?
2364 * @filter_cookie: Cookie passed to filter
2365 * @cb: Callback to call for each found RoCE port
2366 * @cookie: Cookie passed back to the callback
2367 *
2368 * Enumerates the physical ports of all RoCE devices that are
2369 * associated with a netdevice and calls cb() on each port for which
2370 * filter() returns non-zero.
2371 */
2372 void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
2373 void *filter_cookie,
2374 roce_netdev_callback cb,
2375 void *cookie)
2377 struct ib_device *dev;
2378 unsigned long index;
2380 down_read(&devices_rwsem);
2381 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
2382 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
2383 up_read(&devices_rwsem);
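/*
 * Usage sketch (illustrative): walk every RoCE port whose netdev matches a
 * cookie. "match_ndev", "report_port" and "target_ndev" are hypothetical
 * names; the callbacks follow the roce_netdev_filter and
 * roce_netdev_callback signatures.
 *
 *	static int match_ndev(struct ib_device *dev, u32 port,
 *			      struct net_device *idev, void *cookie)
 *	{
 *		return idev == cookie;
 *	}
 *
 *	static void report_port(struct ib_device *dev, u32 port,
 *				struct net_device *idev, void *cookie)
 *	{
 *		pr_info("%s port %u matches\n", dev_name(&dev->dev), port);
 *	}
 *
 *	ib_enum_all_roce_netdevs(match_ndev, target_ndev, report_port, NULL);
 */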
2386 /**
2387 * ib_enum_all_devs - enumerate all ib_devices
2388 * @cb: Callback to call for each found ib_device
2389 *
2390 * Enumerates all ib_devices and calls callback() on each device.
2391 */
2392 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
2393 struct netlink_callback *cb)
2395 unsigned long index;
2396 struct ib_device *dev;
2397 unsigned int idx = 0;
2398 int ret = 0;
2400 down_read(&devices_rwsem);
2401 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
2402 if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
2403 continue;
2405 ret = nldev_cb(dev, skb, cb, idx);
2406 if (ret)
2407 break;
2408 idx++;
2410 up_read(&devices_rwsem);
2411 return ret;
2414 /**
2415 * ib_query_pkey - Get P_Key table entry
2416 * @device:Device to query
2417 * @port_num:Port number to query
2418 * @index:P_Key table index to query
2419 * @pkey:Returned P_Key
2420 *
2421 * ib_query_pkey() fetches the specified P_Key table entry.
2422 */
2423 int ib_query_pkey(struct ib_device *device,
2424 u32 port_num, u16 index, u16 *pkey)
2426 if (!rdma_is_port_valid(device, port_num))
2427 return -EINVAL;
2429 if (!device->ops.query_pkey)
2430 return -EOPNOTSUPP;
2432 return device->ops.query_pkey(device, port_num, index, pkey);
2434 EXPORT_SYMBOL(ib_query_pkey);
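/*
 * Usage sketch (illustrative): read the P_Key at table index 0 of a port.
 *
 *	u16 pkey;
 *
 *	if (!ib_query_pkey(device, port_num, 0, &pkey))
 *		pr_info("pkey[0] = 0x%04x\n", pkey);
 */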
2436 /**
2437 * ib_modify_device - Change IB device attributes
2438 * @device:Device to modify
2439 * @device_modify_mask:Mask of attributes to change
2440 * @device_modify:New attribute values
2441 *
2442 * ib_modify_device() changes a device's attributes as specified by
2443 * the @device_modify_mask and @device_modify structure.
2444 */
2445 int ib_modify_device(struct ib_device *device,
2446 int device_modify_mask,
2447 struct ib_device_modify *device_modify)
2449 if (!device->ops.modify_device)
2450 return -EOPNOTSUPP;
2452 return device->ops.modify_device(device, device_modify_mask,
2453 device_modify);
2455 EXPORT_SYMBOL(ib_modify_device);
2457 /**
2458 * ib_modify_port - Modifies the attributes for the specified port.
2459 * @device: The device to modify.
2460 * @port_num: The number of the port to modify.
2461 * @port_modify_mask: Mask used to specify which attributes of the port
2462 * to change.
2463 * @port_modify: New attribute values for the port.
2464 *
2465 * ib_modify_port() changes a port's attributes as specified by the
2466 * @port_modify_mask and @port_modify structure.
2467 */
2468 int ib_modify_port(struct ib_device *device,
2469 u32 port_num, int port_modify_mask,
2470 struct ib_port_modify *port_modify)
2472 int rc;
2474 if (!rdma_is_port_valid(device, port_num))
2475 return -EINVAL;
2477 if (device->ops.modify_port)
2478 rc = device->ops.modify_port(device, port_num,
2479 port_modify_mask,
2480 port_modify);
2481 else if (rdma_protocol_roce(device, port_num) &&
2482 ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
2483 (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
2484 rc = 0;
2485 else
2486 rc = -EOPNOTSUPP;
2487 return rc;
2489 EXPORT_SYMBOL(ib_modify_port);
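/*
 * Usage sketch (illustrative): set the CM capability bit on a port, much as
 * the CM layer does when it starts listening on a device.
 *
 *	struct ib_port_modify port_modify = {
 *		.set_port_cap_mask = IB_PORT_CM_SUP,
 *	};
 *
 *	ret = ib_modify_port(device, port_num, 0, &port_modify);
 */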
2491 /**
2492 * ib_find_gid - Returns the port number and GID table index where
2493 * a specified GID value occurs. It searches only the IB link layer.
2494 * @device: The device to query.
2495 * @gid: The GID value to search for.
2496 * @port_num: The port number of the device where the GID value was found.
2497 * @index: The index into the GID table where the GID was found. This
2498 * parameter may be NULL.
2499 */
2500 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2501 u32 *port_num, u16 *index)
2503 union ib_gid tmp_gid;
2504 u32 port;
2505 int ret, i;
2507 rdma_for_each_port (device, port) {
2508 if (!rdma_protocol_ib(device, port))
2509 continue;
2511 for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
2512 ++i) {
2513 ret = rdma_query_gid(device, port, i, &tmp_gid);
2514 if (ret)
2515 continue;
2517 if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
2518 *port_num = port;
2519 if (index)
2520 *index = i;
2521 return 0;
2526 return -ENOENT;
2528 EXPORT_SYMBOL(ib_find_gid);
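/*
 * Usage sketch (illustrative): locate the port and GID table index that hold
 * a known GID value.
 *
 *	union ib_gid gid;	(filled in by the caller)
 *	u32 port_num;
 *	u16 index;
 *
 *	if (!ib_find_gid(device, &gid, &port_num, &index))
 *		pr_info("GID found on port %u, index %u\n", port_num, index);
 */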
2530 /**
2531 * ib_find_pkey - Returns the PKey table index where a specified
2532 * PKey value occurs.
2533 * @device: The device to query.
2534 * @port_num: The port number of the device to search for the PKey.
2535 * @pkey: The PKey value to search for.
2536 * @index: The index into the PKey table where the PKey was found.
2537 */
2538 int ib_find_pkey(struct ib_device *device,
2539 u32 port_num, u16 pkey, u16 *index)
2541 int ret, i;
2542 u16 tmp_pkey;
2543 int partial_ix = -1;
2545 for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
2546 ++i) {
2547 ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
2548 if (ret)
2549 return ret;
2550 if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
2551 /* if there is full-member pkey take it.*/
2552 if (tmp_pkey & 0x8000) {
2553 *index = i;
2554 return 0;
2556 if (partial_ix < 0)
2557 partial_ix = i;
2561 /*no full-member, if exists take the limited*/
2562 if (partial_ix >= 0) {
2563 *index = partial_ix;
2564 return 0;
2566 return -ENOENT;
2568 EXPORT_SYMBOL(ib_find_pkey);
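/*
 * Usage sketch (illustrative): map the default full-membership P_Key
 * (0xffff) to its table index on a port.
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *		pr_info("default pkey at index %u\n", index);
 */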
2570 /**
2571 * ib_get_net_dev_by_params() - Return the appropriate net_dev
2572 * for a received CM request
2573 * @dev: An RDMA device on which the request has been received.
2574 * @port: Port number on the RDMA device.
2575 * @pkey: The Pkey the request came on.
2576 * @gid: A GID that the net_dev uses to communicate.
2577 * @addr: Contains the IP address that the request specified as its
2578 * destination.
2579 *
2580 */
2581 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
2582 u32 port,
2583 u16 pkey,
2584 const union ib_gid *gid,
2585 const struct sockaddr *addr)
2587 struct net_device *net_dev = NULL;
2588 unsigned long index;
2589 void *client_data;
2591 if (!rdma_protocol_ib(dev, port))
2592 return NULL;
2594 /*
2595 * Holding the read side guarantees that the client will not become
2596 * unregistered while we are calling get_net_dev_by_params()
2597 */
2598 down_read(&dev->client_data_rwsem);
2599 xan_for_each_marked (&dev->client_data, index, client_data,
2600 CLIENT_DATA_REGISTERED) {
2601 struct ib_client *client = xa_load(&clients, index);
2603 if (!client || !client->get_net_dev_by_params)
2604 continue;
2606 net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
2607 addr, client_data);
2608 if (net_dev)
2609 break;
2611 up_read(&dev->client_data_rwsem);
2613 return net_dev;
2615 EXPORT_SYMBOL(ib_get_net_dev_by_params);
2617 void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
2619 struct ib_device_ops *dev_ops = &dev->ops;
2620 #define SET_DEVICE_OP(ptr, name) \
2621 do { \
2622 if (ops->name) \
2623 if (!((ptr)->name)) \
2624 (ptr)->name = ops->name; \
2625 } while (0)
2627 #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
2629 if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
2630 WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
2631 dev_ops->driver_id != ops->driver_id);
2632 dev_ops->driver_id = ops->driver_id;
2634 if (ops->owner) {
2635 WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
2636 dev_ops->owner = ops->owner;
2638 if (ops->uverbs_abi_ver)
2639 dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;
2641 dev_ops->uverbs_no_driver_id_binding |=
2642 ops->uverbs_no_driver_id_binding;
2644 SET_DEVICE_OP(dev_ops, add_gid);
2645 SET_DEVICE_OP(dev_ops, add_sub_dev);
2646 SET_DEVICE_OP(dev_ops, advise_mr);
2647 SET_DEVICE_OP(dev_ops, alloc_dm);
2648 SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
2649 SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
2650 SET_DEVICE_OP(dev_ops, alloc_mr);
2651 SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
2652 SET_DEVICE_OP(dev_ops, alloc_mw);
2653 SET_DEVICE_OP(dev_ops, alloc_pd);
2654 SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
2655 SET_DEVICE_OP(dev_ops, alloc_ucontext);
2656 SET_DEVICE_OP(dev_ops, alloc_xrcd);
2657 SET_DEVICE_OP(dev_ops, attach_mcast);
2658 SET_DEVICE_OP(dev_ops, check_mr_status);
2659 SET_DEVICE_OP(dev_ops, counter_alloc_stats);
2660 SET_DEVICE_OP(dev_ops, counter_bind_qp);
2661 SET_DEVICE_OP(dev_ops, counter_dealloc);
2662 SET_DEVICE_OP(dev_ops, counter_unbind_qp);
2663 SET_DEVICE_OP(dev_ops, counter_update_stats);
2664 SET_DEVICE_OP(dev_ops, create_ah);
2665 SET_DEVICE_OP(dev_ops, create_counters);
2666 SET_DEVICE_OP(dev_ops, create_cq);
2667 SET_DEVICE_OP(dev_ops, create_flow);
2668 SET_DEVICE_OP(dev_ops, create_qp);
2669 SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
2670 SET_DEVICE_OP(dev_ops, create_srq);
2671 SET_DEVICE_OP(dev_ops, create_user_ah);
2672 SET_DEVICE_OP(dev_ops, create_wq);
2673 SET_DEVICE_OP(dev_ops, dealloc_dm);
2674 SET_DEVICE_OP(dev_ops, dealloc_driver);
2675 SET_DEVICE_OP(dev_ops, dealloc_mw);
2676 SET_DEVICE_OP(dev_ops, dealloc_pd);
2677 SET_DEVICE_OP(dev_ops, dealloc_ucontext);
2678 SET_DEVICE_OP(dev_ops, dealloc_xrcd);
2679 SET_DEVICE_OP(dev_ops, del_gid);
2680 SET_DEVICE_OP(dev_ops, del_sub_dev);
2681 SET_DEVICE_OP(dev_ops, dereg_mr);
2682 SET_DEVICE_OP(dev_ops, destroy_ah);
2683 SET_DEVICE_OP(dev_ops, destroy_counters);
2684 SET_DEVICE_OP(dev_ops, destroy_cq);
2685 SET_DEVICE_OP(dev_ops, destroy_flow);
2686 SET_DEVICE_OP(dev_ops, destroy_flow_action);
2687 SET_DEVICE_OP(dev_ops, destroy_qp);
2688 SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
2689 SET_DEVICE_OP(dev_ops, destroy_srq);
2690 SET_DEVICE_OP(dev_ops, destroy_wq);
2691 SET_DEVICE_OP(dev_ops, device_group);
2692 SET_DEVICE_OP(dev_ops, detach_mcast);
2693 SET_DEVICE_OP(dev_ops, disassociate_ucontext);
2694 SET_DEVICE_OP(dev_ops, drain_rq);
2695 SET_DEVICE_OP(dev_ops, drain_sq);
2696 SET_DEVICE_OP(dev_ops, enable_driver);
2697 SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry);
2698 SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
2699 SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw);
2700 SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
2701 SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
2702 SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
2703 SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
2704 SET_DEVICE_OP(dev_ops, fill_res_srq_entry);
2705 SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw);
2706 SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
2707 SET_DEVICE_OP(dev_ops, get_dev_fw_str);
2708 SET_DEVICE_OP(dev_ops, get_dma_mr);
2709 SET_DEVICE_OP(dev_ops, get_hw_stats);
2710 SET_DEVICE_OP(dev_ops, get_link_layer);
2711 SET_DEVICE_OP(dev_ops, get_netdev);
2712 SET_DEVICE_OP(dev_ops, get_numa_node);
2713 SET_DEVICE_OP(dev_ops, get_port_immutable);
2714 SET_DEVICE_OP(dev_ops, get_vector_affinity);
2715 SET_DEVICE_OP(dev_ops, get_vf_config);
2716 SET_DEVICE_OP(dev_ops, get_vf_guid);
2717 SET_DEVICE_OP(dev_ops, get_vf_stats);
2718 SET_DEVICE_OP(dev_ops, iw_accept);
2719 SET_DEVICE_OP(dev_ops, iw_add_ref);
2720 SET_DEVICE_OP(dev_ops, iw_connect);
2721 SET_DEVICE_OP(dev_ops, iw_create_listen);
2722 SET_DEVICE_OP(dev_ops, iw_destroy_listen);
2723 SET_DEVICE_OP(dev_ops, iw_get_qp);
2724 SET_DEVICE_OP(dev_ops, iw_reject);
2725 SET_DEVICE_OP(dev_ops, iw_rem_ref);
2726 SET_DEVICE_OP(dev_ops, map_mr_sg);
2727 SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
2728 SET_DEVICE_OP(dev_ops, mmap);
2729 SET_DEVICE_OP(dev_ops, mmap_free);
2730 SET_DEVICE_OP(dev_ops, modify_ah);
2731 SET_DEVICE_OP(dev_ops, modify_cq);
2732 SET_DEVICE_OP(dev_ops, modify_device);
2733 SET_DEVICE_OP(dev_ops, modify_hw_stat);
2734 SET_DEVICE_OP(dev_ops, modify_port);
2735 SET_DEVICE_OP(dev_ops, modify_qp);
2736 SET_DEVICE_OP(dev_ops, modify_srq);
2737 SET_DEVICE_OP(dev_ops, modify_wq);
2738 SET_DEVICE_OP(dev_ops, peek_cq);
2739 SET_DEVICE_OP(dev_ops, poll_cq);
2740 SET_DEVICE_OP(dev_ops, port_groups);
2741 SET_DEVICE_OP(dev_ops, post_recv);
2742 SET_DEVICE_OP(dev_ops, post_send);
2743 SET_DEVICE_OP(dev_ops, post_srq_recv);
2744 SET_DEVICE_OP(dev_ops, process_mad);
2745 SET_DEVICE_OP(dev_ops, query_ah);
2746 SET_DEVICE_OP(dev_ops, query_device);
2747 SET_DEVICE_OP(dev_ops, query_gid);
2748 SET_DEVICE_OP(dev_ops, query_pkey);
2749 SET_DEVICE_OP(dev_ops, query_port);
2750 SET_DEVICE_OP(dev_ops, query_qp);
2751 SET_DEVICE_OP(dev_ops, query_srq);
2752 SET_DEVICE_OP(dev_ops, query_ucontext);
2753 SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
2754 SET_DEVICE_OP(dev_ops, read_counters);
2755 SET_DEVICE_OP(dev_ops, reg_dm_mr);
2756 SET_DEVICE_OP(dev_ops, reg_user_mr);
2757 SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
2758 SET_DEVICE_OP(dev_ops, req_notify_cq);
2759 SET_DEVICE_OP(dev_ops, rereg_user_mr);
2760 SET_DEVICE_OP(dev_ops, resize_cq);
2761 SET_DEVICE_OP(dev_ops, set_vf_guid);
2762 SET_DEVICE_OP(dev_ops, set_vf_link_state);
2763 SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
2765 SET_OBJ_SIZE(dev_ops, ib_ah);
2766 SET_OBJ_SIZE(dev_ops, ib_counters);
2767 SET_OBJ_SIZE(dev_ops, ib_cq);
2768 SET_OBJ_SIZE(dev_ops, ib_mw);
2769 SET_OBJ_SIZE(dev_ops, ib_pd);
2770 SET_OBJ_SIZE(dev_ops, ib_qp);
2771 SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
2772 SET_OBJ_SIZE(dev_ops, ib_srq);
2773 SET_OBJ_SIZE(dev_ops, ib_ucontext);
2774 SET_OBJ_SIZE(dev_ops, ib_xrcd);
2776 EXPORT_SYMBOL(ib_set_device_ops);
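/*
 * Usage sketch (illustrative): a driver fills a static ops table and
 * installs it once on its ib_device; callbacks that were already set are
 * not overridden. The "example_*" names are hypothetical.
 *
 *	static const struct ib_device_ops example_dev_ops = {
 *		.owner = THIS_MODULE,
 *		.query_device = example_query_device,
 *		.query_port = example_query_port,
 *		.alloc_pd = example_alloc_pd,
 *		.dealloc_pd = example_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &example_dev_ops);
 */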
2778 int ib_add_sub_device(struct ib_device *parent,
2779 enum rdma_nl_dev_type type,
2780 const char *name)
2782 struct ib_device *sub;
2783 int ret = 0;
2785 if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev)
2786 return -EOPNOTSUPP;
2788 if (!ib_device_try_get(parent))
2789 return -EINVAL;
2791 sub = parent->ops.add_sub_dev(parent, type, name);
2792 if (IS_ERR(sub)) {
2793 ib_device_put(parent);
2794 return PTR_ERR(sub);
2797 sub->type = type;
2798 sub->parent = parent;
2800 mutex_lock(&parent->subdev_lock);
2801 list_add_tail(&parent->subdev_list_head, &sub->subdev_list);
2802 mutex_unlock(&parent->subdev_lock);
2804 return ret;
2806 EXPORT_SYMBOL(ib_add_sub_device);
2808 int ib_del_sub_device_and_put(struct ib_device *sub)
2810 struct ib_device *parent = sub->parent;
2812 if (!parent)
2813 return -EOPNOTSUPP;
2815 mutex_lock(&parent->subdev_lock);
2816 list_del(&sub->subdev_list);
2817 mutex_unlock(&parent->subdev_lock);
2819 ib_device_put(sub);
2820 parent->ops.del_sub_dev(sub);
2821 ib_device_put(parent);
2823 return 0;
2825 EXPORT_SYMBOL(ib_del_sub_device_and_put);
2827 #ifdef CONFIG_INFINIBAND_VIRT_DMA
2828 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
2830 struct scatterlist *s;
2831 int i;
2833 for_each_sg(sg, s, nents, i) {
2834 sg_dma_address(s) = (uintptr_t)sg_virt(s);
2835 sg_dma_len(s) = s->length;
2837 return nents;
2839 EXPORT_SYMBOL(ib_dma_virt_map_sg);
2840 #endif /* CONFIG_INFINIBAND_VIRT_DMA */
2842 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
2843 [RDMA_NL_LS_OP_RESOLVE] = {
2844 .doit = ib_nl_handle_resolve_resp,
2845 .flags = RDMA_NL_ADMIN_PERM,
2847 [RDMA_NL_LS_OP_SET_TIMEOUT] = {
2848 .doit = ib_nl_handle_set_timeout,
2849 .flags = RDMA_NL_ADMIN_PERM,
2851 [RDMA_NL_LS_OP_IP_RESOLVE] = {
2852 .doit = ib_nl_handle_ip_res_resp,
2853 .flags = RDMA_NL_ADMIN_PERM,
2857 static int ib_netdevice_event(struct notifier_block *this,
2858 unsigned long event, void *ptr)
2860 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2861 struct net_device *ib_ndev;
2862 struct ib_device *ibdev;
2863 u32 port;
2865 switch (event) {
2866 case NETDEV_CHANGENAME:
2867 ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
2868 if (!ibdev)
2869 return NOTIFY_DONE;
2871 rdma_for_each_port(ibdev, port) {
2872 ib_ndev = ib_device_get_netdev(ibdev, port);
2873 if (ndev == ib_ndev)
2874 rdma_nl_notify_event(ibdev, port,
2875 RDMA_NETDEV_RENAME_EVENT);
2876 dev_put(ib_ndev);
2878 ib_device_put(ibdev);
2879 break;
2880 default:
2881 break;
2884 return NOTIFY_DONE;
2887 static struct notifier_block nb_netdevice = {
2888 .notifier_call = ib_netdevice_event,
2891 static int __init ib_core_init(void)
2893 int ret = -ENOMEM;
2895 ib_wq = alloc_workqueue("infiniband", 0, 0);
2896 if (!ib_wq)
2897 return -ENOMEM;
2899 ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
2900 WQ_UNBOUND_MAX_ACTIVE);
2901 if (!ib_unreg_wq)
2902 goto err;
2904 ib_comp_wq = alloc_workqueue("ib-comp-wq",
2905 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
2906 if (!ib_comp_wq)
2907 goto err_unbound;
2909 ib_comp_unbound_wq =
2910 alloc_workqueue("ib-comp-unb-wq",
2911 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
2912 WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
2913 if (!ib_comp_unbound_wq)
2914 goto err_comp;
2916 ret = class_register(&ib_class);
2917 if (ret) {
2918 pr_warn("Couldn't create InfiniBand device class\n");
2919 goto err_comp_unbound;
2922 rdma_nl_init();
2924 ret = addr_init();
2925 if (ret) {
2926 pr_warn("Couldn't init IB address resolution\n");
2927 goto err_ibnl;
2930 ret = ib_mad_init();
2931 if (ret) {
2932 pr_warn("Couldn't init IB MAD\n");
2933 goto err_addr;
2936 ret = ib_sa_init();
2937 if (ret) {
2938 pr_warn("Couldn't init SA\n");
2939 goto err_mad;
2942 ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
2943 if (ret) {
2944 pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
2945 goto err_sa;
2948 ret = register_pernet_device(&rdma_dev_net_ops);
2949 if (ret) {
2950 pr_warn("Couldn't init compat dev. ret %d\n", ret);
2951 goto err_compat;
2954 nldev_init();
2955 rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
2956 ret = roce_gid_mgmt_init();
2957 if (ret) {
2958 pr_warn("Couldn't init RoCE GID management\n");
2959 goto err_parent;
2962 register_netdevice_notifier(&nb_netdevice);
2964 return 0;
2966 err_parent:
2967 rdma_nl_unregister(RDMA_NL_LS);
2968 nldev_exit();
2969 unregister_pernet_device(&rdma_dev_net_ops);
2970 err_compat:
2971 unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
2972 err_sa:
2973 ib_sa_cleanup();
2974 err_mad:
2975 ib_mad_cleanup();
2976 err_addr:
2977 addr_cleanup();
2978 err_ibnl:
2979 class_unregister(&ib_class);
2980 err_comp_unbound:
2981 destroy_workqueue(ib_comp_unbound_wq);
2982 err_comp:
2983 destroy_workqueue(ib_comp_wq);
2984 err_unbound:
2985 destroy_workqueue(ib_unreg_wq);
2986 err:
2987 destroy_workqueue(ib_wq);
2988 return ret;
2991 static void __exit ib_core_cleanup(void)
2993 unregister_netdevice_notifier(&nb_netdevice);
2994 roce_gid_mgmt_cleanup();
2995 rdma_nl_unregister(RDMA_NL_LS);
2996 nldev_exit();
2997 unregister_pernet_device(&rdma_dev_net_ops);
2998 unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
2999 ib_sa_cleanup();
3000 ib_mad_cleanup();
3001 addr_cleanup();
3002 rdma_nl_exit();
3003 class_unregister(&ib_class);
3004 destroy_workqueue(ib_comp_unbound_wq);
3005 destroy_workqueue(ib_comp_wq);
3006 /* Make sure that any pending umem accounting work is done. */
3007 destroy_workqueue(ib_wq);
3008 destroy_workqueue(ib_unreg_wq);
3009 WARN_ON(!xa_empty(&clients));
3010 WARN_ON(!xa_empty(&devices));
3013 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
3015 /* ib core relies on netdev stack to first register net_ns_type_operations
3016 * ns kobject type before ib_core initialization.
3017 */
3018 fs_initcall(ib_core_init);
3019 module_exit(ib_core_cleanup);