/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool              going_down;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list. device_mutex protects writer access by device and client
 * registration / de-registration. lists_rwsem protects reader access to
 * these lists. Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
			       device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}
static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}
static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	ib_cache_release_one(dev);
	kfree(dev->port_immutable);
	kfree(dev);
}
static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}
static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};
/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
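
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * low-level driver embeds struct ib_device at the start of its own device
 * structure and sizes the allocation to cover its private data, as the
 * kernel-doc above describes.  "example_hca" and "example_hca_alloc" are
 * made-up names for this example only.
 */
struct example_hca {
	struct ib_device ib_dev;	/* must be first: the core hands back
					 * a pointer to this member */
	spinlock_t	 lock;		/* driver-private state follows */
};

static struct example_hca *example_hca_alloc(void)
{
	/* One allocation covers the core struct plus the private fields. */
	return (struct example_hca *)ib_alloc_device(sizeof(struct example_hca));
}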
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
		       device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}
static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}

	return 0;
}
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
		       device->name);
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
		       device->name);
		ib_cache_cleanup_one(device);
		goto out;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);

out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
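
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * driver might register the device it allocated.  A name containing "%d"
 * is expanded by alloc_name() above to the first free index, and a NULL
 * port_callback simply means no extra per-port sysfs entries.
 * "example_hca_register" is a made-up name for this example only.
 */
static int example_hca_register(struct ib_device *ibdev)
{
	/* The verbs function pointers (query_device, create_qp, ...) must
	 * already be filled in, or ib_device_check_mandatory() rejects us. */
	strlcpy(ibdev->name, "example%d", IB_DEVICE_NAME_MAX);

	return ib_register_device(ibdev, NULL);
}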
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
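
/*
 * Illustrative sketch (not part of the original file): a minimal client
 * following the contract described in the kernel-doc above.  The
 * "example_client*" names are made up; real clients (ib_cm, ib_umad, ipoib)
 * follow the same pattern and typically call ib_register_client() from
 * their module_init().
 */
static void example_client_add(struct ib_device *device)
{
	pr_info("example client: device %s added\n", device->name);
}

static void example_client_remove(struct ib_device *device, void *client_data)
{
	pr_info("example client: device %s removed\n", device->name);
}

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_client_add,
	.remove = example_client_remove,
};

/* Registration is then just: ib_register_client(&example_client); */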
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);
/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	printk(KERN_WARNING "No client context found for %s/%s\n",
	       device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
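
/*
 * Illustrative sketch (not part of the original file): the usual pattern is
 * for a client's add() callback to allocate per-device state and publish it
 * with ib_set_client_data(); the core then hands the same pointer back to
 * remove(), and any other code path can look it up with
 * ib_get_client_data().  "example_state" and "example_state_client" are
 * made-up names for this example only.
 */
struct example_state {
	int users;
};

static struct ib_client example_state_client;

static void example_state_add(struct ib_device *device)
{
	struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (st)
		ib_set_client_data(device, &example_state_client, st);
}

static void example_state_remove(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* what example_state_add() stored, or NULL */
}

static struct ib_client example_state_client = {
	.name   = "example_state",
	.add    = example_state_add,
	.remove = example_state_remove,
};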
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
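
/*
 * Illustrative sketch (not part of the original file): consumers embed a
 * struct ib_event_handler, bind it to a device and a callback with
 * INIT_IB_EVENT_HANDLER() from <rdma/ib_verbs.h>, and register it.  Since
 * dispatch runs under a spinlock and possibly in interrupt context, the
 * callback must not sleep.  The "example_*" names are made up.
 */
static void example_event_cb(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	pr_info("async event %d on %s\n", event->event, event->device->name);
}

static int example_watch_device(struct ib_device *device,
				struct ib_event_handler *handler)
{
	INIT_IB_EVENT_HANDLER(handler, device, example_event_cb);
	return ib_register_event_handler(handler);
}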
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
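
/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * reporting, say, a port-up interrupt fills a struct ib_event on the stack
 * and hands it to ib_dispatch_event().  "example_report_port_active" is a
 * made-up name; the ib_event fields are the real ones from
 * <rdma/ib_verbs.h>.
 */
static void example_report_port_active(struct ib_device *device, u8 port_num)
{
	struct ib_event event;

	event.device		= device;
	event.element.port_num	= port_num;
	event.event		= IB_EVENT_PORT_ACTIVE;

	ib_dispatch_event(&event);
}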
/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};

	memset(device_attr, 0, sizeof(*device_attr));

	return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);
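
/*
 * Illustrative sketch (not part of the original file): callers pass a
 * struct ib_device_attr on the stack; the helper above zeroes it before
 * asking the driver, so unset fields read as 0.  "example_max_qp" is a
 * made-up name for this example only.
 */
static int example_max_qp(struct ib_device *device)
{
	struct ib_device_attr attr;
	int ret;

	ret = ib_query_device(device, &attr);
	if (ret)
		return ret;

	return attr.max_qp;	/* device limit reported by the driver */
}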
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
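
/*
 * Illustrative sketch (not part of the original file): port numbers are
 * 1-based and run from rdma_start_port() to rdma_end_port(), so callers
 * typically loop over that range.  "example_count_active_ports" is a
 * made-up name for this example only.
 */
static int example_count_active_ports(struct ib_device *device)
{
	struct ib_port_attr attr;
	int active = 0;
	u8 port;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (ib_query_port(device, port, &attr))
			continue;
		if (attr.state == IB_PORT_ACTIVE)
			active++;
	}

	return active;
}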
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls callback() on each
 * port for which filter() returns non zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}
/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each port for which
 * filter() returns non zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;

		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member; if a limited-member entry exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
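
/*
 * Illustrative sketch (not part of the original file): the classic use of
 * ib_find_pkey() is locating the index of the default P_Key (0xffff); the
 * helper above prefers a full-member entry and falls back to a
 * limited-member one.  "example_default_pkey_index" is a made-up name.
 */
static int example_default_pkey_index(struct ib_device *device, u8 port_num,
				      u16 *index)
{
	return ib_find_pkey(device, port_num, 0xffff, index);
}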
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ret = class_register(&ib_class);
	if (ret) {
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
		goto err;
	}

	ret = ibnl_init();
	if (ret) {
		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	return 0;

err_sysfs:
	class_unregister(&ib_class);

err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ibnl_cleanup();
	class_unregister(&ib_class);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);