/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

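/* Report which GID types the port supports as a bitmask: RoCE v1 and/or
 * RoCE v2 for RoCE ports, or only IB_GID_TYPE_IB otherwise.
 */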
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

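/* Apply a single GID add/del operation once for every GID type the port
 * supports, so each address is mirrored across all supported types.
 */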
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

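/* Must be called under rcu_read_lock(): classifies @dev as the active
 * slave, an inactive slave, or not a slave of the bond master @upper.
 */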
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

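/* Filter that matches only when rdma_ndev is currently an inactive slave
 * of its bonding master; used to strip default GIDs from such ports.
 */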
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == cookie)
                return 1;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

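/* Set the port's default GIDs on rdma_ndev, but only when the event
 * netdev is rdma_ndev itself or one of its upper devices and rdma_ndev
 * is not an inactive bonding slave.
 */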
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        unsigned long gid_type_mask;

        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                unsigned long gid_type_mask;

                rcu_read_unlock();

                gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             gid_type_mask,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

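/* Walk the IPv4 addresses of @ndev and add a GID for each one. The
 * addresses are copied to a local list under RCU so the GID cache
 * updates are issued outside the RCU read-side section.
 */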
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet.
         */
        rtnl_lock();
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

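/* ib_enum_all_roce_netdevs() callback: apply the GID operation carried by
 * the queued update_gid_event_work to the matching (device, port).
 */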
static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        return update_gid(parsed->gid_op, device,
                          port, &parsed->gid,
                          &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

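/* Collect all upper devices of the event netdev under RCU, then call
 * handle_netdev() for the netdev itself and for each collected upper.
 */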
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

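/* Command templates shared by several netdev events: add the GIDs of the
 * event netdev itself, or of the netdev together with its upper devices.
 */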
static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

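/* Build the command list for NETDEV_CHANGEUPPER: on unlink, delete the
 * GIDs that belonged to the upper device and re-add the netdev's own
 * GIDs; on link, drop default GIDs from ports that became inactive bond
 * slaves and add the IPs of the new upper device.
 */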
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips,
                .filter = is_eth_port_inactive_slave};

        if (changeupper_info->linking == false) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join,
                .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

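/* Work handler for address notifier events: apply the queued GID add/del
 * on every RoCE port whose netdev matches the event's netdev.
 */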
static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

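/* Common handler for the inet and inet6 address notifiers: translate the
 * event into a GID add/del work item and queue it on gid_cache_wq.
 */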
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register to this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to free'd memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}