/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

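/*
 * Worked example (illustrative): on a port for which both
 * rdma_protocol_roce_eth_encap() and rdma_protocol_roce_udp_encap() are true,
 * the mask returned above is
 * (1UL << IB_GID_TYPE_ROCE) | (1UL << IB_GID_TYPE_ROCE_UDP_ENCAP), so both
 * RoCE v1 and RoCE v2 entries are maintained in the GID table. A non-RoCE
 * port always reports just 1UL << IB_GID_TYPE_IB.
 */
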
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

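/*
 * Because update_gid() walks every GID type in the support mask, a single
 * GID_ADD on a port that supports both RoCE v1 and RoCE v2 installs two cache
 * entries that share the GID value and differ only in gid_attr->gid_type;
 * GID_DEL removes them the same way.
 */
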
enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

static enum bonding_slave_state
is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

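/*
 * IP based GIDs are installed through a netdevice only when it is either not
 * enslaved at all (BONDING_SLAVE_STATE_NA) or is the currently active slave
 * of its bond; an inactive slave must not carry the bond's GIDs. That is what
 * REQUIRED_BOND_STATES below encodes.
 */
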
#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
static bool
is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

static bool
is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
                                  struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

/**
 * is_ndev_for_default_gid_filter - Check if a given netdevice
 * can be considered for default GIDs or not.
 * @ib_dev:     IB device to check
 * @port:       Port to consider for adding default GID
 * @rdma_ndev:  rdma netdevice pointer
 * @cookie:     Netdevice to consider to form a default GID
 *
 * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
 * considered for deriving default RoCE GID, returns false otherwise.
 */
static bool
is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();

        /*
         * When the rdma netdevice is used in bonding, the bonding master
         * netdevice should be considered for default GIDs. Therefore, ignore
         * slave rdma netdevices when bonding is considered.
         * Additionally, when the event (cookie) netdevice is the bond master
         * device, make sure that it is the upper netdevice of the rdma
         * netdevice.
         */
        res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
               (netif_is_bond_master(cookie_ndev) &&
                rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));

        rcu_read_unlock();
        return res;
}

static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
                            struct net_device *rdma_ndev, void *cookie)
{
        return true;
}

static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
                                struct net_device *rdma_ndev, void *cookie)
{
        bool res;

        if (!rdma_ndev)
                return false;

        if (rdma_ndev == cookie)
                return true;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

/**
 * is_upper_ndev_bond_master_filter - Check if a given netdevice
 * is bond master device of netdevice of the RDMA device of port.
 * @ib_dev:     IB device to check
 * @port:       Port to consider for adding default GID
 * @rdma_ndev:  Pointer to rdma netdevice
 * @cookie:     Netdevice to consider to form a default GID
 *
 * is_upper_ndev_bond_master_filter() returns true if a cookie_netdev
 * is bond master device and rdma_ndev is its lower netdevice. It might
 * not have been established as slave device yet.
 */
static bool
is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev,
                                 void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        bool match = false;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        if (netif_is_bond_master(cookie_ndev) &&
            rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
                match = true;
        rcu_read_unlock();

        return match;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *rdma_ndev,
                                            struct net_device *event_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
        unsigned long gid_type_mask;

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }

        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                     gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        const struct in_ifaddr *ifa;
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        in_dev_for_each_ifa_rcu(ifa, in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }

        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

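/*
 * add_netdev_ips() and del_netdev_ips() follow the roce_netdev_callback
 * signature: they are invoked once per matching (ib_dev, port) pair and
 * receive the event netdevice through the opaque cookie argument, which is
 * why they simply pass cookie on as the netdevice to enumerate or purge.
 */
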
/**
 * del_default_gids - Delete default GIDs of the event/cookie netdevice
 * @ib_dev:     RDMA device pointer
 * @port:       Port of the RDMA device whose GID table to consider
 * @rdma_ndev:  Unused rdma netdevice
 * @cookie:     Pointer to event netdevice
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
static void del_default_gids(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        unsigned long gid_type_mask;

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

static void add_default_gids(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = cookie;
        unsigned long gid_type_mask;

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        down_read(&net_rwsem);
        for_each_net(net)
                for_each_netdev(net, ndev) {
                        /*
                         * Filter and add default GIDs of the primary netdevice
                         * when not in bonding mode, or add default GIDs
                         * of bond master device, when in bonding mode.
                         */
                        if (is_ndev_for_default_gid_filter(ib_dev, port,
                                                           rdma_ndev, ndev))
                                add_default_gids(ib_dev, port, rdma_ndev, ndev);

                        /* Add IP based GIDs of the netdevice */
                        if (is_eth_port_of_netdev_filter(ib_dev, port,
                                                         rdma_ndev, ndev))
                                _add_netdev_ips(ib_dev, port, ndev);
                }
        up_read(&net_rwsem);
        rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their GIDs, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        return update_gid(parsed->gid_op, device,
                          port, &parsed->gid,
                          &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper,
                             struct netdev_nested_priv *priv)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = (struct list_head *)priv->data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct netdev_nested_priv priv;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        priv.data = &upper_list;
        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
                                                master_ndev);
                dev_put(master_ndev);
        }
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

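/*
 * Convention for the cmds[] arrays built by the notifier below: unused slots
 * keep cb == NULL (processing stops at the first such slot), and a cmd whose
 * ndev or filter_ndev was left NULL defaults to the event netdevice. Both
 * pointers are reference-counted above and released again in
 * netdevice_event_work_handler() after the enumeration has run.
 */
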
static const struct netdev_event_work_cmd add_cmd = {
        .cb     = add_netdev_ips,
        .filter = is_eth_port_of_netdev_filter
};

static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb     = add_netdev_upper_ips,
        .filter = is_eth_port_of_netdev_filter
};

static void
ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
                  struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd
                        upper_ips_del_cmd = {
                .cb     = del_netdev_upper_ips,
                .filter = upper_device_filter
        };

        cmds[0] = upper_ips_del_cmd;
        cmds[0].ndev = changeupper_info->upper_dev;
        cmds[1] = add_cmd;
}

static const struct netdev_event_work_cmd bonding_default_add_cmd = {
        .cb     = add_default_gids,
        .filter = is_upper_ndev_bond_master_filter
};

static void
ndev_event_link(struct net_device *event_ndev,
                struct netdev_notifier_changeupper_info *changeupper_info,
                struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd
                        bonding_default_del_cmd = {
                .cb     = del_default_gids,
                .filter = is_upper_ndev_bond_master_filter
        };
        /*
         * When a lower netdev is linked to its upper bonding
         * netdev, delete lower slave netdev's default GIDs.
         */
        cmds[0] = bonding_default_del_cmd;
        cmds[0].ndev = event_ndev;
        cmds[0].filter_ndev = changeupper_info->upper_dev;

        /* Now add bonding upper device default GIDs */
        cmds[1] = bonding_default_add_cmd;
        cmds[1].ndev = changeupper_info->upper_dev;
        cmds[1].filter_ndev = changeupper_info->upper_dev;

        /* Now add bonding upper device IP based GIDs */
        cmds[2] = add_cmd_upper_ips;
        cmds[2].ndev = changeupper_info->upper_dev;
        cmds[2].filter_ndev = changeupper_info->upper_dev;
}

static void netdevice_event_changeupper(struct net_device *event_ndev,
                struct netdev_notifier_changeupper_info *changeupper_info,
                struct netdev_event_work_cmd *cmds)
{
        if (changeupper_info->linking)
                ndev_event_link(event_ndev, changeupper_info, cmds);
        else
                ndev_event_unlink(changeupper_info, cmds);
}

static const struct netdev_event_work_cmd add_default_gid_cmd = {
        .cb     = add_default_gids,
        .filter = is_ndev_for_default_gid_filter,
};

static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd
                        bonding_default_del_cmd_join = {
                .cb     = del_netdev_default_ips_join,
                .filter = is_eth_port_inactive_slave_filter
        };
        static const struct netdev_event_work_cmd
                        netdev_del_cmd = {
                .cb     = del_netdev_ips,
                .filter = is_eth_port_of_netdev_filter
        };
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_default_gid_cmd;
                cmds[2] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = netdev_del_cmd;
                if (ndev->reg_state == NETREG_REGISTERED) {
                        cmds[1] = add_default_gid_cmd;
                        cmds[2] = add_cmd;
                }
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(ndev,
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                /* Add default GIDs of the bond device */
                cmds[1] = bonding_default_add_cmd;
                /* Add IP based GIDs of the bond device */
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
                                 work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

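/*
 * addr_event() below may be called from non-sleeping notifier context (the
 * IPv6 address notifier chain is atomic), so it only captures the address in
 * a GFP_ATOMIC allocated work item and defers the actual GID table update to
 * gid_cache_wq; because that workqueue is ordered, add/del events for the
 * same address are processed in the order they were raised.
 */
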
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);

        /* We rely on the netdevice notifier to enumerate all
         * existing devices in the system. Register to this notifier
         * last to make sure we will not miss any IP add/del
         * callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);

        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to free'd memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}