drivers/infiniband/core/roce_gid_mgmt.c
/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
static struct workqueue_struct *gid_cache_wq;
enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ		3
struct netdev_event_work_cmd {
	roce_netdev_callback	cb;
	roce_netdev_filter	filter;
	struct net_device	*ndev;
	struct net_device	*filter_ndev;
};

struct netdev_event_work {
	struct work_struct work;
	struct netdev_event_work_cmd	cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
	bool (*is_supported)(const struct ib_device *device, u8 port_num);
	enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
	{rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE	ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)
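
/* Returns a bitmask of the GID types supported by the port: non-RoCE ports
 * report only IB_GID_TYPE_IB, while RoCE ports report RoCE v1 and/or
 * RoCE v2 (UDP encapsulation) according to the device capabilities.
 */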
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
	int i;
	unsigned int ret_flags = 0;

	if (!rdma_protocol_roce(ib_dev, port))
		return 1UL << IB_GID_TYPE_IB;

	for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
		if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
			ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

	return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);
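
/* Add or delete the given GID in the cache once per GID type the port
 * supports, so each supported RoCE version gets its own cache entry.
 */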
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u8 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	int i;
	unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
		if ((1UL << i) & gid_type_mask) {
			gid_attr->gid_type = i;
			switch (gid_op) {
			case GID_ADD:
				ib_cache_gid_add(ib_dev, port,
						 gid, gid_attr);
				break;
			case GID_DEL:
				ib_cache_gid_del(ib_dev, port,
						 gid, gid_attr);
				break;
			}
		}
	}
}

enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE	= 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE	= 1UL << 1,
	/* No primary slave or the device isn't a slave in bonding */
	BONDING_SLAVE_STATE_NA		= 1UL << 2,
};
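
/* Report whether @dev is the currently active slave of the bond master
 * @upper, an inactive slave, or not a bonding slave at all. The caller
 * must hold rcu_read_lock().
 */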
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								    struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}
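
/* roce_netdev_filter: matches when the event netdev (cookie) runs over this
 * RDMA port, i.e. it is the port netdev itself (or a VLAN on it), or an
 * upper device of it while the port netdev is either the bond's active
 * slave or not enslaved at all.
 */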
#define REQUIRED_BOND_STATES	(BONDING_SLAVE_STATE_ACTIVE |	\
				 BONDING_SLAVE_STATE_NA)
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *real_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
		real_dev = cookie;

	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}
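
/* roce_netdev_filter: matches only when the RDMA port's netdev is currently
 * an inactive slave of a bond master.
 */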
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
				      struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
		BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	return 1;
}
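
/* roce_netdev_filter: matches when the event netdev (cookie) is the RDMA
 * port's netdev itself or any device stacked above it.
 */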
static int upper_device_filter(struct ib_device *ib_dev, u8 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	int res;

	if (!rdma_ndev)
		return 0;

	if (rdma_ndev == cookie)
		return 1;

	rcu_read_lock();
	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u8 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
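
/* Set the port's default GIDs for the event netdev, unless the port netdev
 * is unrelated to it or is currently an inactive bonding slave.
 */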
static void enum_netdev_default_gids(struct ib_device *ib_dev,
				     u8 port, struct net_device *event_ndev,
				     struct net_device *rdma_ndev)
{
	unsigned long gid_type_mask;

	rcu_read_lock();
	if (!rdma_ndev ||
	    ((rdma_ndev != event_ndev &&
	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev,
						netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}
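
/* Delete the port's default GIDs for the event netdev when the port netdev
 * has become an inactive slave of that bonding device.
 */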
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u8 port,
					    struct net_device *event_ndev,
					    struct net_device *rdma_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
	    is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
	    BONDING_SLAVE_STATE_INACTIVE) {
		unsigned long gid_type_mask;

		rcu_read_unlock();

		gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

		ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
					     gid_type_mask,
					     IB_CACHE_GID_DEFAULT_MODE_DELETE);
	} else {
		rcu_read_unlock();
	}
}
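
/* Walk the netdev's IPv4 addresses under RCU, copy them to a local list,
 * then add a GID for each address outside the RCU read-side section.
 */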
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct in_device *in_dev;
	struct sin_list {
		struct list_head	list;
		struct sockaddr_in	ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	for_ifa(in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head	list;
		struct sockaddr_in6	sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid	gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}
static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
	_add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u8 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Lock the rtnl to make sure the netdevs do not move under
	 * our feet
	 */
	rtnl_lock();
	for_each_net(net)
		for_each_netdev(net, ndev)
			if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
				add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
	rtnl_unlock();
}
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u8 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}
struct upper_list {
	struct list_head list;
	struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	struct list_head *upper_list = data;

	if (!entry)
		return 0;

	list_add_tail(&entry->list, upper_list);
	dev_hold(upper);
	entry->upper = upper;

	return 0;
}
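
/* Apply @handle_netdev to the event netdev and to every device stacked above
 * it (e.g. VLANs and bond masters), holding a reference on each upper device
 * until it has been handled.
 */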
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u8 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = cookie;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}
static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				      struct net_device *event_ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}
static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	if (master_ndev)
		dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
						rdma_ndev);
		dev_put(master_ndev);
	}
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
				   struct net_device *rdma_ndev, void *cookie)
{
	bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}
/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work)
		return NOTIFY_DONE;

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(gid_cache_wq, &ndev_work->work);

	return NOTIFY_DONE;
}
static const struct netdev_event_work_cmd add_cmd = {
	.cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
					struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd upper_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

	if (changeupper_info->linking == false) {
		cmds[0] = upper_ips_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd;
	} else {
		cmds[0] = bonding_default_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd_upper_ips;
		cmds[1].ndev = changeupper_info->upper_dev;
		cmds[1].filter_ndev = changeupper_info->upper_dev;
	}
}
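
/* Netdevice notifier: translate netdev events (register, up, address or
 * upper-device changes, bonding failover, unregister) into a sequence of
 * GID add/del commands and queue them on the ordered gid_cache_wq.
 */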
static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
		.cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
	static const struct netdev_event_work_cmd default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = default_del_cmd;
		cmds[1] = add_cmd;
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		cmds[1] = bonding_default_del_cmd_join;
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}
static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}
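
/* Common handler for the inetaddr/inet6addr notifiers: map an IP address
 * add/remove on an Ethernet netdev to a GID add/del and queue the update
 * on gid_cache_wq.
 */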
static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev = ndev;

	queue_work(gid_cache_wq, &work->work);

	return NOTIFY_DONE;
}
static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in	in;
	struct net_device	*ndev;
	struct in_ifaddr	*ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6	in6;
	struct net_device	*ndev;
	struct inet6_ifaddr	*ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}
static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};
int __init roce_gid_mgmt_init(void)
{
	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
	if (!gid_cache_wq)
		return -ENOMEM;

	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);
	/* We rely on the netdevice notifier to enumerate all
	 * existing devices in the system. Register to this notifier
	 * last to make sure we will not miss any IP add/del
	 * callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}
void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);
	/* Ensure all gid deletion tasks complete before we go down,
	 * to avoid any reference to free'd memory. By the time
	 * ib-core is removed, all physical devices have been removed,
	 * so no issue with remaining hardware contexts.
	 */
	destroy_workqueue(gid_cache_wq);
}