// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
static struct workqueue_struct *sparx5_owq;
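/* FDB add/del notifications arrive in atomic context; each one is copied
 * into a work item and handled later on the ordered workqueue above, so
 * MAC table updates run in process context and in notification order.
 */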
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
	bool should_flood = flood_flag || port->is_mrouter;
	struct sparx5 *sparx5 = port->sparx5;
	int pgid;

	for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA);
	     pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++)
		sparx5_pgid_update_mask(port, pgid, should_flood);
}
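/* sparx5_port_update_mcast_ip_flood() relies on the IPv4/IPv6 multicast
 * data and control PGIDs occupying a contiguous range; flooding stays on
 * while the port is an mrouter port, regardless of BR_MCAST_FLOOD.
 */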
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (flags.mask & BR_MCAST_FLOOD) {
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_MC_FLOOD),
					!!(flags.val & BR_MCAST_FLOOD));
		sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
	}

	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_UC_FLOOD),
					!!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_BCAST),
					!!(flags.val & BR_BCAST_FLOOD));
}
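/* These flags map to the standard bridge port options; an illustrative way
 * to exercise them from user space (the port name is hypothetical):
 *
 *   bridge link set dev swp0 flood on mcast_flood off bcast_flood on
 */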
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}
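/* The bridge hands over the ageing time in clock_t units; it is converted
 * via jiffies to milliseconds before being programmed into the switch.
 */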
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
					 struct net_device *orig_dev,
					 bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_mdb_entry *e;
	bool flood_flag;

	if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
		return;

	/* Add/del mrouter port on all active mdb entries in HW.
	 * Don't change entry port mask, since that represents
	 * ports that actually joined that group.
	 */
	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (!test_bit(port->portno, e->port_mask) &&
		    ether_addr_is_ip_mcast(e->addr))
			sparx5_pgid_update_mask(port, e->pgid_idx, enable);
	}
	mutex_unlock(&sparx5->mdb_lock);

	/* Enable/disable flooding depending on whether the port is an
	 * mrouter port or mcast flooding is enabled.
	 */
	port->is_mrouter = enable;
	flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
	sparx5_port_update_mcast_ip_flood(port, flood_flag);
}
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when default_pvid is 0, to avoid
		 * collision with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		sparx5_port_attr_mrouter_set(port,
					     attr->orig_dev,
					     attr->u.mrouter);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* This is adding the port to a second bridge, which is
		 * unsupported.
		 */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so we no longer need to copy
	 * multicast frames to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}
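/* A port can only be offloaded to a single bridge at a time; joining a
 * second bridge is rejected above. Illustrative user-space trigger
 * (interface names are hypothetical):
 *
 *   ip link add name br0 type bridge
 *   ip link set dev swp0 master br0
 */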
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
			  port->ndev->dev_addr, 0);

	/* The port returns to host mode, so restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
				  port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}
static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when default_pvid is 0, to avoid
	 * collision with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev,
					      sparx5_get_pgid(sparx5, PGID_CPU),
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
}
static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
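/* The notifier runs in atomic context, so the fdb_info MAC address (which
 * points into memory owned by the notifier caller) is duplicated into a
 * private ETH_ALEN buffer; the worker frees it together with the work item.
 */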
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST),
				  dev->broadcast, v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
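/* A VLAN added on the bridge device itself only installs a broadcast MAC
 * table entry towards the CPU for that VLAN; a VLAN added on a switch port
 * programs the VID, with PVID/untagged taken from the bridge VLAN flags.
 */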
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid,
				  struct sparx5_mdb_entry **entry_out)
{
	struct sparx5_mdb_entry *entry;
	u16 pgid_idx;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
	if (err) {
		kfree(entry);
		return err;
	}

	memcpy(entry->addr, addr, ETH_ALEN);
	entry->vid = vid;
	entry->pgid_idx = pgid_idx;

	mutex_lock(&sparx5->mdb_lock);
	list_add_tail(&entry->list, &sparx5->mdb_entries);
	mutex_unlock(&sparx5->mdb_lock);

	*entry_out = entry;
	return 0;
}
static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid)
{
	struct sparx5_mdb_entry *entry, *tmp;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
		if ((vid == 0 || entry->vid == vid) &&
		    ether_addr_equal(addr, entry->addr)) {
			list_del(&entry->list);

			sparx5_pgid_free(sparx5, entry->pgid_idx);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&sparx5->mdb_lock);
}
static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
						     const unsigned char *addr,
						     u16 vid)
{
	struct sparx5_mdb_entry *e, *found = NULL;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
			found = e;
			break;
		}
	}
	mutex_unlock(&sparx5->mdb_lock);

	return found;
}
static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
		 ANA_AC_PGID_MISC_CFG(pgid));
}
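/* Sets or clears the CPU copy bit in ANA_AC_PGID_MISC_CFG for a multicast
 * PGID, so frames hitting that group are also delivered to the CPU when a
 * host (bridge) mdb entry exists.
 */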
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host, is_new;
	int err, i;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware the VLAN value is not parsed and
	 * we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	is_new = false;
	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry) {
		err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
		is_new = true;
		if (err)
			return err;
	}

	mutex_lock(&spx5->mdb_lock);

	/* Add any mrouter ports to the new entry */
	if (is_new && ether_addr_is_ip_mcast(v->addr))
		for (i = 0; i < spx5->data->consts->n_ports; i++)
			if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
				sparx5_pgid_update_mask(spx5->ports[i],
							entry->pgid_idx,
							true);

	if (is_host && !entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
		entry->cpu_copy = true;
	} else if (!is_host) {
		sparx5_pgid_update_mask(port, entry->pgid_idx, true);
		set_bit(port->portno, entry->port_mask);
	}
	mutex_unlock(&spx5->mdb_lock);

	sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);

	return 0;
}
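/* Port mdb entries set the port bit in the PGID mask; a host mdb entry
 * (reported against the bridge device) enables CPU copy instead.
 * Illustrative user-space trigger (device names are hypothetical):
 *
 *   bridge mdb add dev br0 port swp0 grp 239.1.1.1
 */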
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry)
		return 0;

	mutex_lock(&spx5->mdb_lock);
	if (is_host && entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
		entry->cpu_copy = false;
	} else if (!is_host) {
		clear_bit(port->portno, entry->port_mask);

		/* Port is not an mrouter port or addr is L2 mcast, remove port from mask. */
		if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
			sparx5_pgid_update_mask(port, entry->pgid_idx, false);
	}
	mutex_unlock(&spx5->mdb_lock);

	if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
		/* Clear pgid in case mrouter ports exist
		 * that are not part of the group.
		 */
		sparx5_pgid_clear(spx5, entry->pgid_idx);
		sparx5_mact_forget(spx5, entry->addr, entry->vid);
		sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
	}

	return 0;
}
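/* The MAC table entry and the allocated multicast PGID are only released
 * once no port is left in the group and no CPU copy is requested; clearing
 * the PGID also drops mrouter ports that were added without joining.
 */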
static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}
static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);
	int ret;

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	return 0;
}
static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
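/* Object add/del is delivered on the blocking switchdev chain and may
 * sleep (mutexes, GFP_KERNEL allocations); attribute sets can arrive on
 * either chain, so both notifiers route them to sparx5_port_attr_set().
 */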
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}