// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;

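/* TX forwarding offload is usable for this skb/port pair only when the
 * feature is enabled at all (static key), the egress port has opted in
 * via BR_TX_FWD_OFFLOAD, and the skb did not enter the bridge through
 * that same hardware domain.
 */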
static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has been already forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

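/* Egress to a port is allowed if the frame has not already been forwarded
 * into that port's hardware domain, and either the frame was not forwarded
 * in hardware on ingress (no offload_fwd_mark) or it is headed towards a
 * different hardware domain than the one it came from.
 */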
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
		(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
				  BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)

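/* Offloading a port flag is a two-step transaction: the
 * SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS notifier runs synchronously in
 * atomic context so that drivers can veto unsupported flags, and only then
 * is the SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS attribute actually set,
 * deferred to process context via SWITCHDEV_F_DEFER.
 */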
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		NL_SET_ERR_MSG_WEAK_MOD(extack,
					"error setting offload flag on port");
		return err;
	}

	return 0;
}

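/* Populate the common switchdev notifier payload for an FDB entry. Entries
 * that are local, or that have no destination port, are reported against
 * the bridge device itself.
 */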
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->locked = false;
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		return;

	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
	 * Drivers don't know how to deal with these, so don't notify them to
	 * avoid confusing them.
	 */
	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}

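/* Each distinct physical switch behind the bridge gets its own nonzero
 * "hardware domain" number; ports whose parent IDs match share a hwdom.
 * hwdom 0 is left to mean "not offloaded".
 */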
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;

	return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

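/* Associate the bridge port with a physical switch (identified by its port
 * parent ID) and assign it a hardware domain. Reference-counted so that
 * stacked devices such as bonds may offload the same bridge port through
 * several lower devices of the same switch.
 */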
static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

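/* Replay one FDB entry towards the notifier block of a joining or leaving
 * driver, so that its hardware FDB converges with the bridge's.
 */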
static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}

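/* Replay per-VLAN attributes (currently the MSTI that each VLAN is mapped
 * to) against the bridge device, on behalf of the joining port.
 */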
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return err;
}

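/* Replay the complete VLAN state: the VLAN group of the bridge itself and
 * of every bridge port, followed (when adding) by per-VLAN attributes.
 */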
static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

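/* Completion callback for deferred MDB additions: once the driver has
 * programmed the group, mark the matching port group entries as offloaded
 * under the multicast lock.
 */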
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

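/* Queue one MDB object for later replay, unless an equivalent addition is
 * already sitting in the switchdev deferred queue, in which case replaying
 * it would deliver a duplicate event to the driver.
 */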
static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      struct net_device *dev,
				      unsigned long action,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = id,
			.orig_dev = orig_dev,
		},
	};
	struct switchdev_obj_port_mdb *pmdb;

	br_switchdev_mdb_populate(&mdb, mp);

	if (action == SWITCHDEV_PORT_OBJ_ADD &&
	    switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
		/* This event is already in the deferred queue of
		 * events, so this replay must be elided, lest the
		 * driver receives duplicate events for it. This can
		 * only happen when replaying additions, since
		 * modifications are always immediately visible in
		 * br->mdb_list, whereas actual event delivery may be
		 * delayed.
		 */
		return 0;
	}

	pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
	if (!pmdb)
		return -ENOMEM;

	list_add_tail(&pmdb->obj.list, mdb_list);
	return 0;
}

void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

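/* Replay the MDB state towards a joining port. The entries are first
 * snapshotted into a temporary list under the multicast lock, then
 * delivered to the notifier without the lock held.
 */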
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	/* br_switchdev_mdb_queue_one() will take care to not queue a
	 * replay of an event that is already pending in the switchdev
	 * deferred queue. In order to safely determine that, there
	 * must be no new deferred MDB notifications enqueued for the
	 * duration of the MDB scan. Therefore, grab the write-side
	 * lock to avoid racing with any concurrent IGMP/MLD snooping.
	 */
	spin_lock_bh(&br->multicast_lock);

	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				spin_unlock_bh(&br->multicast_lock);
				goto out_free_mdb;
			}
		}
	}

	spin_unlock_bh(&br->multicast_lock);

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err == -EOPNOTSUPP)
			err = 0;
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}

static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err) {
		/* -EOPNOTSUPP not propagated from MDB replay. */
		return err;
	}

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);

	/* Make sure that the device leaving this bridge has seen all
	 * relevant events before it is disassociated. In the normal
	 * case, when the device is directly attached to the bridge,
	 * this is covered by del_nbp(). If the association was indirect
	 * however, e.g. via a team or bond, and the device is leaving
	 * that intermediate device, then the bridge port remains in
	 * place.
	 */
	switchdev_deferred_process();
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

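/* Sketch of the expected call path, for orientation (see
 * net/switchdev/switchdev.c for the authoritative entry points): a driver
 * reacting to a NETDEV_CHANGEUPPER event would typically call
 * switchdev_bridge_port_offload(), which funnels into
 * br_switchdev_port_offload() above, and would call
 * switchdev_bridge_port_unoffload() on the way out.
 */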
void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}

int br_switchdev_port_replay(struct net_bridge_port *p,
			     struct net_device *dev, const void *ctx,
			     struct notifier_block *atomic_nb,
			     struct notifier_block *blocking_nb,
			     struct netlink_ext_ack *extack)
{
	return nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
}