/*
 *	Linux ethernet bridge
 *
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"
/*
 * Determine the initial path cost based on speed, using the
 * recommendations from the 802.1d standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}
/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}
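/*
 * Note: the path cost above is only recomputed while BR_ADMIN_COST is not
 * set, i.e. while the administrator has never configured a cost explicitly
 * (for example via "bridge link set dev <port> cost <n>" or the brport
 * sysfs attribute path_cost).  Once a cost has been set by hand, carrier
 * flaps no longer overwrite the configured value.
 */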
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}
static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that we don't have interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}
/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * the promiscuity setting of all the bridge ports.  We are always
 * called under RTNL so we can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or the bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}
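/*
 * Illustrative sketch only: the policy implemented by br_manage_promisc()
 * can be read as a per-port predicate.  br_port_wants_promisc() is a
 * hypothetical helper used purely for illustration; it is not part of the
 * bridge code.
 */
static inline bool br_port_wants_promisc(struct net_bridge *br,
					 struct net_bridge_port *p)
{
	/* Everything is promiscuous while the bridge device itself is
	 * promiscuous or VLAN filtering is disabled.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		return true;

	/* Otherwise promiscuous mode is only needed when the static-fdb
	 * argument above does not hold: more than one auto port, or exactly
	 * one auto port which is not this one.
	 */
	return !(br->auto_cnt == 0 ||
		 (br->auto_cnt == 1 && br_auto_port(p)));
}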
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	if (backup_dev) {
		if (!br_port_exists(backup_dev))
			return -ENOENT;

		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	if (old_backup == backup_p)
		return 0;

	/* if the backup link is already set, clear it */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	if (backup_p)
		backup_p->backup_redirected_cnt++;
	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}
static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}
static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);

	kfree(p);
}
static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct net_bridge_port *p = kobj_to_brport(kobj);

	net_ns_get_ownership(dev_net(p->dev), uid, gid);
}
static struct kobj_type brport_ktype = {
	.sysfs_ops	= &brport_sysfs_ops,
	.release	= release_nbp,
	.get_ownership	= brport_get_ownership,
};
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}
static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);

	destroy_nbp(p);
}
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}
static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}
/* Deleting a port (interface) from the bridge is done in two steps via RCU.
 * The first step marks the device as down; that deletes all the timers and
 * stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from concurrent admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}
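/*
 * The two-step teardown described above, in schematic form (the real flow is
 * del_nbp() -> call_rcu() -> destroy_nbp_rcu() -> destroy_nbp()):
 *
 *   1. Under RTNL, unpublish the port: list_del_rcu(), rx handler
 *      unregistration, sysfs/kobject removal.  New packets can no longer
 *      find the port.
 *   2. call_rcu(&p->rcu, destroy_nbp_rcu) defers the final kfree() (done by
 *      release_nbp() once the kobject reference count drops) until every CPU
 *      has left its RCU read-side critical section, so packets that were
 *      already being processed can still safely dereference the port.
 */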
/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}
/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	kfree(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}
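#if 0	/* Illustration only, not kernel code: the same "lowest free bit"
	 * allocation that find_portno() performs with set_bit() and
	 * find_first_zero_bit(), written as self-contained userspace C.
	 */
#include <stdio.h>

#define MAX_PORTS	32

static int find_free_port(unsigned long inuse)
{
	int i;

	inuse |= 1UL;			/* port number 0 is reserved */
	for (i = 0; i < MAX_PORTS; i++)
		if (!(inuse & (1UL << i)))
			return i;	/* first zero bit */
	return -1;			/* table full (-EXFULL in the kernel) */
}

int main(void)
{
	/* ports 1 and 2 already taken -> the next free number is 3 */
	unsigned long inuse = (1UL << 1) | (1UL << 2);

	printf("next port number: %d\n", find_free_port(inuse));
	return 0;
}
#endif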
/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);
	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete a non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}
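/*
 * Note: br_add_bridge()/br_del_bridge() back the legacy ioctl interface
 * (SIOCBRADDBR/SIOCBRDELBR) used by e.g. "brctl addbr br0" and
 * "brctl delbr br0".  Creating a bridge over netlink, e.g.
 * "ip link add name br0 type bridge", instead goes through br_link_ops,
 * the same rtnl_link_ops structure assigned in br_add_bridge() above.
 */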
/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int ret_mtu = 0;

	list_for_each_entry(p, &br->port_list, list)
		if (!ret_mtu || ret_mtu > p->dev->mtu)
			ret_mtu = p->dev->mtu;

	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}
void br_mtu_auto_adjust(struct net_bridge *br)
{
	/* if the bridge MTU was manually configured don't mess with it */
	if (br->mtu_set_by_user)
		return;

	/* change to the minimum MTU and clear the flag which was set by
	 * the bridge ndo_change_mtu callback
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br->mtu_set_by_user = false;
}
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}
/*
 * Recomputes features using the slaves' features
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}
/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging non-ethernet like devices, or DSA-enabled
	 * master network devices, since the bridge layer rx_handler prevents
	 * the DSA fake ethertype handler from being invoked, so we do not
	 * strip off the DSA switch tag protocol header and the bridge layer
	 * just returns RX_HANDLER_CONSUMED, stopping RX processing for these
	 * frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err) {
		kfree(p);	/* kobject not yet init'd, manually free */
		goto err1;
	}

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err2;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);
	dev_set_allmulti(dev, -1);
err1:
	return err;
}
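/*
 * Note: both userspace paths for enslaving a port end up in br_add_if():
 * the netlink path ("ip link set dev eth0 master br0", via the bridge's
 * ndo_add_slave) and the legacy ioctl path ("brctl addif br0 eth0",
 * SIOCBRADDIF).
 */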
/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there still may be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}
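/*
 * Note: this runs when per-port flags change, e.g. via
 * "bridge link set dev eth0 learning off" or "... neigh_suppress on".
 * BR_AUTO_MASK covers the learning/flood bits, so toggling those may change
 * br->auto_cnt and therefore the ports' promiscuous-mode requirements
 * (see br_manage_promisc() above).
 */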