#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return false;

	smp_wmb();
	vg->pvid = vid;

	return true;
}

static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	smp_wmb();
	vg->pvid = 0;

	return true;
}

/* return true if anything changed, false otherwise */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v->vid);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	return ret || !!(old_flags ^ v->flags);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * the 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, vid, flags);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * the 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, vid);
	if (err == -EOPNOTSUPP) {
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}

/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

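/* RCU callback used by br_vlan_put_master(): frees a master (bridge-level)
 * VLAN entry together with its per-cpu stats once readers are done with it.
 */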
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->brvlan->stats != v->stats)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed);
			if (err)
				goto out_filt;
		}

		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv)
			goto out_filt;
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
		} else {
			v->stats = masterv->stats;
		}
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags);
		if (err && err != -EOPNOTSUPP)
			goto out;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}

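/* Shared delete path for port and bridge VLAN entries: clears the pvid if it
 * pointed at this vlan, removes the vlan from the hardware/8021q filters and
 * from the group's hash and list, and drops the reference on the master vlan.
 */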
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}

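/* Egress VLAN handling: for frames that were vlan_filtered at ingress, look
 * up the egress vlan, update TX stats if enabled, strip the tag when the vlan
 * is untagged on this port and apply tunnel mappings. Returns the (possibly
 * modified) skb, or NULL if the frame was dropped.
 */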
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point. The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device. In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci had
			 * the VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only the VID field and preserve the PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}

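/* Update an already existing bridge VLAN entry: notify switchdev, promote a
 * port-only (context) entry to a real bridge entry when BRENTRY is requested,
 * and apply the new flags. *changed is set when anything was modified.
 */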
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;
	int err = 0;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val));
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow to change the option if there are no port vlans configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

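/* Return true if vid is the group's pvid and is still configured the way a
 * default pvid entry is set up automatically (usable entry with the untagged
 * flag), i.e. it does not look like explicit user configuration.
 */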
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}

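/* Change default_pvid to a new value: install the new pvid vlan on the bridge
 * and on every port that still carries the old default configuration, remove
 * the old one, and roll back all changes on failure.
 */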
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange);
		br_vlan_delete(br, pvid);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
out:
	return err;
}

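/* Allocate and initialize the bridge's VLAN group and install VLAN 1 as the
 * initial default pvid entry.
 */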
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;
	bool changed;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY, &changed);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

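/* Per-port counterpart of br_vlan_init(): allocate the port's VLAN group,
 * propagate the bridge's vlan_filtering setting to switchdev and add the
 * bridge's default pvid on the port.
 */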
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

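/* Sum the per-cpu counters of a VLAN entry into *stats, using the u64 stats
 * syncp to read consistent 64-bit values.
 */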
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();
	if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);