#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
                              const void *ptr)
{
        const struct net_bridge_vlan *vle = ptr;
        u16 vid = *(u16 *)arg->key;

        return vle->vid != vid;
}
static const struct rhashtable_params br_vlan_rht_params = {
        .head_offset = offsetof(struct net_bridge_vlan, vnode),
        .key_offset = offsetof(struct net_bridge_vlan, vid),
        .key_len = sizeof(u16),
        .max_size = VLAN_N_VID,
        .obj_cmpfn = br_vlan_cmp,
        .automatic_shrinking = true,
};
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
        return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
        if (vg->pvid == vid)
                return;
        smp_wmb();
        vg->pvid = vid;
}
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
        if (vg->pvid != vid)
                return;
        smp_wmb();
        vg->pvid = 0;
}
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
        struct net_bridge_vlan_group *vg;

        if (br_vlan_is_master(v))
                vg = br_vlan_group(v->br);
        else
                vg = nbp_vlan_group(v->port);

        if (flags & BRIDGE_VLAN_INFO_PVID)
                __vlan_add_pvid(vg, v->vid);
        else
                __vlan_delete_pvid(vg, v->vid);

        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
                v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
        else
                v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
                          u16 vid, u16 flags)
{
        struct switchdev_obj_port_vlan v = {
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .flags = flags,
                .vid_begin = vid,
                .vid_end = vid,
        };
        int err;

        /* Try switchdev op first. In case it is not supported, fallback to
         * 8021q add.
         */
        err = switchdev_port_obj_add(dev, &v.obj);
        if (err == -EOPNOTSUPP)
                return vlan_vid_add(dev, br->vlan_proto, vid);
        return err;
}
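
/* For illustration only (a sketch, not taken from this file):
 * SWITCHDEV_OBJ_ID_PORT_VLAN describes a contiguous VID range, and the bridge
 * always programs a single VID (vid_begin == vid_end, as above). A
 * hypothetical batched caller could hand the driver a whole range at once:
 *
 *      struct switchdev_obj_port_vlan v = {
 *              .obj.id    = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *              .flags     = BRIDGE_VLAN_INFO_UNTAGGED,
 *              .vid_begin = 10,
 *              .vid_end   = 20,
 *      };
 *      err = switchdev_port_obj_add(dev, &v.obj);
 */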
static void __vlan_add_list(struct net_bridge_vlan *v)
{
        struct net_bridge_vlan_group *vg;
        struct list_head *headp, *hpos;
        struct net_bridge_vlan *vent;

        if (br_vlan_is_master(v))
                vg = br_vlan_group(v->br);
        else
                vg = nbp_vlan_group(v->port);

        headp = &vg->vlan_list;
        list_for_each_prev(hpos, headp) {
                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
                if (v->vid < vent->vid)
                        continue;
                else
                        break;
        }
        list_add_rcu(&v->vlist, hpos);
}
static void __vlan_del_list(struct net_bridge_vlan *v)
{
        list_del_rcu(&v->vlist);
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
                          u16 vid)
{
        struct switchdev_obj_port_vlan v = {
                .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .vid_begin = vid,
                .vid_end = vid,
        };
        int err;

        /* Try switchdev op first. In case it is not supported, fallback to
         * 8021q del.
         */
        err = switchdev_port_obj_del(dev, &v.obj);
        if (err == -EOPNOTSUPP) {
                vlan_vid_del(dev, br->vlan_proto, vid);
                return 0;
        }
        return err;
}
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *masterv;

        vg = br_vlan_group(br);
        masterv = br_vlan_find(vg, vid);
        if (!masterv) {
                /* missing global ctx, create it now */
                if (br_vlan_add(br, vid, 0))
                        return NULL;
                masterv = br_vlan_find(vg, vid);
                if (WARN_ON(!masterv))
                        return NULL;
        }
        atomic_inc(&masterv->refcnt);

        return masterv;
}
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
        struct net_bridge_vlan_group *vg;

        if (!br_vlan_is_master(masterv))
                return;

        vg = br_vlan_group(masterv->br);
        if (atomic_dec_and_test(&masterv->refcnt)) {
                rhashtable_remove_fast(&vg->vlan_hash,
                                       &masterv->vnode, br_vlan_rht_params);
                __vlan_del_list(masterv);
                kfree_rcu(masterv, rcu);
        }
}
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
        struct net_bridge_vlan *masterv = NULL;
        struct net_bridge_port *p = NULL;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev;
        struct net_bridge *br;
        int err;

        if (br_vlan_is_master(v)) {
                br = v->br;
                dev = br->dev;
                vg = br_vlan_group(br);
        } else {
                p = v->port;
                br = p->br;
                dev = p->dev;
                vg = nbp_vlan_group(p);
        }

        if (p) {
                /* Add VLAN to the device filter if it is supported.
                 * This ensures tagged traffic enters the bridge when
                 * promiscuous mode is disabled by br_manage_promisc().
                 */
                err = __vlan_vid_add(dev, br, v->vid, flags);
                if (err)
                        goto out;

                /* need to work on the master vlan too */
                if (flags & BRIDGE_VLAN_INFO_MASTER) {
                        err = br_vlan_add(br, v->vid, flags |
                                                      BRIDGE_VLAN_INFO_BRENTRY);
                        if (err)
                                goto out_filt;
                }

                masterv = br_vlan_get_master(br, v->vid);
                if (!masterv) {
                        err = -ENOMEM;
                        goto out_filt;
                }
                v->brvlan = masterv;
        }

        /* Add the dev mac and count the vlan only if it's usable */
        if (br_vlan_should_use(v)) {
                err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
                if (err) {
                        br_err(br, "failed insert local address into bridge forwarding table\n");
                        goto out_filt;
                }
                vg->num_vlans++;
        }

        err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
                                            br_vlan_rht_params);
        if (err)
                goto out_fdb_insert;

        __vlan_add_list(v);
        __vlan_add_flags(v, flags);
out:
        return err;

out_fdb_insert:
        if (br_vlan_should_use(v)) {
                br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
                vg->num_vlans--;
        }

out_filt:
        if (p) {
                __vlan_vid_del(dev, br, v->vid);
                if (masterv)
                        br_vlan_put_master(masterv);
        }

        goto out;
}
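
/* A rough, illustrative mapping of the four cases documented above to their
 * callers (a sketch based on the call sites in this file, not a comment from
 * the original):
 *   1. nbp_vlan_add() on a port while the bridge already has the vid
 *   2. br_vlan_add() with BRIDGE_VLAN_INFO_BRENTRY (vlan on the bridge device)
 *   3. nbp_vlan_add() on a port; br_vlan_get_master() creates the missing
 *      global entry via br_vlan_add(br, vid, 0)
 *   4. nbp_vlan_add() with BRIDGE_VLAN_INFO_MASTER, where __vlan_add() above
 *      also makes the bridge entry a brentry so it filters on the bridge too
 */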
static int __vlan_del(struct net_bridge_vlan *v)
{
        struct net_bridge_vlan *masterv = v;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        int err = 0;

        if (br_vlan_is_master(v)) {
                vg = br_vlan_group(v->br);
        } else {
                p = v->port;
                vg = nbp_vlan_group(v->port);
                masterv = v->brvlan;
        }

        __vlan_delete_pvid(vg, v->vid);
        if (p) {
                err = __vlan_vid_del(p->dev, p->br, v->vid);
                if (err)
                        goto out;
        }

        if (br_vlan_should_use(v)) {
                v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
                vg->num_vlans--;
        }

        if (masterv != v) {
                rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
                                       br_vlan_rht_params);
                __vlan_del_list(v);
                kfree_rcu(v, rcu);
        }

        br_vlan_put_master(masterv);
out:
        return err;
}
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
        WARN_ON(!list_empty(&vg->vlan_list));
        rhashtable_destroy(&vg->vlan_hash);
        kfree(vg);
}
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
        struct net_bridge_vlan *vlan, *tmp;

        __vlan_delete_pvid(vg, vg->pvid);
        list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
                __vlan_del(vlan);
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
                               struct net_bridge_vlan_group *vg,
                               struct sk_buff *skb)
{
        struct net_bridge_vlan *v;
        u16 vid;

        /* If this packet was not filtered at input, let it pass */
        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                goto out;

        /* At this point, we know that the frame was filtered and contains
         * a valid vlan id. If the vlan id has untagged flag set,
         * send untagged; otherwise, send tagged.
         */
        br_vlan_get_tag(skb, &vid);
        v = br_vlan_find(vg, vid);
        /* Vlan entry must be configured at this point. The
         * only exception is the bridge is set in promisc mode and the
         * packet is destined for the bridge device. In this case
         * pass the packet as is.
         */
        if (!v || !br_vlan_should_use(v)) {
                if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
                        goto out;
                } else {
                        kfree_skb(skb);
                        return NULL;
                }
        }
        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
                skb->vlan_tci = 0;
out:
        return skb;
}
/* Called under RCU */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
                              struct sk_buff *skb, u16 *vid)
{
        const struct net_bridge_vlan *v;
        bool tagged;

        BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
        /* If vlan tx offload is disabled on bridge device and frame was
         * sent from vlan device on the bridge device, it does not have
         * HW accelerated vlan tag.
         */
        if (unlikely(!skb_vlan_tag_present(skb) &&
                     skb->protocol == proto)) {
                skb = skb_vlan_untag(skb);
                if (unlikely(!skb))
                        return false;
        }

        if (!br_vlan_get_tag(skb, vid)) {
                /* Tagged frame */
                if (skb->vlan_proto != proto) {
                        /* Protocol-mismatch, empty out vlan_tci for new tag */
                        skb_push(skb, ETH_HLEN);
                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
                                                        skb_vlan_tag_get(skb));
                        if (unlikely(!skb))
                                return false;

                        skb_pull(skb, ETH_HLEN);
                        skb_reset_mac_len(skb);
                        *vid = 0;
                        tagged = false;
                } else {
                        tagged = true;
                }
        } else {
                /* Untagged frame */
                tagged = false;
        }

        if (!*vid) {
                u16 pvid = br_get_pvid(vg);

                /* Frame had a tag with VID 0 or did not have a tag.
                 * See if pvid is set on this port. That tells us which
                 * vlan untagged or priority-tagged traffic belongs to.
                 */
                if (!pvid)
                        goto drop;

                /* PVID is set on this port. Any untagged or priority-tagged
                 * ingress frame is considered to belong to this vlan.
                 */
                *vid = pvid;
                if (likely(!tagged))
                        /* Untagged Frame. */
                        __vlan_hwaccel_put_tag(skb, proto, pvid);
                else
                        /* Priority-tagged Frame.
                         * At this point, we know that skb->vlan_tci had
                         * VLAN_TAG_PRESENT bit and its VID field was 0x000.
                         * We update only VID field and preserve PCP field.
                         */
                        skb->vlan_tci |= pvid;
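                        /* The OR above only touches the VID field: the VID is
                         * the low 12 bits of the TCI (VLAN_VID_MASK) while the
                         * PCP sits in the top 3 bits (VLAN_PRIO_SHIFT == 13).
                         * For example (hypothetical values), a priority-tagged
                         * frame with PCP 5 and pvid 100 leaves here with
                         * priority 5 and VID 100.
                         */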

                return true;
        }

        /* Frame had a valid vlan tag. See if vlan is allowed */
        v = br_vlan_find(vg, *vid);
        if (v && br_vlan_should_use(v))
                return true;
drop:
        kfree_skb(skb);
        return false;
}
bool br_allowed_ingress(const struct net_bridge *br,
                        struct net_bridge_vlan_group *vg, struct sk_buff *skb,
                        u16 *vid)
{
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
        if (!br->vlan_enabled) {
                BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
                return true;
        }

        return __allowed_ingress(vg, br->vlan_proto, skb, vid);
}
/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
                       const struct sk_buff *skb)
{
        const struct net_bridge_vlan *v;
        u16 vid;

        /* If this packet was not filtered at input, let it pass */
        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
                return true;

        br_vlan_get_tag(skb, &vid);
        v = br_vlan_find(vg, vid);
        if (v && br_vlan_should_use(v))
                return true;

        return false;
}
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge *br = p->br;

        /* If filtering was disabled at input, let it pass. */
        if (!br->vlan_enabled)
                return true;

        vg = nbp_vlan_group_rcu(p);
        if (!vg || !vg->num_vlans)
                return false;

        if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
                *vid = 0;

        if (!*vid) {
                *vid = br_get_pvid(vg);
                if (!*vid)
                        return false;

                return true;
        }

        if (br_vlan_find(vg, *vid))
                return true;

        return false;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *vlan;
        int ret;

        ASSERT_RTNL();

        vg = br_vlan_group(br);
        vlan = br_vlan_find(vg, vid);
        if (vlan) {
                if (!br_vlan_is_brentry(vlan)) {
                        /* Trying to change flags of non-existent bridge vlan */
                        if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
                                return -EINVAL;
                        /* It was only kept for port vlans, now make it real */
                        ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
                                            vlan->vid);
                        if (ret) {
                                br_err(br, "failed insert local address into bridge forwarding table\n");
                                return ret;
                        }
                        atomic_inc(&vlan->refcnt);
                        vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
                        vg->num_vlans++;
                }
                __vlan_add_flags(vlan, flags);
                return 0;
        }

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                return -ENOMEM;

        vlan->vid = vid;
        vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
        vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
        vlan->br = br;
        if (flags & BRIDGE_VLAN_INFO_BRENTRY)
                atomic_set(&vlan->refcnt, 1);
        ret = __vlan_add(vlan, flags);
        if (ret)
                kfree(vlan);

        return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *v;

        ASSERT_RTNL();

        vg = br_vlan_group(br);
        v = br_vlan_find(vg, vid);
        if (!v || !br_vlan_is_brentry(v))
                return -ENOENT;

        br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
        br_fdb_delete_by_port(br, NULL, vid, 0);

        return __vlan_del(v);
}
void br_vlan_flush(struct net_bridge *br)
{
        struct net_bridge_vlan_group *vg;

        ASSERT_RTNL();

        vg = br_vlan_group(br);
        __vlan_flush(vg);
        RCU_INIT_POINTER(br->vlgrp, NULL);
        synchronize_rcu();

        __vlan_group_free(vg);
}
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
        if (!vg)
                return NULL;

        return br_vlan_lookup(&vg->vlan_hash, vid);
}
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
        if (br->group_addr_set)
                return;

        spin_lock_bh(&br->lock);
        if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
                /* Bridge Group Address */
                br->group_addr[5] = 0x00;
        } else { /* vlan_enabled && ETH_P_8021AD */
                /* Provider Bridge Group Address */
                br->group_addr[5] = 0x08;
        }
        spin_unlock_bh(&br->lock);
}
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
        if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
                br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
        else /* vlan_enabled && ETH_P_8021AD */
                br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
                                              ~(1u << br->group_addr[5]);
}
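
/* A worked example for the mask above (illustrative; the addresses are the
 * standard reserved ones): an 802.1Q bridge uses the Bridge Group Address
 * 01-80-C2-00-00-00 (group_addr[5] == 0x00), while an 802.1ad bridge uses the
 * Provider Bridge Group Address 01-80-C2-00-00-08 (group_addr[5] == 0x08).
 * In the 802.1ad case, ~(1u << 0x08) clears bit 8 of BR_GROUPFWD_8021AD, i.e.
 * the bit corresponding to the bridge's own group address.
 */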
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
        if (br->vlan_enabled == val)
                return 0;

        br->vlan_enabled = val;
        br_manage_promisc(br);
        recalculate_group_addr(br);
        br_recalculate_fwd_mask(br);

        return 0;
}
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
        if (!rtnl_trylock())
                return restart_syscall();

        __br_vlan_filter_toggle(br, val);
        rtnl_unlock();

        return 0;
}
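
/* For reference (an illustration, not code from this file), the toggle above
 * is what runs when vlan filtering is switched from userspace, e.g.:
 *
 *      ip link set dev br0 type bridge vlan_filtering 1
 *
 * or via the bridge's sysfs attribute:
 *
 *      echo 1 > /sys/class/net/br0/bridge/vlan_filtering
 *
 * (br0 is an example device name.)
 */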
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
        int err = 0;
        struct net_bridge_port *p;
        struct net_bridge_vlan *vlan;
        struct net_bridge_vlan_group *vg;
        __be16 oldproto;

        if (br->vlan_proto == proto)
                return 0;

        /* Add VLANs for the new proto to the device filter. */
        list_for_each_entry(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist) {
                        err = vlan_vid_add(p->dev, proto, vlan->vid);
                        if (err)
                                goto err_filt;
                }
        }

        oldproto = br->vlan_proto;
        br->vlan_proto = proto;

        recalculate_group_addr(br);
        br_recalculate_fwd_mask(br);

        /* Delete VLANs for the old proto from the device filter. */
        list_for_each_entry(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist)
                        vlan_vid_del(p->dev, oldproto, vlan->vid);
        }

        return 0;

err_filt:
        list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
                vlan_vid_del(p->dev, proto, vlan->vid);

        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist)
                        vlan_vid_del(p->dev, proto, vlan->vid);
        }

        return err;
}
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
        int err;

        if (val != ETH_P_8021Q && val != ETH_P_8021AD)
                return -EPROTONOSUPPORT;

        if (!rtnl_trylock())
                return restart_syscall();

        err = __br_vlan_set_proto(br, htons(val));
        rtnl_unlock();

        return err;
}
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
        struct net_bridge_vlan *v;

        if (vid != vg->pvid)
                return false;

        v = br_vlan_lookup(&vg->vlan_hash, vid);
        if (v && br_vlan_should_use(v) &&
            (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
                return true;

        return false;
}
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
        struct net_bridge_port *p;
        u16 pvid = br->default_pvid;

        /* Disable default_pvid on all ports where it is still
         * configured.
         */
        if (vlan_default_pvid(br_vlan_group(br), pvid))
                br_vlan_delete(br, pvid);

        list_for_each_entry(p, &br->port_list, list) {
                if (vlan_default_pvid(nbp_vlan_group(p), pvid))
                        nbp_vlan_delete(p, pvid);
        }

        br->default_pvid = 0;
}
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
        const struct net_bridge_vlan *pvent;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p;
        u16 old_pvid;
        int err = 0;
        unsigned long *changed;

        if (!pvid) {
                br_vlan_disable_default_pvid(br);
                return 0;
        }

        changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
                          GFP_KERNEL);
        if (!changed)
                return -ENOMEM;

        old_pvid = br->default_pvid;

        /* Update default_pvid config only if we do not conflict with
         * user configuration.
         */
        vg = br_vlan_group(br);
        pvent = br_vlan_find(vg, pvid);
        if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
            (!pvent || !br_vlan_should_use(pvent))) {
                err = br_vlan_add(br, pvid,
                                  BRIDGE_VLAN_INFO_PVID |
                                  BRIDGE_VLAN_INFO_UNTAGGED |
                                  BRIDGE_VLAN_INFO_BRENTRY);
                if (err)
                        goto out;
                br_vlan_delete(br, old_pvid);
                set_bit(0, changed);
        }

        list_for_each_entry(p, &br->port_list, list) {
                /* Update default_pvid config only if we do not conflict with
                 * user configuration.
                 */
                vg = nbp_vlan_group(p);
                if ((old_pvid &&
                     !vlan_default_pvid(vg, old_pvid)) ||
                    br_vlan_find(vg, pvid))
                        continue;

                err = nbp_vlan_add(p, pvid,
                                   BRIDGE_VLAN_INFO_PVID |
                                   BRIDGE_VLAN_INFO_UNTAGGED);
                if (err)
                        goto err_port;
                nbp_vlan_delete(p, old_pvid);
                set_bit(p->port_no, changed);
        }

        br->default_pvid = pvid;

out:
        kfree(changed);
        return err;

err_port:
        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
                if (!test_bit(p->port_no, changed))
                        continue;

                if (old_pvid)
                        nbp_vlan_add(p, old_pvid,
                                     BRIDGE_VLAN_INFO_PVID |
                                     BRIDGE_VLAN_INFO_UNTAGGED);
                nbp_vlan_delete(p, pvid);
        }

        if (test_bit(0, changed)) {
                if (old_pvid)
                        br_vlan_add(br, old_pvid,
                                    BRIDGE_VLAN_INFO_PVID |
                                    BRIDGE_VLAN_INFO_UNTAGGED |
                                    BRIDGE_VLAN_INFO_BRENTRY);
                br_vlan_delete(br, pvid);
        }

        goto out;
}
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
        u16 pvid = val;
        int err = 0;

        if (val >= VLAN_VID_MASK)
                return -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (pvid == br->default_pvid)
                goto unlock;

        /* Only allow default pvid change when filtering is disabled */
        if (br->vlan_enabled) {
                pr_info_once("Please disable vlan filtering to change default_pvid\n");
                err = -EPERM;
                goto unlock;
        }
        err = __br_vlan_set_default_pvid(br, pvid);
unlock:
        rtnl_unlock();
        return err;
}
int br_vlan_init(struct net_bridge *br)
{
        struct net_bridge_vlan_group *vg;
        int ret = -ENOMEM;

        vg = kzalloc(sizeof(*vg), GFP_KERNEL);
        if (!vg)
                goto out;
        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
        INIT_LIST_HEAD(&vg->vlan_list);
        br->vlan_proto = htons(ETH_P_8021Q);
        br->default_pvid = 1;
        rcu_assign_pointer(br->vlgrp, vg);
        ret = br_vlan_add(br, 1,
                          BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
                          BRIDGE_VLAN_INFO_BRENTRY);
        if (ret)
                goto err_vlan_add;

out:
        return ret;

err_vlan_add:
        rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
        kfree(vg);

        goto out;
}
int nbp_vlan_init(struct net_bridge_port *p)
{
        struct net_bridge_vlan_group *vg;
        int ret = -ENOMEM;

        vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
        if (!vg)
                goto out;

        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
        INIT_LIST_HEAD(&vg->vlan_list);
        rcu_assign_pointer(p->vlgrp, vg);
        if (p->br->default_pvid) {
                ret = nbp_vlan_add(p, p->br->default_pvid,
                                   BRIDGE_VLAN_INFO_PVID |
                                   BRIDGE_VLAN_INFO_UNTAGGED);
                if (ret)
                        goto err_vlan_add;
        }
out:
        return ret;

err_vlan_add:
        RCU_INIT_POINTER(p->vlgrp, NULL);
        synchronize_rcu();
        rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
        kfree(vg);

        goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
        struct net_bridge_vlan *vlan;
        int ret;

        ASSERT_RTNL();

        vlan = br_vlan_find(nbp_vlan_group(port), vid);
        if (vlan) {
                __vlan_add_flags(vlan, flags);
                return 0;
        }

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                return -ENOMEM;

        vlan->vid = vid;
        vlan->port = port;
        ret = __vlan_add(vlan, flags);
        if (ret)
                kfree(vlan);

        return ret;
}
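
/* For illustration (not code from this file): a typical way this entry point
 * is reached is the iproute2 command
 *
 *      bridge vlan add dev eth0 vid 10 pvid untagged
 *
 * where "pvid" and "untagged" arrive here as BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED in the flags argument (eth0 and vid 10 are
 * example values).
 */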
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
        struct net_bridge_vlan *v;

        ASSERT_RTNL();

        v = br_vlan_find(nbp_vlan_group(port), vid);
        if (!v)
                return -ENOENT;

        br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
        br_fdb_delete_by_port(port->br, port, vid, 0);

        return __vlan_del(v);
}
void nbp_vlan_flush(struct net_bridge_port *port)
{
        struct net_bridge_vlan_group *vg;

        ASSERT_RTNL();

        vg = nbp_vlan_group(port);
        __vlan_flush(vg);
        RCU_INIT_POINTER(port->vlgrp, NULL);
        synchronize_rcu();

        __vlan_group_free(vg);
}