/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *		Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <asm/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

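/* Allocate, on demand, the per-protocol/per-partition slot array that will
 * hold the net_device pointer for @vlan_id, so the full VID table is not
 * allocated up front.
 */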
static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int pidx, vidx;
	unsigned int size;

	ASSERT_RTNL();

	pidx  = vlan_proto_idx(vlan_proto);
	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL);
	if (array == NULL)
		return -ENOBUFS;

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}

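/* Detach @dev from its real device: leave GVRP/MVRP, clear the group slot,
 * unlink the upper/lower relationship, queue the netdevice for
 * unregistration and drop the VID from the real device's filter.
 */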
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}

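/* Sanity-check that @real_dev can carry a VLAN with this protocol/VID pair:
 * refuse VLAN-challenged devices and duplicate VIDs.
 */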
int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
		return -EEXIST;

	return 0;
}

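/* Register an already allocated and configured VLAN netdevice and wire it
 * up to its real device: VID filter, GVRP/MVRP applicants, group table and
 * upper/lower linkage, unwinding all of it on failure.
 */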
int register_vlan_dev(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}

/* Attach a VLAN device to a MAC address (i.e. Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like:	 eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);
	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;
	new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	if (new_dev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(new_dev);
	return err;
}

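/* Keep the real device's unicast filter consistent with @vlandev's MAC
 * address after the real device's own address has changed.
 */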
static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

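/* Propagate offload-related settings (GSO limits, hard_header_len, FCoE DDP
 * exchange id) from the real device to @vlandev and re-evaluate its
 * feature set.
 */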
static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	netdev_update_features(vlandev);
}

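/* Handle notifier events for the VLAN device itself: keep its /proc entry
 * in sync with register, unregister and rename.
 */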
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}

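/* Notifier callback for events on the real (lower) device: propagate state,
 * address, MTU and feature changes to all stacked VLAN devices, and tear
 * them down when the real device goes away.
 */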
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */
	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			netif_stacked_transfer_operstate(dev, vlandev);
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

/*
 *	VLAN IOCTL handler.
 *	o execute requested action or pass command to the device driver.
 *	arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[23] = 0;
	args.u.device2[23] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if ((args.u.name_type >= 0) &&
		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}

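/* GRO receive handler for 802.1Q/802.1AD tagged frames: pull the VLAN
 * header, split flows that carry different tags, and hand the packet to
 * the offload callbacks of the encapsulated protocol.
 */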
static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vlan_hdr *vhdr;
	unsigned int hlen, off_vlan;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	for (p = *head; p; p = p->next) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

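/* GRO completion handler: let the encapsulated protocol fix up its headers
 * in the merged skb, skipping past the VLAN header.
 */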
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

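/* Per-network-namespace state: each netns gets its own interface naming
 * scheme and its own /proc/net/vlan directory.
 */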
static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};

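/* Module init: register the per-netns ops, the netdevice notifier,
 * GVRP/MVRP, the netlink interface, the GRO offloads and the VLAN ioctl
 * handler, undoing the earlier steps in reverse order on failure.
 */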
static int __init vlan_proto_init(void)
{
	int err;
	unsigned int i;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}

static void __exit vlan_cleanup_module(void)
{
	unsigned int i;

	vlan_ioctl_set(NULL);

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_remove_offload(&vlan_packet_offloads[i]);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);