/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *		Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"
#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */
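
/* Each vlan_group keeps a two-level table of VLAN devices: the first index
 * selects the VLAN protocol (802.1Q or 802.1ad), the second selects a chunk
 * of VLAN_GROUP_ARRAY_PART_LEN consecutive VIDs.  The chunk covering a given
 * VID is only allocated once a device in that range is actually registered.
 */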
static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int pidx, vidx;
	unsigned int size;

	ASSERT_RTNL();

	pidx  = vlan_proto_idx(vlan_proto);
	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL);
	if (array == NULL)
		return -ENOBUFS;

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}
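
/* Tear down a VLAN device: leave MVRP/GVRP if they were requested, drop the
 * device from the group table, unlink it from the real device and queue it
 * for unregistration.  Called under RTNL.
 */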
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);

	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
		return -EEXIST;

	return 0;
}
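
/* Finish bringing up a VLAN device whose vlan_dev_priv has already been
 * filled in: add the VID filter on the real device, start the GVRP/MVRP
 * applicants for the first VLAN on that device, register the netdevice and
 * link it above the real device.  Used by both the ioctl and netlink paths.
 */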
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}
/*  Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 *  Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like:	 eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);
	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	if (new_dev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(new_dev);
	return err;
}
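
/* The real device's MAC address changed; keep the VLAN device's unicast
 * filter entry on the real device consistent with whether the VLAN device
 * still shares that address.
 */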
static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
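
/* Re-derive the VLAN device's offload-related attributes (GSO limits, header
 * length, FCoE DDP exchange id) from the real device, e.g. after a
 * NETDEV_FEAT_CHANGE event on the lower device.
 */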
static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;
	vlandev->gso_max_segs = dev->gso_max_segs;

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);

	netdev_update_features(vlandev);
}
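
/* Notifier events for a VLAN device itself only affect its own entry under
 * /proc/net/vlan; everything else is handled by vlan_device_event() below.
 */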
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}
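
/* Netdevice notifier: reacts to state changes of real devices and propagates
 * them (operstate, MTU, features, up/down, unregistration) to the VLAN
 * devices stacked on top of them.
 */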
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			netif_stacked_transfer_operstate(dev, vlandev);
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
	}

out:
	return NOTIFY_DONE;
}
static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};
/*
 *	VLAN IOCTL handler.
 *	o execute requested action or pass command to the device driver.
 *	  arg is really a struct vlan_ioctl_args __user *.
 */
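/* A minimal sketch of how userspace is assumed to drive this handler via the
 * SIOCSIFVLAN socket ioctl (error handling omitted; "eth0" and VID 100 are
 * only examples):
 *
 *	struct vlan_ioctl_args ifr = { .cmd = ADD_VLAN_CMD };
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	strncpy(ifr.device1, "eth0", sizeof(ifr.device1) - 1);
 *	ifr.u.VID = 100;
 *	ioctl(fd, SIOCSIFVLAN, &ifr);
 *	// with the default name type this creates "eth0.100"
 */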
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[sizeof(args.device1) - 1] = 0;
	args.u.device2[sizeof(args.u.device2) - 1] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}
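
/* GRO callbacks for VLAN-tagged frames: peel off the 802.1Q/802.1ad header
 * and hand the packet to the offload handlers of the encapsulated protocol.
 */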
static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vlan_hdr *vhdr;
	unsigned int hlen, off_vlan;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	for (p = *head; p; p = p->next) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};
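
/* Per network namespace state is small: the default interface-naming scheme
 * plus the /proc/net/vlan directory created by vlan_proc_init().
 */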
static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};
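
/* Module init: register the per-netns ops and the netdevice notifier, bring
 * up GVRP/MVRP and the netlink interface, install the GRO offloads and hook
 * the SIOCSIFVLAN ioctl.  Errors unwind in reverse order.
 */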
static int __init vlan_proto_init(void)
{
	int err;
	unsigned int i;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}
static void __exit vlan_cleanup_module(void)
{
	unsigned int i;

	vlan_ioctl_set(NULL);

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_remove_offload(&vlan_packet_offloads[i]);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);