/* net/8021q/vlan_core.c */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = vlan_tx_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local; let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the MAC
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
					      skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
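
/*
 * Illustrative sketch (not part of the original file): roughly how the core
 * receive path is expected to call vlan_do_receive() once a VLAN tag sits in
 * skb->vlan_tci.  Simplified and hypothetical; the in-tree caller is
 * __netif_receive_skb_core(), which reprocesses the skb on the VLAN device
 * when this returns true and bails out when the skb was consumed.
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		if (vlan_do_receive(&skb))
 *			goto another_round;
 *		else if (unlikely(!skb))
 *			goto out;
 *	}
 */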

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves; the grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
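
/*
 * Illustrative sketch (not part of the original file): looking up the VLAN
 * device stacked on a real device.  The RCU read lock must be held across the
 * call and for as long as the returned pointer is used; the variable names
 * below are hypothetical.
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep_rcu(real_dev, htons(ETH_P_8021Q), 100);
 *	if (vlan_dev)
 *		deliver_to(vlan_dev);
 *	rcu_read_unlock();
 */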

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
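
/*
 * Illustrative sketch (not part of the original file): resolving the bottom
 * real device under a possibly nested VLAN device (e.g. eth0.100.200 -> eth0).
 * The variable names are hypothetical.
 *
 *	if (is_vlan_dev(dev))
 *		lower_dev = vlan_dev_real_dev(dev);
 */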

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	/* Close the gap left by the stripped tag: move the destination and
	 * source MAC addresses up by VLAN_HLEN so the header is contiguous.
	 */
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(vlan_untag);
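
/*
 * Illustrative sketch (not part of the original file): how a receive path
 * without hardware VLAN acceleration might strip the tag in software before
 * further processing.  Simplified and hypothetical; in this kernel the core
 * receive path does this when skb->protocol is a VLAN ethertype.
 *
 *	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
 *	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 *		skb = vlan_untag(skb);
 *		if (unlikely(!skb))
 *			goto out;
 *	}
 */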

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

static bool vlan_hw_filter_capable(const struct net_device *dev,
				   const struct vlan_vid_info *vid_info)
{
	if (vid_info->proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (vid_info->proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
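
/*
 * Illustrative sketch (not part of the original file): a caller holding RTNL
 * registering a VID on the real device (for example when a VLAN device is
 * created on top of it) and dropping the reference again later.  Error paths
 * trimmed; the names are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = vlan_vid_add(real_dev, htons(ETH_P_8021Q), vid);
 *	if (err)
 *		return err;
 *	...
 *	vlan_vid_del(real_dev, htons(ETH_P_8021Q), vid);
 */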

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
		if (err) {
			pr_warn("failed to kill vid %04x/%d for device %s\n",
				proto, vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
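
/*
 * Illustrative sketch (not part of the original file): how a master device
 * such as bonding or team might mirror its configured VIDs onto a newly
 * enslaved lower device and remove them again on release.  Hypothetical,
 * simplified enslave/release paths.
 *
 *	err = vlan_vids_add_by_dev(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 *	...
 *	vlan_vids_del_by_dev(slave_dev, bond_dev);
 */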

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
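
/*
 * Illustrative sketch (not part of the original file): refusing an operation
 * while VLAN devices are still stacked on a real device.  Hypothetical caller;
 * must run under RTNL.
 *
 *	ASSERT_RTNL();
 *	if (vlan_uses_dev(dev))
 *		return -EBUSY;
 */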