/* net/8021q/vlan_core.c */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
					      skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

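/* Illustrative only, not part of the original file: the expected caller is
 * the core RX path. Because vlan_do_receive() may replace the skb via
 * skb_share_check(), it takes a struct sk_buff ** and the caller must keep
 * using the possibly-updated pointer. __netif_receive_skb_core() does
 * roughly:
 *
 *	if (skb_vlan_tag_present(skb)) {
 *		if (vlan_do_receive(&skb))
 *			goto another_round;
 *		else if (unlikely(!skb))
 *			goto out;
 *	}
 *
 * where another_round reruns delivery with skb->dev now the vlan device.
 */
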
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

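/* Illustrative only: a typical lookup from outside this file. The result is
 * only guaranteed to stay alive inside the RCU read-side section:
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep_rcu(real_dev,
 *					    htons(ETH_P_8021Q), vid);
 *	if (vlan_dev)
 *		...
 *	rcu_read_unlock();
 */
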
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

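/* Illustrative only: vlan_dev_real_dev() walks all the way down a stacked
 * (Q-in-Q) configuration. With eth0 <- vlan.100 <- vlan.200, calling it on
 * vlan.200 returns eth0, not the intermediate vlan.100.
 */
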
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

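/* Note (added commentary): a real device gets a single struct vlan_info the
 * first time a VID is configured on it; it owns both the vlan_group used
 * for RX demux above and the refcounted vid_list managed below, and is
 * freed again once the last VID goes away.
 */
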
struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

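/* Whether dev can filter this VID in hardware depends on the feature flag
 * matching the VLAN protocol: NETIF_F_HW_VLAN_CTAG_FILTER for 802.1Q and
 * NETIF_F_HW_VLAN_STAG_FILTER for 802.1ad.
 */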
static bool vlan_hw_filter_capable(const struct net_device *dev,
				   const struct vlan_vid_info *vid_info)
{
	if (vid_info->proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (vid_info->proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		if (netif_device_present(dev))
			err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
		else
			err = -ENODEV;
		if (err) {
			kfree(vid_info);
			return err;
		}
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

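/* Illustrative only: vlan_vid_add()/vlan_vid_del() are refcounted and must
 * be called under RTNL. A user that needs frames for VID 100 accepted on
 * dev would typically do:
 *
 *	ASSERT_RTNL();
 *	err = vlan_vid_add(dev, htons(ETH_P_8021Q), 100);
 *	if (err)
 *		return err;
 *	...
 *	vlan_vid_del(dev, htons(ETH_P_8021Q), 100);
 *
 * ndo_vlan_rx_add_vid() is only invoked on the first add of a given
 * proto/vid pair; later adds just bump the refcount.
 */
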
static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		if (netif_device_present(dev))
			err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
		else
			err = -ENODEV;
		if (err) {
			pr_warn("failed to kill vid %04x/%d for device %s\n",
				ntohs(proto), vid, dev->name);
		}
	}

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

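/* Note (added commentary): dev->vlan_info is read locklessly in
 * __vlan_find_dev_deep_rcu(), so the teardown above first publishes NULL
 * and only frees the old vlan_info after a grace period via call_rcu(),
 * guaranteeing no RCU reader still sees the freed structure.
 */
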
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

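/* Illustrative only: the by_dev helpers exist for master/slave setups.
 * Bonding, for example, mirrors the master's VID filters onto a new slave
 * and removes them again on release:
 *
 *	err = vlan_vids_add_by_dev(slave_dev, bond_dev);
 *	...
 *	vlan_vids_del_by_dev(slave_dev, bond_dev);
 *
 * On partial failure, vlan_vids_add_by_dev() unwinds the VIDs it already
 * added (see the unwind label above) before returning the error.
 */
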
void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

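/* Illustrative only: vlan_uses_dev() lets callers refuse operations that
 * would break VLANs configured on top of a device, e.g.:
 *
 *	ASSERT_RTNL();
 *	if (vlan_uses_dev(dev))
 *		return -EBUSY;
 */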