net/8021q/vlan_core.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include <net/gro.h>
#include "vlan.h"
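/*
 * vlan_do_receive - steer a hardware-accelerated VLAN-tagged skb to its
 * VLAN device
 *
 * Looks up the VLAN device matching the tag carried in *@skbp, retargets
 * the skb to it, reinserts the VLAN header when header reordering is
 * disabled, and updates the VLAN device's per-cpu rx statistics. Returns
 * false when the packet cannot be delivered; in that case the skb may
 * already have been consumed, with *@skbp set to NULL.
 */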
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point at the
		 * mac header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	u64_stats_inc(&rx_stats->rx_packets);
	u64_stats_add(&rx_stats->rx_bytes, skb->len);
	if (skb->pkt_type == PACKET_MULTICAST)
		u64_stats_inc(&rx_stats->rx_multicast);
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
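/*
 * Usage sketch (illustrative only, not part of this file): the lookup and
 * any use of the returned device must sit inside one RCU read-side
 * critical section.
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep_rcu(real_dev, htons(ETH_P_8021Q), 100);
 *	if (vlan_dev)
 *		...;	// use vlan_dev before unlocking
 *	rcu_read_unlock();
 */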
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);
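/*
 * Usage sketch (illustrative only): unwrapping a VLAN netdevice with the
 * accessors above, as a driver or the stack might do.
 *
 *	if (is_vlan_dev(dev)) {
 *		struct net_device *real = vlan_dev_real_dev(dev);
 *		u16 vid = vlan_dev_vlan_id(dev);
 *		__be16 proto = vlan_dev_vlan_proto(dev);
 *		// program hardware for (real, proto, vid) ...
 *	}
 */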
/*
 * vlan info and vid list
 */
static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}
struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}
static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}
static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);
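/*
 * Example use of vlan_for_each() (illustrative only; the callback and
 * variable names are hypothetical). The caller must hold RTNL, which
 * vlan_for_each() asserts.
 *
 *	static int count_vid(struct net_device *vdev, int vid, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;	// non-zero would stop the walk
 *	}
 *
 *	int count = 0;
 *	vlan_for_each(dev, count_vid, &count);
 */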
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);
void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
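/*
 * Note: (proto, vid) entries are refcounted, so each successful
 * vlan_vid_add() must eventually be balanced by a vlan_vid_del() on the
 * same device. A minimal sketch (error handling elided):
 *
 *	ASSERT_RTNL();
 *	err = vlan_vid_add(dev, htons(ETH_P_8021Q), 100);
 *	...
 *	vlan_vid_del(dev, htons(ETH_P_8021Q), 100);
 */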
static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
			continue;
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
			continue;
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
			continue;
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
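/*
 * GRO fast path: vlan_gro_receive()/vlan_gro_complete() below let GRO look
 * through a non-accelerated VLAN header and aggregate on the encapsulated
 * protocol, flushing when the inner type has no offload handler.
 */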
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header(skb, hlen, off_vlan);
	if (unlikely(!vhdr))
		goto out;

	NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;

	type = vhdr->h_vlan_encapsulated_proto;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
					    ipv6_gro_receive, inet_gro_receive,
					    head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, nhoff + sizeof(*vhdr));

	return err;
}
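/*
 * The registrations below hook the two handlers above into GRO for both
 * VLAN ethertypes; this happens once at boot, from the fs_initcall() at
 * the end of this file.
 */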
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}
fs_initcall(vlan_offload_init);