/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>


static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)

static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
        struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

        struct ip6_tnl __rcu *collect_md_tun;
        struct ip6_tnl __rcu *collect_md_tun_erspan;
        struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a keyed tunnel with the same key; if it is not
   present, it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
 */
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
        u32 hash = ipv6_addr_hash(addr);

        return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}
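
/* A worked example of the bucketing scheme above: a keyed (remote, local)
 * tunnel with input key K lives at index HASH_ADDR(remote) ^ HASH_KEY(K)
 * of tunnels[3], and ip6gre_tunnel_lookup() probes the same index
 * (h0 ^ h1), so configuration and lookup always agree on the bucket.
 */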

#define tunnels_r_l     tunnels[3]
#define tunnels_r       tunnels[2]
#define tunnels_l       tunnels[1]
#define tunnels_wc      tunnels[0]

/* Given src, dst and key, find the appropriate tunnel for an input packet. */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
                const struct in6_addr *remote, const struct in6_addr *local,
                __be32 key, __be16 gre_proto)
{
        struct net *net = dev_net(dev);
        int link = dev->ifindex;
        unsigned int h0 = HASH_ADDR(remote);
        unsigned int h1 = HASH_KEY(key);
        struct ip6_tnl *t, *cand = NULL;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        int dev_type = (gre_proto == htons(ETH_P_TEB) ||
                        gre_proto == htons(ETH_P_ERSPAN) ||
                        gre_proto == htons(ETH_P_ERSPAN2)) ?
                       ARPHRD_ETHER : ARPHRD_IP6GRE;
        int score, cand_score = 4;
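
        /* Candidate scoring: bit 0 is set when the tunnel is bound to a
         * different link, bit 1 when only the generic ARPHRD_IP6GRE type
         * matched rather than the exact device type. A score of 0 is a
         * perfect match and ends the search; otherwise the lowest-scoring
         * candidate across all four tables wins.
         */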
        for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
                if (!ipv6_addr_equal(local, &t->parms.laddr) ||
                    !ipv6_addr_equal(remote, &t->parms.raddr) ||
                    key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
                if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
                    key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
                if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
                     (!ipv6_addr_equal(local, &t->parms.raddr) ||
                      !ipv6_addr_is_multicast(local))) ||
                    key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
                if (t->parms.i_key != key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        if (cand)
                return cand;

        if (gre_proto == htons(ETH_P_ERSPAN) ||
            gre_proto == htons(ETH_P_ERSPAN2))
                t = rcu_dereference(ign->collect_md_tun_erspan);
        else
                t = rcu_dereference(ign->collect_md_tun);

        if (t && t->dev->flags & IFF_UP)
                return t;

        dev = ign->fb_tunnel_dev;
        if (dev && dev->flags & IFF_UP)
                return netdev_priv(dev);

        return NULL;
}

static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
                const struct __ip6_tnl_parm *p)
{
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
        unsigned int h = HASH_KEY(p->i_key);
        int prio = 0;

        if (!ipv6_addr_any(local))
                prio |= 1;
        if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
                prio |= 2;
                h ^= HASH_ADDR(remote);
        }

        return &ign->tunnels[prio][h];
}

static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        if (t->parms.collect_md)
                rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        if (t->parms.collect_md)
                rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        if (t->parms.collect_md)
                rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
                                       struct ip6_tnl *t)
{
        if (t->parms.collect_md)
                rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}

static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
                const struct ip6_tnl *t)
{
        return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

        rcu_assign_pointer(t->next, rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
}

static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        struct ip6_tnl __rcu **tp;
        struct ip6_tnl *iter;

        for (tp = ip6gre_bucket(ign, t);
             (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next) {
                if (t == iter) {
                        rcu_assign_pointer(*tp, t->next);
                        break;
                }
        }
}

static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
                                          const struct __ip6_tnl_parm *parms,
                                          int type)
{
        const struct in6_addr *remote = &parms->raddr;
        const struct in6_addr *local = &parms->laddr;
        __be32 key = parms->i_key;
        int link = parms->link;
        struct ip6_tnl *t;
        struct ip6_tnl __rcu **tp;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        for (tp = __ip6gre_bucket(ign, parms);
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next)
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
                    ipv6_addr_equal(remote, &t->parms.raddr) &&
                    key == t->parms.i_key &&
                    link == t->parms.link &&
                    type == t->dev->type)
                        break;

        return t;
}

static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
                const struct __ip6_tnl_parm *parms, int create)
{
        struct ip6_tnl *t, *nt;
        struct net_device *dev;
        char name[IFNAMSIZ];
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
        if (t && create)
                return NULL;
        if (t || !create)
                return t;
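
        /* No matching tunnel exists and create was requested, so allocate
         * and register a new device below.
         */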
        if (parms->name[0]) {
                if (!dev_valid_name(parms->name))
                        return NULL;
                strlcpy(name, parms->name, IFNAMSIZ);
        } else {
                strcpy(name, "ip6gre%d");
        }
        dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
                           ip6gre_tunnel_setup);
        if (!dev)
                return NULL;

        dev_net_set(dev, net);

        nt = netdev_priv(dev);
        nt->parms = *parms;
        dev->rtnl_link_ops = &ip6gre_link_ops;

        nt->dev = dev;
        nt->net = dev_net(dev);

        if (register_netdevice(dev) < 0)
                goto failed_free;

        ip6gre_tnl_link_config(nt, 1);

        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & TUNNEL_SEQ))
                dev->features |= NETIF_F_LLTX;

        dev_hold(dev);
        ip6gre_tunnel_link(ign, nt);
        return nt;

failed_free:
        free_netdev(dev);
        return NULL;
}

static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

        ip6erspan_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        dst_cache_reset(&t->dst_cache);
        dev_put(dev);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        dst_cache_reset(&t->dst_cache);
        dev_put(dev);
}


static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                       u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct gre_base_hdr *greh;
        const struct ipv6hdr *ipv6h;
        int grehlen = sizeof(*greh);
        struct ip6_tnl *t;
        int key_off = 0;
        __be16 flags;
        __be32 key;

        if (!pskb_may_pull(skb, offset + grehlen))
                return;
        greh = (const struct gre_base_hdr *)(skb->data + offset);
        flags = greh->flags;
        if (flags & (GRE_VERSION | GRE_ROUTING))
                return;
        if (flags & GRE_CSUM)
                grehlen += 4;
        if (flags & GRE_KEY) {
                key_off = grehlen + offset;
                grehlen += 4;
        }

        if (!pskb_may_pull(skb, offset + grehlen))
                return;
        ipv6h = (const struct ipv6hdr *)skb->data;
        greh = (const struct gre_base_hdr *)(skb->data + offset);
        key = key_off ? *(__be32 *)(skb->data + key_off) : 0;

        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
                                 key, greh->protocol);
        if (!t)
                return;
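
        /* 'type' and 'code' describe the ICMPv6 error received for the
         * outer (tunnel) header; the switch below translates it into
         * per-tunnel diagnostics, a PMTU update or a redirect.
         */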
        switch (type) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                __u32 teli;
        case ICMPV6_DEST_UNREACH:
                net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
                                    t->parms.name);
                if (code != ICMPV6_PORT_UNREACH)
                        break;
                return;
        case ICMPV6_TIME_EXCEED:
                if (code == ICMPV6_EXC_HOPLIMIT) {
                        net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
                                            t->parms.name);
                        break;
                }
                return;
        case ICMPV6_PARAMPROB:
                teli = 0;
                if (code == ICMPV6_HDR_FIELD)
                        teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

                if (teli && teli == be32_to_cpu(info) - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
                                                    t->parms.name);
                        }
                } else {
                        net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
                                            t->parms.name);
                }
                return;
        case ICMPV6_PKT_TOOBIG:
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
                return;
        case NDISC_REDIRECT:
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
                return;
        }

        if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
}

static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
        const struct ipv6hdr *ipv6h;
        struct ip6_tnl *tunnel;

        ipv6h = ipv6_hdr(skb);
        tunnel = ip6gre_tunnel_lookup(skb->dev,
                                      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
                                      tpi->proto);
        if (tunnel) {
                if (tunnel->parms.collect_md) {
                        struct metadata_dst *tun_dst;
                        __be64 tun_id;
                        __be16 flags;

                        flags = tpi->flags;
                        tun_id = key32_to_tunnel_id(tpi->key);

                        tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
                        if (!tun_dst)
                                return PACKET_REJECT;

                        ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                } else {
                        ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
                }

                return PACKET_RCVD;
        }

        return PACKET_REJECT;
}
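
/* PACKET_RCVD means the skb was consumed by the tunnel; on PACKET_REJECT
 * the caller (gre_rcv) answers with ICMPv6 port-unreachable and frees
 * the skb.
 */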
static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                         int gre_hdr_len)
{
        struct erspan_base_hdr *ershdr;
        const struct ipv6hdr *ipv6h;
        struct erspan_md2 *md2;
        struct ip6_tnl *tunnel;
        u8 ver;

        ipv6h = ipv6_hdr(skb);
        ershdr = (struct erspan_base_hdr *)skb->data;
        ver = ershdr->ver;

        tunnel = ip6gre_tunnel_lookup(skb->dev,
                                      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
                                      tpi->proto);
        if (tunnel) {
                int len = erspan_hdr_len(ver);

                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;

                if (__iptunnel_pull_header(skb, len,
                                           htons(ETH_P_TEB),
                                           false, false) < 0)
                        return PACKET_REJECT;

                if (tunnel->parms.collect_md) {
                        struct erspan_metadata *pkt_md, *md;
                        struct metadata_dst *tun_dst;
                        struct ip_tunnel_info *info;
                        unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;

                        tpi->flags |= TUNNEL_KEY;
                        flags = tpi->flags;
                        tun_id = key32_to_tunnel_id(tpi->key);

                        tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
                                                  sizeof(*md));
                        if (!tun_dst)
                                return PACKET_REJECT;

                        /* skb can be uncloned in __iptunnel_pull_header, so
                         * old pkt_md is no longer valid and we need to reset
                         * it
                         */
                        gh = skb_network_header(skb) +
                             skb_network_header_len(skb);
                        pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
                                                            sizeof(*ershdr));
                        info = &tun_dst->u.tun_info;
                        md = ip_tunnel_info_opts(info);
                        md->version = ver;
                        md2 = &md->u.md2;
                        memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
                                                       ERSPAN_V2_MDSIZE);
                        info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
                        info->options_len = sizeof(*md);

                        ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);

                } else {
                        ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
                }

                return PACKET_RCVD;
        }

        return PACKET_REJECT;
}
static int gre_rcv(struct sk_buff *skb)
{
        struct tnl_ptk_info tpi;
        bool csum_err = false;
        int hdr_len;

        hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
        if (hdr_len < 0)
                goto drop;

        if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
                goto drop;

        if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
                if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
                goto out;
        }

        if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
                return 0;

out:
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
        kfree_skb(skb);
        return 0;
}
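
/* Receive path overview (a simplified sketch of the call chain):
 * inet6 protocol handler -> gre_rcv() -> gre_parse_header(), then either
 * ip6erspan_rcv() or ip6gre_rcv() depending on tpi.proto, and finally
 * ip6_tnl_rcv() delivers the inner packet to the tunnel device.
 */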
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
        return iptunnel_handle_offloads(skb,
                                        csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
                                     struct net_device *dev,
                                     struct flowi6 *fl6, __u8 *dsfield,
                                     int *encap_limit)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct ip6_tnl *t = netdev_priv(dev);

        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                *encap_limit = t->parms.encap_limit;

        memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
                *dsfield = ipv4_get_dsfield(iph);
        else
                *dsfield = ip6_tclass(t->parms.flowinfo);

        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6->flowi6_mark = skb->mark;
        else
                fl6->flowi6_mark = t->parms.fwmark;

        fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}

static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
                                    struct net_device *dev,
                                    struct flowi6 *fl6, __u8 *dsfield,
                                    int *encap_limit)
{
        struct ipv6hdr *ipv6h;
        struct ip6_tnl *t = netdev_priv(dev);
        __u16 offset;

        offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
        /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
        ipv6h = ipv6_hdr(skb);

        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;

                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
                if (tel->encap_limit == 0) {
                        icmpv6_send(skb, ICMPV6_PARAMPROB,
                                    ICMPV6_HDR_FIELD, offset + 2);
                        return -1;
                }
                *encap_limit = tel->encap_limit - 1;
        } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
                *encap_limit = t->parms.encap_limit;
        }

        memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
                *dsfield = ipv6_get_dsfield(ipv6h);
        else
                *dsfield = ip6_tclass(t->parms.flowinfo);

        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
                fl6->flowlabel |= ip6_flowlabel(ipv6h);

        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6->flowi6_mark = skb->mark;
        else
                fl6->flowi6_mark = t->parms.fwmark;

        fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

        return 0;
}
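
/* Note on the encapsulation limit handling above: a tunnel encapsulation
 * limit option (RFC 2473) found in the inner IPv6 packet overrides the
 * configured limit. It is forwarded decremented by one per nesting level,
 * and a value of 0 rejects the packet with an ICMPv6 parameter problem
 * pointing at the offending header field.
 */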
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                               struct net_device *dev, __u8 dsfield,
                               struct flowi6 *fl6, int encap_limit,
                               __u32 *pmtu, __be16 proto)
{
        struct ip6_tnl *tunnel = netdev_priv(dev);
        __be16 protocol;

        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;

        if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
                fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
        else
                fl6->daddr = tunnel->parms.raddr;

        if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
                return -ENOMEM;

        /* Push GRE header. */
        protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

        if (tunnel->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
                __be16 flags;

                tun_info = skb_tunnel_info(skb);
                if (unlikely(!tun_info ||
                             !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                             ip_tunnel_info_af(tun_info) != AF_INET6))
                        return -EINVAL;

                key = &tun_info->key;
                memset(fl6, 0, sizeof(*fl6));
                fl6->flowi6_proto = IPPROTO_GRE;
                fl6->daddr = key->u.ipv6.dst;
                fl6->flowlabel = key->label;
                fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

                dsfield = key->tos;
                flags = key->tun_flags &
                        (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
                tunnel->tun_hlen = gre_calc_hlen(flags);

                gre_build_header(skb, tunnel->tun_hlen,
                                 flags, protocol,
                                 tunnel_id_to_key32(tun_info->key.tun_id),
                                 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
                                                      : 0);

        } else {
                if (tunnel->parms.o_flags & TUNNEL_SEQ)
                        tunnel->o_seqno++;

                gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
                                 protocol, tunnel->parms.o_key,
                                 htonl(tunnel->o_seqno));
        }

        return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
                            NEXTHDR_GRE);
}
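
/* In collect_md mode the GRE header is rebuilt per packet from the
 * metadata dst (flags, key and optional sequence number supplied by the
 * caller, e.g. an OVS or LWT user); in native mode it comes from the
 * device's configured o_flags/o_key. Both paths end in ip6_tnl_xmit().
 */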
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        int encap_limit = -1;
        struct flowi6 fl6;
        __u8 dsfield = 0;
        __u32 mtu;
        int err;

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        if (!t->parms.collect_md)
                prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
                                         &dsfield, &encap_limit);

        err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
        if (err)
                return -1;

        err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
                          skb->protocol);
        if (err != 0) {
                /* XXX: send ICMP error even if DF is not set. */
                if (err == -EMSGSIZE)
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
                return -1;
        }

        return 0;
}

static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int encap_limit = -1;
        struct flowi6 fl6;
        __u8 dsfield = 0;
        __u32 mtu;
        int err;

        if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
                return -1;

        if (!t->parms.collect_md &&
            prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
                return -1;

        if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
                return -1;

        err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
                          &mtu, skb->protocol);
        if (err != 0) {
                if (err == -EMSGSIZE)
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                return -1;
        }

        return 0;
}

/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
        const struct ipv6hdr *hdr)
{
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        int encap_limit = -1;
        struct flowi6 fl6;
        __u32 mtu;
        int err;

        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;

        if (!t->parms.collect_md)
                memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

        err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
        if (err)
                return err;

        err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

        return err;
}

static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->dev->stats;
        int ret;

        if (!pskb_inet_may_pull(skb))
                goto tx_err;

        if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
                goto tx_err;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                ret = ip6gre_xmit_ipv4(skb, dev);
                break;
        case htons(ETH_P_IPV6):
                ret = ip6gre_xmit_ipv6(skb, dev);
                break;
        default:
                ret = ip6gre_xmit_other(skb, dev);
                break;
        }

        if (ret < 0)
                goto tx_err;

        return NETDEV_TX_OK;

tx_err:
        stats->tx_errors++;
        stats->tx_dropped++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct dst_entry *dst = skb_dst(skb);
        struct net_device_stats *stats;
        bool truncate = false;
        int encap_limit = -1;
        __u8 dsfield = false;
        struct flowi6 fl6;
        int err = -EINVAL;
        __be16 proto;
        __u32 mtu;
        int nhoff;
        int thoff;

        if (!pskb_inet_may_pull(skb))
                goto tx_err;

        if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
                goto tx_err;

        if (gre_handle_offloads(skb, false))
                goto tx_err;

        if (skb->len > dev->mtu + dev->hard_header_len) {
                pskb_trim(skb, dev->mtu + dev->hard_header_len);
                truncate = true;
        }

        nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (skb->protocol == htons(ETH_P_IP) &&
            (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
                truncate = true;

        thoff = skb_transport_header(skb) - skb_mac_header(skb);
        if (skb->protocol == htons(ETH_P_IPV6) &&
            (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
                truncate = true;
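
        /* 'truncate' is set when the frame was trimmed to the device MTU
         * above, or when the inner IPv4/IPv6 header claims more payload
         * than the skb actually carries; the ERSPAN header built below
         * advertises this to the collector via its truncation bit.
         */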
        if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
                goto tx_err;

        t->parms.o_flags &= ~TUNNEL_KEY;
        IPCB(skb)->flags = 0;

        /* For collect_md mode, derive fl6 from the tunnel key,
         * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
         */
        if (t->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
                struct erspan_metadata *md;
                __be32 tun_id;

                tun_info = skb_tunnel_info(skb);
                if (unlikely(!tun_info ||
                             !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                             ip_tunnel_info_af(tun_info) != AF_INET6))
                        goto tx_err;

                key = &tun_info->key;
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_GRE;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
                fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

                dsfield = key->tos;
                if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                        goto tx_err;
                if (tun_info->options_len < sizeof(*md))
                        goto tx_err;
                md = ip_tunnel_info_opts(tun_info);

                tun_id = tunnel_id_to_key32(key->tun_id);
                if (md->version == 1) {
                        erspan_build_header(skb,
                                            ntohl(tun_id),
                                            ntohl(md->u.index), truncate,
                                            false);
                } else if (md->version == 2) {
                        erspan_build_header_v2(skb,
                                               ntohl(tun_id),
                                               md->u.md2.dir,
                                               get_hwid(&md->u.md2),
                                               truncate, false);
                } else {
                        goto tx_err;
                }
        } else {
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                        prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
                                                 &dsfield, &encap_limit);
                        break;
                case htons(ETH_P_IPV6):
                        if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
                                goto tx_err;
                        if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
                                                     &dsfield, &encap_limit))
                                goto tx_err;
                        break;
                default:
                        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
                        break;
                }

                if (t->parms.erspan_ver == 1)
                        erspan_build_header(skb, ntohl(t->parms.o_key),
                                            t->parms.index,
                                            truncate, false);
                else if (t->parms.erspan_ver == 2)
                        erspan_build_header_v2(skb, ntohl(t->parms.o_key),
                                               t->parms.dir,
                                               t->parms.hwid,
                                               truncate, false);
                else
                        goto tx_err;

                fl6.daddr = t->parms.raddr;
        }

        /* Push GRE header. */
        proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
                                           : htons(ETH_P_ERSPAN2);
        gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));

        /* TooBig packet may have updated dst->dev's mtu */
        if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
                dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
                           NEXTHDR_GRE);
        if (err != 0) {
                /* XXX: send ICMP error even if DF is not set. */
                if (err == -EMSGSIZE) {
                        if (skb->protocol == htons(ETH_P_IP))
                                icmp_send(skb, ICMP_DEST_UNREACH,
                                          ICMP_FRAG_NEEDED, htonl(mtu));
                        else
                                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                }

                goto tx_err;
        }
        return NETDEV_TX_OK;

tx_err:
        stats = &t->dev->stats;
        stats->tx_errors++;
        stats->tx_dropped++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
        struct net_device *dev = t->dev;
        struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;

        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
                memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
        }

        /* Set up flowi template */
        fl6->saddr = p->laddr;
        fl6->daddr = p->raddr;
        fl6->flowi6_oif = p->link;
        fl6->flowlabel = 0;
        fl6->flowi6_proto = IPPROTO_GRE;

        if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
                fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
        if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
                fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

        p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
        p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

        if (p->flags&IP6_TNL_F_CAP_XMIT &&
            p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
                dev->flags |= IFF_POINTOPOINT;
        else
                dev->flags &= ~IFF_POINTOPOINT;
}

static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
                                         int t_hlen)
{
        const struct __ip6_tnl_parm *p = &t->parms;
        struct net_device *dev = t->dev;

        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

                struct rt6_info *rt = rt6_lookup(t->net,
                                                 &p->raddr, &p->laddr,
                                                 p->link, NULL, strict);

                if (!rt)
                        return;

                if (rt->dst.dev) {
                        dev->needed_headroom = rt->dst.dev->hard_header_len +
                                               t_hlen;

                        if (set_mtu) {
                                dev->mtu = rt->dst.dev->mtu - t_hlen;
                                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                                        dev->mtu -= 8;
                                if (dev->type == ARPHRD_ETHER)
                                        dev->mtu -= ETH_HLEN;

                                if (dev->mtu < IPV6_MIN_MTU)
                                        dev->mtu = IPV6_MIN_MTU;
                        }
                }
                ip6_rt_put(rt);
        }
}

static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
        int t_hlen;

        tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

        t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
        tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
        return t_hlen;
}
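
/* Header length example, assuming TUNNEL_CSUM | TUNNEL_KEY in o_flags:
 * gre_calc_hlen() returns 4 (base) + 4 (csum) + 4 (key) = 12, so
 * t_hlen = 12 + encap_hlen + 40 (outer IPv6 header), and the device MTU
 * derived from it in ip6gre_tunnel_init_common() is ETH_DATA_LEN minus
 * t_hlen (minus 8 more when an encap limit option will be inserted).
 */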

static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
        ip6gre_tnl_link_config_common(t);
        ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}

static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
                                     const struct __ip6_tnl_parm *p)
{
        t->parms.laddr = p->laddr;
        t->parms.raddr = p->raddr;
        t->parms.flags = p->flags;
        t->parms.hop_limit = p->hop_limit;
        t->parms.encap_limit = p->encap_limit;
        t->parms.flowinfo = p->flowinfo;
        t->parms.link = p->link;
        t->parms.proto = p->proto;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        t->parms.i_flags = p->i_flags;
        t->parms.o_flags = p->o_flags;
        t->parms.fwmark = p->fwmark;
        t->parms.erspan_ver = p->erspan_ver;
        t->parms.index = p->index;
        t->parms.dir = p->dir;
        t->parms.hwid = p->hwid;
        dst_cache_reset(&t->dst_cache);
}

static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
                             int set_mtu)
{
        ip6gre_tnl_copy_tnl_parm(t, p);
        ip6gre_tnl_link_config(t, set_mtu);
        return 0;
}

static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
                                      const struct ip6_tnl_parm2 *u)
{
        p->laddr = u->laddr;
        p->raddr = u->raddr;
        p->flags = u->flags;
        p->hop_limit = u->hop_limit;
        p->encap_limit = u->encap_limit;
        p->flowinfo = u->flowinfo;
        p->link = u->link;
        p->i_key = u->i_key;
        p->o_key = u->o_key;
        p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
        p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
        memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
                                    const struct __ip6_tnl_parm *p)
{
        u->proto = IPPROTO_GRE;
        u->laddr = p->laddr;
        u->raddr = p->raddr;
        u->flags = p->flags;
        u->hop_limit = p->hop_limit;
        u->encap_limit = p->encap_limit;
        u->flowinfo = p->flowinfo;
        u->link = p->link;
        u->i_key = p->i_key;
        u->o_key = p->o_key;
        u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
        u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
        memcpy(u->name, p->name, sizeof(u->name));
}

static int ip6gre_tunnel_ioctl(struct net_device *dev,
        struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip6_tnl_parm2 p;
        struct __ip6_tnl_parm p1;
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = t->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        memset(&p1, 0, sizeof(p1));

        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == ign->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                err = -EFAULT;
                                break;
                        }
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
                        if (!t)
                                t = netdev_priv(dev);
                }
                memset(&p, 0, sizeof(p));
                ip6gre_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        goto done;

                err = -EINVAL;
                if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
                        goto done;

                if (!(p.i_flags&GRE_KEY))
                        p.i_key = 0;
                if (!(p.o_flags&GRE_KEY))
                        p.o_key = 0;

                ip6gre_tnl_parm_from_user(&p1, &p);
                t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

                if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                t = netdev_priv(dev);
                        }

                        ip6gre_tunnel_unlink(ign, t);
                        synchronize_net();
                        ip6gre_tnl_change(t, &p1, 1);
                        ip6gre_tunnel_link(ign, t);
                        netdev_state_change(dev);
                }

                if (t) {
                        err = 0;

                        memset(&p, 0, sizeof(p));
                        ip6gre_tnl_parm_to_user(&p, &t->parms);
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == ign->fb_tunnel_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                                goto done;
                        err = -ENOENT;
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
                        if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(ign->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}

static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
                         unsigned short type, const void *daddr,
                         const void *saddr, unsigned int len)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct ipv6hdr *ipv6h;
        __be16 *p;

        ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
        ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
                                                  t->fl.u.ip6.flowlabel,
                                                  true, &t->fl.u.ip6));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
        ipv6h->daddr = t->parms.raddr;

        p = (__be16 *)(ipv6h + 1);
        p[0] = t->parms.o_flags;
        p[1] = htons(type);

        /*
         *      Set the source hardware address.
         */

        if (saddr)
                memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
        if (daddr)
                memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
        if (!ipv6_addr_any(&ipv6h->daddr))
                return t->hlen;

        return -t->hlen;
}
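
/* ip6gre_header() is installed (see ip6gre_tunnel_init) only for
 * ARPHRD_IP6GRE devices with an unspecified remote, where the caller's
 * daddr completes the pre-built outer header per packet, much like the
 * NBMA mode of the IPv4 ipgre driver.
 */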
static const struct header_ops ip6gre_header_ops = {
        .create = ip6gre_header,
};

static const struct net_device_ops ip6gre_netdev_ops = {
        .ndo_init               = ip6gre_tunnel_init,
        .ndo_uninit             = ip6gre_tunnel_uninit,
        .ndo_start_xmit         = ip6gre_tunnel_xmit,
        .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
        .ndo_change_mtu         = ip6_tnl_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip6_tnl_get_iflink,
};

static void ip6gre_dev_free(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);

        gro_cells_destroy(&t->gro_cells);
        dst_cache_destroy(&t->dst_cache);
        free_percpu(dev->tstats);
}

static void ip6gre_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops = &ip6gre_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;

        dev->type = ARPHRD_IP6GRE;

        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
        /* This perm addr will be used as interface identifier by IPv6 */
        dev->addr_assign_type = NET_ADDR_RANDOM;
        eth_random_addr(dev->perm_addr);
}

#define GRE6_FEATURES (NETIF_F_SG |             \
                       NETIF_F_FRAGLIST |       \
                       NETIF_F_HIGHDMA |        \
                       NETIF_F_HW_CSUM)

static void ip6gre_tnl_init_features(struct net_device *dev)
{
        struct ip6_tnl *nt = netdev_priv(dev);

        dev->features           |= GRE6_FEATURES;
        dev->hw_features        |= GRE6_FEATURES;

        if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
                /* TCP offload with GRE SEQ is not supported, nor
                 * can we support 2 levels of outer headers requiring
                 * an update.
                 */
                if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
                    nt->encap.type == TUNNEL_ENCAP_NONE) {
                        dev->features    |= NETIF_F_GSO_SOFTWARE;
                        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
                }

                /* Can use a lockless transmit, unless we generate
                 * output sequences
                 */
                dev->features |= NETIF_F_LLTX;
        }
}

static int ip6gre_tunnel_init_common(struct net_device *dev)
{
        struct ip6_tnl *tunnel;
        int ret;
        int t_hlen;

        tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);

        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (ret)
                goto cleanup_alloc_pcpu_stats;

        ret = gro_cells_init(&tunnel->gro_cells, dev);
        if (ret)
                goto cleanup_dst_cache_init;

        t_hlen = ip6gre_calc_hlen(tunnel);
        dev->mtu = ETH_DATA_LEN - t_hlen;
        if (dev->type == ARPHRD_ETHER)
                dev->mtu -= ETH_HLEN;
        if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;

        if (tunnel->parms.collect_md) {
                netif_keep_dst(dev);
        }
        ip6gre_tnl_init_features(dev);

        return 0;

cleanup_dst_cache_init:
        dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
        free_percpu(dev->tstats);
        dev->tstats = NULL;
        return ret;
}

static int ip6gre_tunnel_init(struct net_device *dev)
{
        struct ip6_tnl *tunnel;
        int ret;

        ret = ip6gre_tunnel_init_common(dev);
        if (ret)
                return ret;

        tunnel = netdev_priv(dev);

        if (tunnel->parms.collect_md)
                return 0;

        memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

        if (ipv6_addr_any(&tunnel->parms.raddr))
                dev->header_ops = &ip6gre_header_ops;

        return 0;
}

static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
        struct ip6_tnl *tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);

        tunnel->hlen            = sizeof(struct ipv6hdr) + 4;

        dev_hold(dev);
}

static struct inet6_protocol ip6gre_protocol __read_mostly = {
        .handler     = gre_rcv,
        .err_handler = ip6gre_err,
        .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        struct net_device *dev, *aux;
        int prio;

        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == &ip6gre_link_ops ||
                    dev->rtnl_link_ops == &ip6gre_tap_ops ||
                    dev->rtnl_link_ops == &ip6erspan_tap_ops)
                        unregister_netdevice_queue(dev, head);

        for (prio = 0; prio < 4; prio++) {
                int h;
                for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
                        struct ip6_tnl *t;

                        t = rtnl_dereference(ign->tunnels[prio][h]);

                        while (t) {
                                /* If dev is in the same netns, it has already
                                 * been added to the list by the previous loop.
                                 */
                                if (!net_eq(dev_net(t->dev), net))
                                        unregister_netdevice_queue(t->dev,
                                                                   head);
                                t = rtnl_dereference(t->next);
                        }
                }
        }
}
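
/* Teardown is two passes: first every ip6gre/ip6gretap/ip6erspan device
 * registered in this netns, then tunnels still hashed here whose device
 * was moved to another netns and was therefore missed by the first walk.
 */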
static int __net_init ip6gre_init_net(struct net *net)
{
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        int err;

        if (!net_has_fallback_tunnels(net))
                return 0;
        ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
                                          NET_NAME_UNKNOWN,
                                          ip6gre_tunnel_setup);
        if (!ign->fb_tunnel_dev) {
                err = -ENOMEM;
                goto err_alloc_dev;
        }
        dev_net_set(ign->fb_tunnel_dev, net);
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
        ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

        ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
        ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

        err = register_netdev(ign->fb_tunnel_dev);
        if (err)
                goto err_reg_dev;

        rcu_assign_pointer(ign->tunnels_wc[0],
                           netdev_priv(ign->fb_tunnel_dev));
        return 0;

err_reg_dev:
        free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
        return err;
}

static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
        struct net *net;
        LIST_HEAD(list);

        rtnl_lock();
        list_for_each_entry(net, net_list, exit_list)
                ip6gre_destroy_tunnels(net, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}

static struct pernet_operations ip6gre_net_ops = {
        .init = ip6gre_init_net,
        .exit_batch = ip6gre_exit_batch_net,
        .id   = &ip6gre_net_id,
        .size = sizeof(struct ip6gre_net),
};

static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
                                  struct netlink_ext_ack *extack)
{
        __be16 flags;

        if (!data)
                return 0;

        flags = 0;
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (flags & (GRE_VERSION|GRE_ROUTING))
                return -EINVAL;

        return 0;
}

static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
                               struct netlink_ext_ack *extack)
{
        struct in6_addr daddr;

        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }

        if (!data)
                goto out;

        if (data[IFLA_GRE_REMOTE]) {
                daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
                if (ipv6_addr_any(&daddr))
                        return -EINVAL;
        }

out:
        return ip6gre_tunnel_validate(tb, data, extack);
}

static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
                                  struct netlink_ext_ack *extack)
{
        __be16 flags = 0;
        int ret, ver = 0;

        if (!data)
                return 0;

        ret = ip6gre_tap_validate(tb, data, extack);
        if (ret)
                return ret;

        /* ERSPAN should only have GRE sequence and key flag */
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (!data[IFLA_GRE_COLLECT_METADATA] &&
            flags != (GRE_SEQ | GRE_KEY))
                return -EINVAL;

        /* The ERSPAN session ID is only 10 bits. Since we reuse the
         * 32-bit key field as the ID, check its range.
         */
        if (data[IFLA_GRE_IKEY] &&
            (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
                return -EINVAL;

        if (data[IFLA_GRE_OKEY] &&
            (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
                return -EINVAL;

        if (data[IFLA_GRE_ERSPAN_VER]) {
                ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
                if (ver != 1 && ver != 2)
                        return -EINVAL;
        }

        if (ver == 1) {
                if (data[IFLA_GRE_ERSPAN_INDEX]) {
                        u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

                        if (index & ~INDEX_MASK)
                                return -EINVAL;
                }
        } else if (ver == 2) {
                if (data[IFLA_GRE_ERSPAN_DIR]) {
                        u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

                        if (dir & ~(DIR_MASK >> DIR_OFFSET))
                                return -EINVAL;
                }

                if (data[IFLA_GRE_ERSPAN_HWID]) {
                        u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

                        if (hwid & ~(HWID_MASK >> HWID_OFFSET))
                                return -EINVAL;
                }
        }

        return 0;
}
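
/* For reference, a native ERSPAN v2 setup that satisfies the checks
 * above might look like this with iproute2 (addresses are examples):
 *
 *   ip link add dev ip6erspan1 type ip6erspan seq key 100 \
 *           local db01::1 remote db02::2 \
 *           erspan_ver 2 erspan_dir egress erspan_hwid 7
 *
 * i.e. exactly the seq and key GRE flags, a key within the 10-bit
 * session-ID range, and dir/hwid within their field masks.
 */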
static void ip6erspan_set_version(struct nlattr *data[],
                                  struct __ip6_tnl_parm *parms)
{
        if (!data)
                return;

        parms->erspan_ver = 1;
        if (data[IFLA_GRE_ERSPAN_VER])
                parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

        if (parms->erspan_ver == 1) {
                if (data[IFLA_GRE_ERSPAN_INDEX])
                        parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
        } else if (parms->erspan_ver == 2) {
                if (data[IFLA_GRE_ERSPAN_DIR])
                        parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
                if (data[IFLA_GRE_ERSPAN_HWID])
                        parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
        }
}

static void ip6gre_netlink_parms(struct nlattr *data[],
                                struct __ip6_tnl_parm *parms)
{
        memset(parms, 0, sizeof(*parms));

        if (!data)
                return;

        if (data[IFLA_GRE_LINK])
                parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

        if (data[IFLA_GRE_IFLAGS])
                parms->i_flags = gre_flags_to_tnl_flags(
                                nla_get_be16(data[IFLA_GRE_IFLAGS]));

        if (data[IFLA_GRE_OFLAGS])
                parms->o_flags = gre_flags_to_tnl_flags(
                                nla_get_be16(data[IFLA_GRE_OFLAGS]));

        if (data[IFLA_GRE_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

        if (data[IFLA_GRE_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

        if (data[IFLA_GRE_LOCAL])
                parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

        if (data[IFLA_GRE_REMOTE])
                parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

        if (data[IFLA_GRE_TTL])
                parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

        if (data[IFLA_GRE_ENCAP_LIMIT])
                parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

        if (data[IFLA_GRE_FLOWINFO])
                parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

        if (data[IFLA_GRE_FLAGS])
                parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

        if (data[IFLA_GRE_FWMARK])
                parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
}

static int ip6gre_tap_init(struct net_device *dev)
{
        int ret;

        ret = ip6gre_tunnel_init_common(dev);
        if (ret)
                return ret;

        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

        return 0;
}

static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_init = ip6gre_tap_init,
        .ndo_uninit = ip6gre_tunnel_uninit,
        .ndo_start_xmit = ip6gre_tunnel_xmit,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ip6_tnl_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
        .ndo_get_iflink = ip6_tnl_get_iflink,
};

static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
        int t_hlen;

        tunnel->tun_hlen = 8;
        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
                       erspan_hdr_len(tunnel->parms.erspan_ver);

        t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
        tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
        return t_hlen;
}

static int ip6erspan_tap_init(struct net_device *dev)
{
        struct ip6_tnl *tunnel;
        int t_hlen;
        int ret;

        tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);

        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (ret)
                goto cleanup_alloc_pcpu_stats;

        ret = gro_cells_init(&tunnel->gro_cells, dev);
        if (ret)
                goto cleanup_dst_cache_init;

        t_hlen = ip6erspan_calc_hlen(tunnel);
        dev->mtu = ETH_DATA_LEN - t_hlen;
        if (dev->type == ARPHRD_ETHER)
                dev->mtu -= ETH_HLEN;
        if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;

        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip6erspan_tnl_link_config(tunnel, 1);

        return 0;

cleanup_dst_cache_init:
        dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
        free_percpu(dev->tstats);
        dev->tstats = NULL;
        return ret;
}

static const struct net_device_ops ip6erspan_netdev_ops = {
        .ndo_init =             ip6erspan_tap_init,
        .ndo_uninit =           ip6erspan_tunnel_uninit,
        .ndo_start_xmit =       ip6erspan_tunnel_xmit,
        .ndo_set_mac_address =  eth_mac_addr,
        .ndo_validate_addr =    eth_validate_addr,
        .ndo_change_mtu =       ip6_tnl_change_mtu,
        .ndo_get_stats64 =      ip_tunnel_get_stats64,
        .ndo_get_iflink =       ip6_tnl_get_iflink,
};

static void ip6gre_tap_setup(struct net_device *dev)
{

        ether_setup(dev);

        dev->max_mtu = 0;
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
}

bool is_ip6gretap_dev(const struct net_device *dev)
{
        return dev->netdev_ops == &ip6gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_ip6gretap_dev);

static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
                                       struct ip_tunnel_encap *ipencap)
{
        bool ret = false;

        memset(ipencap, 0, sizeof(*ipencap));

        if (!data)
                return ret;

        if (data[IFLA_GRE_ENCAP_TYPE]) {
                ret = true;
                ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
        }

        if (data[IFLA_GRE_ENCAP_FLAGS]) {
                ret = true;
                ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
        }

        if (data[IFLA_GRE_ENCAP_SPORT]) {
                ret = true;
                ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
        }

        if (data[IFLA_GRE_ENCAP_DPORT]) {
                ret = true;
                ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
        }

        return ret;
}

static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
                                 struct nlattr *tb[], struct nlattr *data[],
                                 struct netlink_ext_ack *extack)
{
        struct ip6_tnl *nt;
        struct ip_tunnel_encap ipencap;
        int err;

        nt = netdev_priv(dev);

        if (ip6gre_netlink_encap_parms(data, &ipencap)) {
                int err = ip6_tnl_encap_setup(nt, &ipencap);

                if (err < 0)
                        return err;
        }

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        nt->dev = dev;
        nt->net = dev_net(dev);

        err = register_netdevice(dev);
        if (err)
                goto out;

        if (tb[IFLA_MTU])
                ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

        dev_hold(dev);

out:
        return err;
}

static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[],
                          struct netlink_ext_ack *extack)
{
        struct ip6_tnl *nt = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign;
        int err;

        ip6gre_netlink_parms(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);

        if (nt->parms.collect_md) {
                if (rtnl_dereference(ign->collect_md_tun))
                        return -EEXIST;
        } else {
                if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
                        return -EEXIST;
        }
        err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
        if (!err) {
                ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
                ip6gre_tunnel_link_md(ign, nt);
                ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
        }
        return err;
}

static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
                         struct nlattr *data[], struct __ip6_tnl_parm *p_p,
                         struct netlink_ext_ack *extack)
{
        struct ip6_tnl *t, *nt = netdev_priv(dev);
        struct net *net = nt->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        struct ip_tunnel_encap ipencap;

        if (dev == ign->fb_tunnel_dev)
                return ERR_PTR(-EINVAL);

        if (ip6gre_netlink_encap_parms(data, &ipencap)) {
                int err = ip6_tnl_encap_setup(nt, &ipencap);

                if (err < 0)
                        return ERR_PTR(err);
        }

        ip6gre_netlink_parms(data, p_p);

        t = ip6gre_tunnel_locate(net, p_p, 0);

        if (t) {
                if (t->dev != dev)
                        return ERR_PTR(-EEXIST);
        } else {
                t = nt;
        }

        return t;
}

static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
                             struct nlattr *data[],
                             struct netlink_ext_ack *extack)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
        struct __ip6_tnl_parm p;

        t = ip6gre_changelink_common(dev, tb, data, &p, extack);
        if (IS_ERR(t))
                return PTR_ERR(t);

        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
        ip6gre_tunnel_link_md(ign, t);
        ip6gre_tunnel_link(ign, t);
        return 0;
}

static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        if (dev != ign->fb_tunnel_dev)
                unregister_netdevice_queue(dev, head);
}

static size_t ip6gre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_LINK */
                nla_total_size(4) +
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_IKEY */
                nla_total_size(4) +
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_ENCAP_LIMIT */
                nla_total_size(1) +
                /* IFLA_GRE_FLOWINFO */
                nla_total_size(4) +
                /* IFLA_GRE_FLAGS */
                nla_total_size(4) +
                /* IFLA_GRE_ENCAP_TYPE */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_FLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_SPORT */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_DPORT */
                nla_total_size(2) +
                /* IFLA_GRE_COLLECT_METADATA */
                nla_total_size(0) +
                /* IFLA_GRE_FWMARK */
                nla_total_size(4) +
                /* IFLA_GRE_ERSPAN_INDEX */
                nla_total_size(4) +
                0;
}

static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct __ip6_tnl_parm *p = &t->parms;
        __be16 o_flags = p->o_flags;

        if (p->erspan_ver == 1 || p->erspan_ver == 2) {
                if (!p->collect_md)
                        o_flags |= TUNNEL_KEY;

                if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
                        goto nla_put_failure;

                if (p->erspan_ver == 1) {
                        if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
                                goto nla_put_failure;
                } else {
                        if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
                                goto nla_put_failure;
                        if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
                                goto nla_put_failure;
                }
        }

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
                         gre_tnl_flags_to_gre_flags(o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
            nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
            nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
            nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
            nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
                goto nla_put_failure;

        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
                        t->encap.type) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
                         t->encap.sport) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
                         t->encap.dport) ||
            nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
                        t->encap.flags))
                goto nla_put_failure;

        if (p->collect_md) {
                if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK]        = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]        = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]        = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
        [IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
        [IFLA_GRE_TTL]         = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
        [IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
        [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
        [IFLA_GRE_ENCAP_TYPE]  = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
        [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
        [IFLA_GRE_FWMARK]      = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_VER]  = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_DIR]  = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};

static void ip6erspan_tap_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->max_mtu = 0;
        dev->netdev_ops = &ip6erspan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
}

static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
                             struct nlattr *tb[], struct nlattr *data[],
                             struct netlink_ext_ack *extack)
{
        struct ip6_tnl *nt = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign;
        int err;

        ip6gre_netlink_parms(data, &nt->parms);
        ip6erspan_set_version(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);

        if (nt->parms.collect_md) {
                if (rtnl_dereference(ign->collect_md_tun_erspan))
                        return -EEXIST;
        } else {
                if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
                        return -EEXIST;
        }

        err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
        if (!err) {
                ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
                ip6erspan_tunnel_link_md(ign, nt);
                ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
        }
        return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
        ip6gre_tnl_link_config_common(t);
        ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
                                const struct __ip6_tnl_parm *p, int set_mtu)
{
        ip6gre_tnl_copy_tnl_parm(t, p);
        ip6erspan_tnl_link_config(t, set_mtu);
        return 0;
}

static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
                                struct nlattr *data[],
                                struct netlink_ext_ack *extack)
{
        struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
        struct __ip6_tnl_parm p;
        struct ip6_tnl *t;

        t = ip6gre_changelink_common(dev, tb, data, &p, extack);
        if (IS_ERR(t))
                return PTR_ERR(t);

        ip6erspan_set_version(data, &p);
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
        ip6erspan_tunnel_link_md(ign, t);
        ip6gre_tunnel_link(ign, t);
        return 0;
}

static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .kind           = "ip6gre",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
        .priv_size      = sizeof(struct ip6_tnl),
        .setup          = ip6gre_tunnel_setup,
        .validate       = ip6gre_tunnel_validate,
        .newlink        = ip6gre_newlink,
        .changelink     = ip6gre_changelink,
        .dellink        = ip6gre_dellink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
        .get_link_net   = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
        .kind           = "ip6gretap",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
        .priv_size      = sizeof(struct ip6_tnl),
        .setup          = ip6gre_tap_setup,
        .validate       = ip6gre_tap_validate,
        .newlink        = ip6gre_newlink,
        .changelink     = ip6gre_changelink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
        .get_link_net   = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
        .kind           = "ip6erspan",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
        .priv_size      = sizeof(struct ip6_tnl),
        .setup          = ip6erspan_tap_setup,
        .validate       = ip6erspan_tap_validate,
        .newlink        = ip6erspan_newlink,
        .changelink     = ip6erspan_changelink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
        .get_link_net   = ip6_tnl_get_link_net,
};

/*
 *      And now the modules code and kernel interface.
 */

static int __init ip6gre_init(void)
{
        int err;

        pr_info("GRE over IPv6 tunneling driver\n");

        err = register_pernet_device(&ip6gre_net_ops);
        if (err < 0)
                return err;

        err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ip6gre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ip6gre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

        err = rtnl_link_register(&ip6erspan_tap_ops);
        if (err < 0)
                goto erspan_link_failed;

out:
        return err;

erspan_link_failed:
        rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
        rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
        unregister_pernet_device(&ip6gre_net_ops);
        goto out;
}

static void __exit ip6gre_fini(void)
{
        rtnl_link_unregister(&ip6gre_tap_ops);
        rtnl_link_unregister(&ip6gre_link_ops);
        rtnl_link_unregister(&ip6erspan_tap_ops);
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
        unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");