/* net/ipv6/ip6_gre.c - Linux 4.19.133 */
/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/init.h>
28 #include <linux/in6.h>
29 #include <linux/inetdevice.h>
30 #include <linux/igmp.h>
31 #include <linux/netfilter_ipv4.h>
32 #include <linux/etherdevice.h>
33 #include <linux/if_ether.h>
34 #include <linux/hash.h>
35 #include <linux/if_tunnel.h>
36 #include <linux/ip6_tunnel.h>
38 #include <net/sock.h>
39 #include <net/ip.h>
40 #include <net/ip_tunnels.h>
41 #include <net/icmp.h>
42 #include <net/protocol.h>
43 #include <net/addrconf.h>
44 #include <net/arp.h>
45 #include <net/checksum.h>
46 #include <net/dsfield.h>
47 #include <net/inet_ecn.h>
48 #include <net/xfrm.h>
49 #include <net/net_namespace.h>
50 #include <net/netns/generic.h>
51 #include <net/rtnetlink.h>
53 #include <net/ipv6.h>
54 #include <net/ip6_fib.h>
55 #include <net/ip6_route.h>
56 #include <net/ip6_tunnel.h>
57 #include <net/gre.h>
58 #include <net/erspan.h>
59 #include <net/dst_metadata.h>
62 static bool log_ecn_error = true;
63 module_param(log_ecn_error, bool, 0644);
64 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
66 #define IP6_GRE_HASH_SIZE_SHIFT 5
67 #define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
69 static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};
78 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
79 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
80 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
81 static int ip6gre_tunnel_init(struct net_device *dev);
82 static void ip6gre_tunnel_setup(struct net_device *dev);
83 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
84 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
85 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
 */
105 #define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
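
/* HASH_KEY folds the 32-bit GRE key onto the bucket index; HASH_ADDR
 * hashes an IPv6 address down to the same IP6_GRE_HASH_SIZE range, so
 * either (or their XOR) can select a chain in the per-netns tables.
 */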
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}
113 #define tunnels_r_l tunnels[3]
114 #define tunnels_r tunnels[2]
115 #define tunnels_l tunnels[1]
116 #define tunnels_wc tunnels[0]
/* Given src, dst and key, find the appropriate tunnel for an incoming packet. */
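
/* The lookup walks the four tables from most to least specific.  A
 * tunnel that also matches the ingress ifindex and device type scores
 * 0 and is returned at once; otherwise the lowest-scoring candidate
 * wins, with collect_md and fallback devices tried last.
 */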
120 static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
121 const struct in6_addr *remote, const struct in6_addr *local,
122 __be32 key, __be16 gre_proto)
124 struct net *net = dev_net(dev);
125 int link = dev->ifindex;
126 unsigned int h0 = HASH_ADDR(remote);
127 unsigned int h1 = HASH_KEY(key);
128 struct ip6_tnl *t, *cand = NULL;
129 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
130 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
131 gre_proto == htons(ETH_P_ERSPAN) ||
132 gre_proto == htons(ETH_P_ERSPAN2)) ?
133 ARPHRD_ETHER : ARPHRD_IP6GRE;
134 int score, cand_score = 4;
135 struct net_device *ndev;
137 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
138 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
139 !ipv6_addr_equal(remote, &t->parms.raddr) ||
140 key != t->parms.i_key ||
141 !(t->dev->flags & IFF_UP))
142 continue;
144 if (t->dev->type != ARPHRD_IP6GRE &&
145 t->dev->type != dev_type)
146 continue;
148 score = 0;
149 if (t->parms.link != link)
150 score |= 1;
151 if (t->dev->type != dev_type)
152 score |= 2;
153 if (score == 0)
154 return t;
156 if (score < cand_score) {
157 cand = t;
158 cand_score = score;
162 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
163 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
164 key != t->parms.i_key ||
165 !(t->dev->flags & IFF_UP))
166 continue;
168 if (t->dev->type != ARPHRD_IP6GRE &&
169 t->dev->type != dev_type)
170 continue;
172 score = 0;
173 if (t->parms.link != link)
174 score |= 1;
175 if (t->dev->type != dev_type)
176 score |= 2;
177 if (score == 0)
178 return t;
180 if (score < cand_score) {
181 cand = t;
182 cand_score = score;
186 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
187 if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
188 (!ipv6_addr_equal(local, &t->parms.raddr) ||
189 !ipv6_addr_is_multicast(local))) ||
190 key != t->parms.i_key ||
191 !(t->dev->flags & IFF_UP))
192 continue;
194 if (t->dev->type != ARPHRD_IP6GRE &&
195 t->dev->type != dev_type)
196 continue;
198 score = 0;
199 if (t->parms.link != link)
200 score |= 1;
201 if (t->dev->type != dev_type)
202 score |= 2;
203 if (score == 0)
204 return t;
206 if (score < cand_score) {
207 cand = t;
208 cand_score = score;
212 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
213 if (t->parms.i_key != key ||
214 !(t->dev->flags & IFF_UP))
215 continue;
217 if (t->dev->type != ARPHRD_IP6GRE &&
218 t->dev->type != dev_type)
219 continue;
221 score = 0;
222 if (t->parms.link != link)
223 score |= 1;
224 if (t->dev->type != dev_type)
225 score |= 2;
226 if (score == 0)
227 return t;
229 if (score < cand_score) {
230 cand = t;
231 cand_score = score;
235 if (cand)
236 return cand;
238 if (gre_proto == htons(ETH_P_ERSPAN) ||
239 gre_proto == htons(ETH_P_ERSPAN2))
240 t = rcu_dereference(ign->collect_md_tun_erspan);
241 else
242 t = rcu_dereference(ign->collect_md_tun);
244 if (t && t->dev->flags & IFF_UP)
245 return t;
247 ndev = READ_ONCE(ign->fb_tunnel_dev);
248 if (ndev && ndev->flags & IFF_UP)
249 return netdev_priv(ndev);
251 return NULL;
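
/* Pick the hash bucket for a set of tunnel parameters: prio bit 0 is
 * set for a specific local address, bit 1 for a specific (unicast)
 * remote address, mirroring the four tables documented above.
 */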
254 static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
255 const struct __ip6_tnl_parm *p)
257 const struct in6_addr *remote = &p->raddr;
258 const struct in6_addr *local = &p->laddr;
259 unsigned int h = HASH_KEY(p->i_key);
260 int prio = 0;
262 if (!ipv6_addr_any(local))
263 prio |= 1;
264 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
265 prio |= 2;
266 h ^= HASH_ADDR(remote);
269 return &ign->tunnels[prio][h];
272 static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
274 if (t->parms.collect_md)
275 rcu_assign_pointer(ign->collect_md_tun, t);
278 static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
280 if (t->parms.collect_md)
281 rcu_assign_pointer(ign->collect_md_tun_erspan, t);
284 static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
286 if (t->parms.collect_md)
287 rcu_assign_pointer(ign->collect_md_tun, NULL);
290 static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
291 struct ip6_tnl *t)
293 if (t->parms.collect_md)
294 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
297 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
298 const struct ip6_tnl *t)
300 return __ip6gre_bucket(ign, &t->parms);
303 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
305 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
307 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
308 rcu_assign_pointer(*tp, t);
311 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
313 struct ip6_tnl __rcu **tp;
314 struct ip6_tnl *iter;
316 for (tp = ip6gre_bucket(ign, t);
317 (iter = rtnl_dereference(*tp)) != NULL;
318 tp = &iter->next) {
319 if (t == iter) {
320 rcu_assign_pointer(*tp, t->next);
321 break;
326 static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
327 const struct __ip6_tnl_parm *parms,
328 int type)
330 const struct in6_addr *remote = &parms->raddr;
331 const struct in6_addr *local = &parms->laddr;
332 __be32 key = parms->i_key;
333 int link = parms->link;
334 struct ip6_tnl *t;
335 struct ip6_tnl __rcu **tp;
336 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
338 for (tp = __ip6gre_bucket(ign, parms);
339 (t = rtnl_dereference(*tp)) != NULL;
340 tp = &t->next)
341 if (ipv6_addr_equal(local, &t->parms.laddr) &&
342 ipv6_addr_equal(remote, &t->parms.raddr) &&
343 key == t->parms.i_key &&
344 link == t->parms.link &&
345 type == t->dev->type)
346 break;
348 return t;
351 static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
352 const struct __ip6_tnl_parm *parms, int create)
354 struct ip6_tnl *t, *nt;
355 struct net_device *dev;
356 char name[IFNAMSIZ];
357 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
359 t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
360 if (t && create)
361 return NULL;
362 if (t || !create)
363 return t;
365 if (parms->name[0]) {
366 if (!dev_valid_name(parms->name))
367 return NULL;
368 strlcpy(name, parms->name, IFNAMSIZ);
369 } else {
370 strcpy(name, "ip6gre%d");
372 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
373 ip6gre_tunnel_setup);
374 if (!dev)
375 return NULL;
377 dev_net_set(dev, net);
379 nt = netdev_priv(dev);
380 nt->parms = *parms;
381 dev->rtnl_link_ops = &ip6gre_link_ops;
383 nt->dev = dev;
384 nt->net = dev_net(dev);
386 if (register_netdevice(dev) < 0)
387 goto failed_free;
389 ip6gre_tnl_link_config(nt, 1);
391 /* Can use a lockless transmit, unless we generate output sequences */
392 if (!(nt->parms.o_flags & TUNNEL_SEQ))
393 dev->features |= NETIF_F_LLTX;
395 dev_hold(dev);
396 ip6gre_tunnel_link(ign, nt);
397 return nt;
399 failed_free:
400 free_netdev(dev);
401 return NULL;
404 static void ip6erspan_tunnel_uninit(struct net_device *dev)
406 struct ip6_tnl *t = netdev_priv(dev);
407 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
409 ip6erspan_tunnel_unlink_md(ign, t);
410 ip6gre_tunnel_unlink(ign, t);
411 dst_cache_reset(&t->dst_cache);
412 dev_put(dev);
415 static void ip6gre_tunnel_uninit(struct net_device *dev)
417 struct ip6_tnl *t = netdev_priv(dev);
418 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
420 ip6gre_tunnel_unlink_md(ign, t);
421 ip6gre_tunnel_unlink(ign, t);
422 if (ign->fb_tunnel_dev == dev)
423 WRITE_ONCE(ign->fb_tunnel_dev, NULL);
424 dst_cache_reset(&t->dst_cache);
425 dev_put(dev);
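
/* ICMPv6 error handler: re-parse the GRE header of the embedded packet
 * to locate the tunnel, then log the error, update the PMTU, or follow
 * the redirect as appropriate.
 */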
429 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
430 u8 type, u8 code, int offset, __be32 info)
432 struct net *net = dev_net(skb->dev);
433 const struct gre_base_hdr *greh;
434 const struct ipv6hdr *ipv6h;
435 int grehlen = sizeof(*greh);
436 struct ip6_tnl *t;
437 int key_off = 0;
438 __be16 flags;
439 __be32 key;
441 if (!pskb_may_pull(skb, offset + grehlen))
442 return;
443 greh = (const struct gre_base_hdr *)(skb->data + offset);
444 flags = greh->flags;
445 if (flags & (GRE_VERSION | GRE_ROUTING))
446 return;
447 if (flags & GRE_CSUM)
448 grehlen += 4;
449 if (flags & GRE_KEY) {
450 key_off = grehlen + offset;
451 grehlen += 4;
454 if (!pskb_may_pull(skb, offset + grehlen))
455 return;
456 ipv6h = (const struct ipv6hdr *)skb->data;
457 greh = (const struct gre_base_hdr *)(skb->data + offset);
458 key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
460 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
461 key, greh->protocol);
462 if (!t)
463 return;
465 switch (type) {
466 struct ipv6_tlv_tnl_enc_lim *tel;
467 __u32 teli;
468 case ICMPV6_DEST_UNREACH:
469 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
470 t->parms.name);
471 if (code != ICMPV6_PORT_UNREACH)
472 break;
473 return;
474 case ICMPV6_TIME_EXCEED:
475 if (code == ICMPV6_EXC_HOPLIMIT) {
476 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
477 t->parms.name);
478 break;
480 return;
481 case ICMPV6_PARAMPROB:
482 teli = 0;
483 if (code == ICMPV6_HDR_FIELD)
484 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
486 if (teli && teli == be32_to_cpu(info) - 2) {
487 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
488 if (tel->encap_limit == 0) {
489 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
490 t->parms.name);
492 } else {
493 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
494 t->parms.name);
496 return;
497 case ICMPV6_PKT_TOOBIG:
498 ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
499 return;
500 case NDISC_REDIRECT:
501 ip6_redirect(skb, net, skb->dev->ifindex, 0,
502 sock_net_uid(net, NULL));
503 return;
506 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
507 t->err_count++;
508 else
509 t->err_count = 1;
510 t->err_time = jiffies;
513 static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
515 const struct ipv6hdr *ipv6h;
516 struct ip6_tnl *tunnel;
518 ipv6h = ipv6_hdr(skb);
519 tunnel = ip6gre_tunnel_lookup(skb->dev,
520 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
521 tpi->proto);
522 if (tunnel) {
523 if (tunnel->parms.collect_md) {
524 struct metadata_dst *tun_dst;
525 __be64 tun_id;
526 __be16 flags;
528 flags = tpi->flags;
529 tun_id = key32_to_tunnel_id(tpi->key);
531 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
532 if (!tun_dst)
533 return PACKET_REJECT;
535 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
536 } else {
537 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
540 return PACKET_RCVD;
543 return PACKET_REJECT;
546 static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
547 int gre_hdr_len)
549 struct erspan_base_hdr *ershdr;
550 const struct ipv6hdr *ipv6h;
551 struct erspan_md2 *md2;
552 struct ip6_tnl *tunnel;
553 u8 ver;
555 ipv6h = ipv6_hdr(skb);
556 ershdr = (struct erspan_base_hdr *)skb->data;
557 ver = ershdr->ver;
559 tunnel = ip6gre_tunnel_lookup(skb->dev,
560 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
561 tpi->proto);
562 if (tunnel) {
563 int len = erspan_hdr_len(ver);
565 if (unlikely(!pskb_may_pull(skb, len)))
566 return PACKET_REJECT;
568 if (__iptunnel_pull_header(skb, len,
569 htons(ETH_P_TEB),
570 false, false) < 0)
571 return PACKET_REJECT;
573 if (tunnel->parms.collect_md) {
574 struct erspan_metadata *pkt_md, *md;
575 struct metadata_dst *tun_dst;
576 struct ip_tunnel_info *info;
577 unsigned char *gh;
578 __be64 tun_id;
579 __be16 flags;
581 tpi->flags |= TUNNEL_KEY;
582 flags = tpi->flags;
583 tun_id = key32_to_tunnel_id(tpi->key);
585 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
586 sizeof(*md));
587 if (!tun_dst)
588 return PACKET_REJECT;
			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
594 gh = skb_network_header(skb) +
595 skb_network_header_len(skb);
596 pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
597 sizeof(*ershdr));
598 info = &tun_dst->u.tun_info;
599 md = ip_tunnel_info_opts(info);
600 md->version = ver;
601 md2 = &md->u.md2;
602 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
603 ERSPAN_V2_MDSIZE);
604 info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
605 info->options_len = sizeof(*md);
607 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
609 } else {
610 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
613 return PACKET_RCVD;
616 return PACKET_REJECT;
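
/* inet6 protocol handler for IPPROTO_GRE: parse and strip the GRE
 * header, then dispatch to the ERSPAN or plain GRE receive path.  An
 * unmatched packet triggers an ICMPv6 port-unreachable.
 */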
619 static int gre_rcv(struct sk_buff *skb)
621 struct tnl_ptk_info tpi;
622 bool csum_err = false;
623 int hdr_len;
625 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
626 if (hdr_len < 0)
627 goto drop;
629 if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
630 goto drop;
632 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
633 tpi.proto == htons(ETH_P_ERSPAN2))) {
634 if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
635 return 0;
636 goto out;
639 if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
640 return 0;
642 out:
643 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
644 drop:
645 kfree_skb(skb);
646 return 0;
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
655 static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
656 struct net_device *dev,
657 struct flowi6 *fl6, __u8 *dsfield,
658 int *encap_limit)
660 const struct iphdr *iph = ip_hdr(skb);
661 struct ip6_tnl *t = netdev_priv(dev);
663 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
664 *encap_limit = t->parms.encap_limit;
666 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
668 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
669 *dsfield = ipv4_get_dsfield(iph);
670 else
671 *dsfield = ip6_tclass(t->parms.flowinfo);
673 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
674 fl6->flowi6_mark = skb->mark;
675 else
676 fl6->flowi6_mark = t->parms.fwmark;
678 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
681 static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
682 struct net_device *dev,
683 struct flowi6 *fl6, __u8 *dsfield,
684 int *encap_limit)
686 struct ipv6hdr *ipv6h;
687 struct ip6_tnl *t = netdev_priv(dev);
688 __u16 offset;
690 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
691 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
692 ipv6h = ipv6_hdr(skb);
694 if (offset > 0) {
695 struct ipv6_tlv_tnl_enc_lim *tel;
697 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
698 if (tel->encap_limit == 0) {
699 icmpv6_send(skb, ICMPV6_PARAMPROB,
700 ICMPV6_HDR_FIELD, offset + 2);
701 return -1;
703 *encap_limit = tel->encap_limit - 1;
704 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
705 *encap_limit = t->parms.encap_limit;
708 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
710 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
711 *dsfield = ipv6_get_dsfield(ipv6h);
712 else
713 *dsfield = ip6_tclass(t->parms.flowinfo);
715 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
716 fl6->flowlabel |= ip6_flowlabel(ipv6h);
718 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
719 fl6->flowi6_mark = skb->mark;
720 else
721 fl6->flowi6_mark = t->parms.fwmark;
723 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
725 return 0;
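
/* Common transmit path: resolve the destination, push the GRE header
 * (in collect_md mode the key/csum/seq flags come from the skb's
 * tunnel metadata) and hand the result to ip6_tnl_xmit().
 */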
728 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
729 struct net_device *dev, __u8 dsfield,
730 struct flowi6 *fl6, int encap_limit,
731 __u32 *pmtu, __be16 proto)
733 struct ip6_tnl *tunnel = netdev_priv(dev);
734 __be16 protocol;
736 if (dev->type == ARPHRD_ETHER)
737 IPCB(skb)->flags = 0;
739 if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
740 fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
741 else
742 fl6->daddr = tunnel->parms.raddr;
744 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
745 return -ENOMEM;
747 /* Push GRE header. */
748 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
750 if (tunnel->parms.collect_md) {
751 struct ip_tunnel_info *tun_info;
752 const struct ip_tunnel_key *key;
753 __be16 flags;
755 tun_info = skb_tunnel_info(skb);
756 if (unlikely(!tun_info ||
757 !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
758 ip_tunnel_info_af(tun_info) != AF_INET6))
759 return -EINVAL;
761 key = &tun_info->key;
762 memset(fl6, 0, sizeof(*fl6));
763 fl6->flowi6_proto = IPPROTO_GRE;
764 fl6->daddr = key->u.ipv6.dst;
765 fl6->flowlabel = key->label;
766 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
768 dsfield = key->tos;
769 flags = key->tun_flags &
770 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
771 tunnel->tun_hlen = gre_calc_hlen(flags);
773 gre_build_header(skb, tunnel->tun_hlen,
774 flags, protocol,
775 tunnel_id_to_key32(tun_info->key.tun_id),
776 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
777 : 0);
779 } else {
780 if (tunnel->parms.o_flags & TUNNEL_SEQ)
781 tunnel->o_seqno++;
783 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
784 protocol, tunnel->parms.o_key,
785 htonl(tunnel->o_seqno));
788 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
789 NEXTHDR_GRE);
792 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
794 struct ip6_tnl *t = netdev_priv(dev);
795 int encap_limit = -1;
796 struct flowi6 fl6;
797 __u8 dsfield = 0;
798 __u32 mtu;
799 int err;
801 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
803 if (!t->parms.collect_md)
804 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
805 &dsfield, &encap_limit);
807 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
808 if (err)
809 return -1;
811 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
812 skb->protocol);
813 if (err != 0) {
814 /* XXX: send ICMP error even if DF is not set. */
815 if (err == -EMSGSIZE)
816 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
817 htonl(mtu));
818 return -1;
821 return 0;
824 static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
826 struct ip6_tnl *t = netdev_priv(dev);
827 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
828 int encap_limit = -1;
829 struct flowi6 fl6;
830 __u8 dsfield = 0;
831 __u32 mtu;
832 int err;
834 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
835 return -1;
837 if (!t->parms.collect_md &&
838 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
839 return -1;
841 if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
842 return -1;
844 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
845 &mtu, skb->protocol);
846 if (err != 0) {
847 if (err == -EMSGSIZE)
848 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
849 return -1;
852 return 0;
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
					    const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
875 static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
877 struct ip6_tnl *t = netdev_priv(dev);
878 int encap_limit = -1;
879 struct flowi6 fl6;
880 __u32 mtu;
881 int err;
883 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
884 encap_limit = t->parms.encap_limit;
886 if (!t->parms.collect_md)
887 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
889 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
890 if (err)
891 return err;
893 err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
895 return err;
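
/* ndo_start_xmit for ip6gre/ip6gretap devices: dispatch on the inner
 * protocol; any failure is accounted as a TX error and the skb is
 * dropped.
 */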
898 static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
899 struct net_device *dev)
901 struct ip6_tnl *t = netdev_priv(dev);
902 struct net_device_stats *stats = &t->dev->stats;
903 int ret;
905 if (!pskb_inet_may_pull(skb))
906 goto tx_err;
908 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
909 goto tx_err;
911 switch (skb->protocol) {
912 case htons(ETH_P_IP):
913 ret = ip6gre_xmit_ipv4(skb, dev);
914 break;
915 case htons(ETH_P_IPV6):
916 ret = ip6gre_xmit_ipv6(skb, dev);
917 break;
918 default:
919 ret = ip6gre_xmit_other(skb, dev);
920 break;
923 if (ret < 0)
924 goto tx_err;
926 return NETDEV_TX_OK;
928 tx_err:
929 stats->tx_errors++;
930 stats->tx_dropped++;
931 kfree_skb(skb);
932 return NETDEV_TX_OK;
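
/* ERSPAN transmit: a payload larger than the device MTU is trimmed and
 * marked truncated in the ERSPAN header instead of being fragmented.
 */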
935 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
936 struct net_device *dev)
938 struct ip6_tnl *t = netdev_priv(dev);
939 struct dst_entry *dst = skb_dst(skb);
940 struct net_device_stats *stats;
941 bool truncate = false;
942 int encap_limit = -1;
943 __u8 dsfield = false;
944 struct flowi6 fl6;
945 int err = -EINVAL;
946 __be16 proto;
947 __u32 mtu;
948 int nhoff;
949 int thoff;
951 if (!pskb_inet_may_pull(skb))
952 goto tx_err;
954 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
955 goto tx_err;
957 if (gre_handle_offloads(skb, false))
958 goto tx_err;
960 if (skb->len > dev->mtu + dev->hard_header_len) {
961 pskb_trim(skb, dev->mtu + dev->hard_header_len);
962 truncate = true;
965 nhoff = skb_network_header(skb) - skb_mac_header(skb);
966 if (skb->protocol == htons(ETH_P_IP) &&
967 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
968 truncate = true;
970 thoff = skb_transport_header(skb) - skb_mac_header(skb);
971 if (skb->protocol == htons(ETH_P_IPV6) &&
972 (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
973 truncate = true;
975 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
976 goto tx_err;
978 t->parms.o_flags &= ~TUNNEL_KEY;
979 IPCB(skb)->flags = 0;
	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
984 if (t->parms.collect_md) {
985 struct ip_tunnel_info *tun_info;
986 const struct ip_tunnel_key *key;
987 struct erspan_metadata *md;
988 __be32 tun_id;
990 tun_info = skb_tunnel_info(skb);
991 if (unlikely(!tun_info ||
992 !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
993 ip_tunnel_info_af(tun_info) != AF_INET6))
994 goto tx_err;
996 key = &tun_info->key;
997 memset(&fl6, 0, sizeof(fl6));
998 fl6.flowi6_proto = IPPROTO_GRE;
999 fl6.daddr = key->u.ipv6.dst;
1000 fl6.flowlabel = key->label;
1001 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1003 dsfield = key->tos;
1004 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
1005 goto tx_err;
1006 if (tun_info->options_len < sizeof(*md))
1007 goto tx_err;
1008 md = ip_tunnel_info_opts(tun_info);
1010 tun_id = tunnel_id_to_key32(key->tun_id);
1011 if (md->version == 1) {
1012 erspan_build_header(skb,
1013 ntohl(tun_id),
1014 ntohl(md->u.index), truncate,
1015 false);
1016 } else if (md->version == 2) {
1017 erspan_build_header_v2(skb,
1018 ntohl(tun_id),
1019 md->u.md2.dir,
1020 get_hwid(&md->u.md2),
1021 truncate, false);
1022 } else {
1023 goto tx_err;
1025 } else {
1026 switch (skb->protocol) {
1027 case htons(ETH_P_IP):
1028 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1029 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
1030 &dsfield, &encap_limit);
1031 break;
1032 case htons(ETH_P_IPV6):
1033 if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
1034 goto tx_err;
1035 if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
1036 &dsfield, &encap_limit))
1037 goto tx_err;
1038 break;
1039 default:
1040 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1041 break;
1044 if (t->parms.erspan_ver == 1)
1045 erspan_build_header(skb, ntohl(t->parms.o_key),
1046 t->parms.index,
1047 truncate, false);
1048 else if (t->parms.erspan_ver == 2)
1049 erspan_build_header_v2(skb, ntohl(t->parms.o_key),
1050 t->parms.dir,
1051 t->parms.hwid,
1052 truncate, false);
1053 else
1054 goto tx_err;
1056 fl6.daddr = t->parms.raddr;
1059 /* Push GRE header. */
1060 proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
1061 : htons(ETH_P_ERSPAN2);
1062 gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
1064 /* TooBig packet may have updated dst->dev's mtu */
1065 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
1066 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
1068 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1069 NEXTHDR_GRE);
1070 if (err != 0) {
1071 /* XXX: send ICMP error even if DF is not set. */
1072 if (err == -EMSGSIZE) {
1073 if (skb->protocol == htons(ETH_P_IP))
1074 icmp_send(skb, ICMP_DEST_UNREACH,
1075 ICMP_FRAG_NEEDED, htonl(mtu));
1076 else
1077 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1080 goto tx_err;
1082 return NETDEV_TX_OK;
1084 tx_err:
1085 stats = &t->dev->stats;
1086 stats->tx_errors++;
1087 stats->tx_dropped++;
1088 kfree_skb(skb);
1089 return NETDEV_TX_OK;
1092 static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
1094 struct net_device *dev = t->dev;
1095 struct __ip6_tnl_parm *p = &t->parms;
1096 struct flowi6 *fl6 = &t->fl.u.ip6;
1098 if (dev->type != ARPHRD_ETHER) {
1099 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1100 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1103 /* Set up flowi template */
1104 fl6->saddr = p->laddr;
1105 fl6->daddr = p->raddr;
1106 fl6->flowi6_oif = p->link;
1107 fl6->flowlabel = 0;
1108 fl6->flowi6_proto = IPPROTO_GRE;
1110 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1111 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1112 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1113 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1115 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1116 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1118 if (p->flags&IP6_TNL_F_CAP_XMIT &&
1119 p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
1120 dev->flags |= IFF_POINTOPOINT;
1121 else
1122 dev->flags &= ~IFF_POINTOPOINT;
1125 static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
1126 int t_hlen)
1128 const struct __ip6_tnl_parm *p = &t->parms;
1129 struct net_device *dev = t->dev;
1131 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1132 int strict = (ipv6_addr_type(&p->raddr) &
1133 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1135 struct rt6_info *rt = rt6_lookup(t->net,
1136 &p->raddr, &p->laddr,
1137 p->link, NULL, strict);
1139 if (!rt)
1140 return;
1142 if (rt->dst.dev) {
1143 dev->needed_headroom = rt->dst.dev->hard_header_len +
1144 t_hlen;
1146 if (set_mtu) {
1147 dev->mtu = rt->dst.dev->mtu - t_hlen;
1148 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1149 dev->mtu -= 8;
1150 if (dev->type == ARPHRD_ETHER)
1151 dev->mtu -= ETH_HLEN;
1153 if (dev->mtu < IPV6_MIN_MTU)
1154 dev->mtu = IPV6_MIN_MTU;
1157 ip6_rt_put(rt);
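
/* Recompute the GRE plus encapsulation header length and reserve
 * matching headroom on the device.
 */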
1161 static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
1163 int t_hlen;
1165 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1166 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1168 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1169 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1170 return t_hlen;
1173 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1175 ip6gre_tnl_link_config_common(t);
1176 ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
1179 static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
1180 const struct __ip6_tnl_parm *p)
1182 t->parms.laddr = p->laddr;
1183 t->parms.raddr = p->raddr;
1184 t->parms.flags = p->flags;
1185 t->parms.hop_limit = p->hop_limit;
1186 t->parms.encap_limit = p->encap_limit;
1187 t->parms.flowinfo = p->flowinfo;
1188 t->parms.link = p->link;
1189 t->parms.proto = p->proto;
1190 t->parms.i_key = p->i_key;
1191 t->parms.o_key = p->o_key;
1192 t->parms.i_flags = p->i_flags;
1193 t->parms.o_flags = p->o_flags;
1194 t->parms.fwmark = p->fwmark;
1195 t->parms.erspan_ver = p->erspan_ver;
1196 t->parms.index = p->index;
1197 t->parms.dir = p->dir;
1198 t->parms.hwid = p->hwid;
1199 dst_cache_reset(&t->dst_cache);
1202 static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
1203 int set_mtu)
1205 ip6gre_tnl_copy_tnl_parm(t, p);
1206 ip6gre_tnl_link_config(t, set_mtu);
1207 return 0;
1210 static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
1211 const struct ip6_tnl_parm2 *u)
1213 p->laddr = u->laddr;
1214 p->raddr = u->raddr;
1215 p->flags = u->flags;
1216 p->hop_limit = u->hop_limit;
1217 p->encap_limit = u->encap_limit;
1218 p->flowinfo = u->flowinfo;
1219 p->link = u->link;
1220 p->i_key = u->i_key;
1221 p->o_key = u->o_key;
1222 p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
1223 p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
1224 memcpy(p->name, u->name, sizeof(u->name));
1227 static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
1228 const struct __ip6_tnl_parm *p)
1230 u->proto = IPPROTO_GRE;
1231 u->laddr = p->laddr;
1232 u->raddr = p->raddr;
1233 u->flags = p->flags;
1234 u->hop_limit = p->hop_limit;
1235 u->encap_limit = p->encap_limit;
1236 u->flowinfo = p->flowinfo;
1237 u->link = p->link;
1238 u->i_key = p->i_key;
1239 u->o_key = p->o_key;
1240 u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
1241 u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
1242 memcpy(u->name, p->name, sizeof(u->name));
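
/* Legacy SIOC{GET,ADD,CHG,DEL}TUNNEL configuration interface, kept
 * alongside the rtnl_link ops for ioctl-based tooling.
 */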
1245 static int ip6gre_tunnel_ioctl(struct net_device *dev,
1246 struct ifreq *ifr, int cmd)
1248 int err = 0;
1249 struct ip6_tnl_parm2 p;
1250 struct __ip6_tnl_parm p1;
1251 struct ip6_tnl *t = netdev_priv(dev);
1252 struct net *net = t->net;
1253 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1255 memset(&p1, 0, sizeof(p1));
1257 switch (cmd) {
1258 case SIOCGETTUNNEL:
1259 if (dev == ign->fb_tunnel_dev) {
1260 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1261 err = -EFAULT;
1262 break;
1264 ip6gre_tnl_parm_from_user(&p1, &p);
1265 t = ip6gre_tunnel_locate(net, &p1, 0);
1266 if (!t)
1267 t = netdev_priv(dev);
1269 memset(&p, 0, sizeof(p));
1270 ip6gre_tnl_parm_to_user(&p, &t->parms);
1271 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1272 err = -EFAULT;
1273 break;
1275 case SIOCADDTUNNEL:
1276 case SIOCCHGTUNNEL:
1277 err = -EPERM;
1278 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1279 goto done;
1281 err = -EFAULT;
1282 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1283 goto done;
1285 err = -EINVAL;
1286 if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
1287 goto done;
1289 if (!(p.i_flags&GRE_KEY))
1290 p.i_key = 0;
1291 if (!(p.o_flags&GRE_KEY))
1292 p.o_key = 0;
1294 ip6gre_tnl_parm_from_user(&p1, &p);
1295 t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
1297 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1298 if (t) {
1299 if (t->dev != dev) {
1300 err = -EEXIST;
1301 break;
1303 } else {
1304 t = netdev_priv(dev);
1306 ip6gre_tunnel_unlink(ign, t);
1307 synchronize_net();
1308 ip6gre_tnl_change(t, &p1, 1);
1309 ip6gre_tunnel_link(ign, t);
1310 netdev_state_change(dev);
1314 if (t) {
1315 err = 0;
1317 memset(&p, 0, sizeof(p));
1318 ip6gre_tnl_parm_to_user(&p, &t->parms);
1319 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1320 err = -EFAULT;
1321 } else
1322 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1323 break;
1325 case SIOCDELTUNNEL:
1326 err = -EPERM;
1327 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1328 goto done;
1330 if (dev == ign->fb_tunnel_dev) {
1331 err = -EFAULT;
1332 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1333 goto done;
1334 err = -ENOENT;
1335 ip6gre_tnl_parm_from_user(&p1, &p);
1336 t = ip6gre_tunnel_locate(net, &p1, 0);
1337 if (!t)
1338 goto done;
1339 err = -EPERM;
1340 if (t == netdev_priv(ign->fb_tunnel_dev))
1341 goto done;
1342 dev = t->dev;
1344 unregister_netdevice(dev);
1345 err = 0;
1346 break;
1348 default:
1349 err = -EINVAL;
1352 done:
1353 return err;
1356 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
1357 unsigned short type, const void *daddr,
1358 const void *saddr, unsigned int len)
1360 struct ip6_tnl *t = netdev_priv(dev);
1361 struct ipv6hdr *ipv6h;
1362 __be16 *p;
1364 ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
1365 ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
1366 t->fl.u.ip6.flowlabel,
1367 true, &t->fl.u.ip6));
1368 ipv6h->hop_limit = t->parms.hop_limit;
1369 ipv6h->nexthdr = NEXTHDR_GRE;
1370 ipv6h->saddr = t->parms.laddr;
1371 ipv6h->daddr = t->parms.raddr;
1373 p = (__be16 *)(ipv6h + 1);
1374 p[0] = t->parms.o_flags;
1375 p[1] = htons(type);
	/*
	 *	Set the source hardware address.
	 */
1381 if (saddr)
1382 memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
1383 if (daddr)
1384 memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
1385 if (!ipv6_addr_any(&ipv6h->daddr))
1386 return t->hlen;
1388 return -t->hlen;
1391 static const struct header_ops ip6gre_header_ops = {
	.create = ip6gre_header,
};
1395 static const struct net_device_ops ip6gre_netdev_ops = {
1396 .ndo_init = ip6gre_tunnel_init,
1397 .ndo_uninit = ip6gre_tunnel_uninit,
1398 .ndo_start_xmit = ip6gre_tunnel_xmit,
1399 .ndo_do_ioctl = ip6gre_tunnel_ioctl,
1400 .ndo_change_mtu = ip6_tnl_change_mtu,
1401 .ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};
1405 static void ip6gre_dev_free(struct net_device *dev)
1407 struct ip6_tnl *t = netdev_priv(dev);
1409 gro_cells_destroy(&t->gro_cells);
1410 dst_cache_destroy(&t->dst_cache);
1411 free_percpu(dev->tstats);
1414 static void ip6gre_tunnel_setup(struct net_device *dev)
1416 dev->netdev_ops = &ip6gre_netdev_ops;
1417 dev->needs_free_netdev = true;
1418 dev->priv_destructor = ip6gre_dev_free;
1420 dev->type = ARPHRD_IP6GRE;
1422 dev->flags |= IFF_NOARP;
1423 dev->addr_len = sizeof(struct in6_addr);
1424 netif_keep_dst(dev);
	/* This perm addr will be used as the interface identifier by IPv6 */
1426 dev->addr_assign_type = NET_ADDR_RANDOM;
1427 eth_random_addr(dev->perm_addr);
1430 #define GRE6_FEATURES (NETIF_F_SG | \
1431 NETIF_F_FRAGLIST | \
1432 NETIF_F_HIGHDMA | \
1433 NETIF_F_HW_CSUM)
1435 static void ip6gre_tnl_init_features(struct net_device *dev)
1437 struct ip6_tnl *nt = netdev_priv(dev);
1439 dev->features |= GRE6_FEATURES;
1440 dev->hw_features |= GRE6_FEATURES;
1442 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
1447 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1448 nt->encap.type == TUNNEL_ENCAP_NONE) {
1449 dev->features |= NETIF_F_GSO_SOFTWARE;
1450 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
1456 dev->features |= NETIF_F_LLTX;
1460 static int ip6gre_tunnel_init_common(struct net_device *dev)
1462 struct ip6_tnl *tunnel;
1463 int ret;
1464 int t_hlen;
1466 tunnel = netdev_priv(dev);
1468 tunnel->dev = dev;
1469 tunnel->net = dev_net(dev);
1470 strcpy(tunnel->parms.name, dev->name);
1472 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1473 if (!dev->tstats)
1474 return -ENOMEM;
1476 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1477 if (ret)
1478 goto cleanup_alloc_pcpu_stats;
1480 ret = gro_cells_init(&tunnel->gro_cells, dev);
1481 if (ret)
1482 goto cleanup_dst_cache_init;
1484 t_hlen = ip6gre_calc_hlen(tunnel);
1485 dev->mtu = ETH_DATA_LEN - t_hlen;
1486 if (dev->type == ARPHRD_ETHER)
1487 dev->mtu -= ETH_HLEN;
1488 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1489 dev->mtu -= 8;
1491 if (tunnel->parms.collect_md) {
1492 netif_keep_dst(dev);
1494 ip6gre_tnl_init_features(dev);
1496 return 0;
1498 cleanup_dst_cache_init:
1499 dst_cache_destroy(&tunnel->dst_cache);
1500 cleanup_alloc_pcpu_stats:
1501 free_percpu(dev->tstats);
1502 dev->tstats = NULL;
1503 return ret;
1506 static int ip6gre_tunnel_init(struct net_device *dev)
1508 struct ip6_tnl *tunnel;
1509 int ret;
1511 ret = ip6gre_tunnel_init_common(dev);
1512 if (ret)
1513 return ret;
1515 tunnel = netdev_priv(dev);
1517 if (tunnel->parms.collect_md)
1518 return 0;
1520 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1521 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1523 if (ipv6_addr_any(&tunnel->parms.raddr))
1524 dev->header_ops = &ip6gre_header_ops;
1526 return 0;
1529 static void ip6gre_fb_tunnel_init(struct net_device *dev)
1531 struct ip6_tnl *tunnel = netdev_priv(dev);
1533 tunnel->dev = dev;
1534 tunnel->net = dev_net(dev);
1535 strcpy(tunnel->parms.name, dev->name);
1537 tunnel->hlen = sizeof(struct ipv6hdr) + 4;
1539 dev_hold(dev);
1542 static struct inet6_protocol ip6gre_protocol __read_mostly = {
1543 .handler = gre_rcv,
1544 .err_handler = ip6gre_err,
	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1548 static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
1550 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1551 struct net_device *dev, *aux;
1552 int prio;
1554 for_each_netdev_safe(net, dev, aux)
1555 if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1556 dev->rtnl_link_ops == &ip6gre_tap_ops ||
1557 dev->rtnl_link_ops == &ip6erspan_tap_ops)
1558 unregister_netdevice_queue(dev, head);
1560 for (prio = 0; prio < 4; prio++) {
1561 int h;
1562 for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
1563 struct ip6_tnl *t;
1565 t = rtnl_dereference(ign->tunnels[prio][h]);
1567 while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
1571 if (!net_eq(dev_net(t->dev), net))
1572 unregister_netdevice_queue(t->dev,
1573 head);
1574 t = rtnl_dereference(t->next);
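
/* Per-netns init: create the "ip6gre0" fallback device (unless
 * fallback tunnels are disabled for this netns) and link it into the
 * wildcard hash chain.
 */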
1580 static int __net_init ip6gre_init_net(struct net *net)
1582 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1583 int err;
1585 if (!net_has_fallback_tunnels(net))
1586 return 0;
1587 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
1588 NET_NAME_UNKNOWN,
1589 ip6gre_tunnel_setup);
1590 if (!ign->fb_tunnel_dev) {
1591 err = -ENOMEM;
1592 goto err_alloc_dev;
1594 dev_net_set(ign->fb_tunnel_dev, net);
	/* The FB netdevice is special: there is one, and only one, per
	 * netns.  Allowing it to be moved to another netns is clearly
	 * unsafe.
	 */
1598 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1601 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1602 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1604 err = register_netdev(ign->fb_tunnel_dev);
1605 if (err)
1606 goto err_reg_dev;
1608 rcu_assign_pointer(ign->tunnels_wc[0],
1609 netdev_priv(ign->fb_tunnel_dev));
1610 return 0;
1612 err_reg_dev:
1613 free_netdev(ign->fb_tunnel_dev);
1614 err_alloc_dev:
1615 return err;
1618 static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
1620 struct net *net;
1621 LIST_HEAD(list);
1623 rtnl_lock();
1624 list_for_each_entry(net, net_list, exit_list)
1625 ip6gre_destroy_tunnels(net, &list);
1626 unregister_netdevice_many(&list);
1627 rtnl_unlock();
1630 static struct pernet_operations ip6gre_net_ops = {
1631 .init = ip6gre_init_net,
1632 .exit_batch = ip6gre_exit_batch_net,
1633 .id = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
1637 static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1638 struct netlink_ext_ack *extack)
1640 __be16 flags;
1642 if (!data)
1643 return 0;
1645 flags = 0;
1646 if (data[IFLA_GRE_IFLAGS])
1647 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1648 if (data[IFLA_GRE_OFLAGS])
1649 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1650 if (flags & (GRE_VERSION|GRE_ROUTING))
1651 return -EINVAL;
1653 return 0;
1656 static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1657 struct netlink_ext_ack *extack)
1659 struct in6_addr daddr;
1661 if (tb[IFLA_ADDRESS]) {
1662 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1663 return -EINVAL;
1664 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1665 return -EADDRNOTAVAIL;
1668 if (!data)
1669 goto out;
1671 if (data[IFLA_GRE_REMOTE]) {
1672 daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1673 if (ipv6_addr_any(&daddr))
1674 return -EINVAL;
1677 out:
1678 return ip6gre_tunnel_validate(tb, data, extack);
1681 static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1682 struct netlink_ext_ack *extack)
1684 __be16 flags = 0;
1685 int ret, ver = 0;
1687 if (!data)
1688 return 0;
1690 ret = ip6gre_tap_validate(tb, data, extack);
1691 if (ret)
1692 return ret;
1694 /* ERSPAN should only have GRE sequence and key flag */
1695 if (data[IFLA_GRE_OFLAGS])
1696 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1697 if (data[IFLA_GRE_IFLAGS])
1698 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1699 if (!data[IFLA_GRE_COLLECT_METADATA] &&
1700 flags != (GRE_SEQ | GRE_KEY))
1701 return -EINVAL;
	/* The ERSPAN session ID is only 10 bits.  Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
1706 if (data[IFLA_GRE_IKEY] &&
1707 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1708 return -EINVAL;
1710 if (data[IFLA_GRE_OKEY] &&
1711 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1712 return -EINVAL;
1714 if (data[IFLA_GRE_ERSPAN_VER]) {
1715 ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1716 if (ver != 1 && ver != 2)
1717 return -EINVAL;
1720 if (ver == 1) {
1721 if (data[IFLA_GRE_ERSPAN_INDEX]) {
1722 u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1724 if (index & ~INDEX_MASK)
1725 return -EINVAL;
1727 } else if (ver == 2) {
1728 if (data[IFLA_GRE_ERSPAN_DIR]) {
1729 u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1731 if (dir & ~(DIR_MASK >> DIR_OFFSET))
1732 return -EINVAL;
1735 if (data[IFLA_GRE_ERSPAN_HWID]) {
1736 u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1738 if (hwid & ~(HWID_MASK >> HWID_OFFSET))
1739 return -EINVAL;
1743 return 0;
1746 static void ip6erspan_set_version(struct nlattr *data[],
1747 struct __ip6_tnl_parm *parms)
1749 if (!data)
1750 return;
1752 parms->erspan_ver = 1;
1753 if (data[IFLA_GRE_ERSPAN_VER])
1754 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1756 if (parms->erspan_ver == 1) {
1757 if (data[IFLA_GRE_ERSPAN_INDEX])
1758 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1759 } else if (parms->erspan_ver == 2) {
1760 if (data[IFLA_GRE_ERSPAN_DIR])
1761 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1762 if (data[IFLA_GRE_ERSPAN_HWID])
1763 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
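
/* Translate IFLA_GRE_* netlink attributes into tunnel parameters;
 * attributes that are absent leave the zeroed defaults in place.
 */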
1767 static void ip6gre_netlink_parms(struct nlattr *data[],
1768 struct __ip6_tnl_parm *parms)
1770 memset(parms, 0, sizeof(*parms));
1772 if (!data)
1773 return;
1775 if (data[IFLA_GRE_LINK])
1776 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1778 if (data[IFLA_GRE_IFLAGS])
1779 parms->i_flags = gre_flags_to_tnl_flags(
1780 nla_get_be16(data[IFLA_GRE_IFLAGS]));
1782 if (data[IFLA_GRE_OFLAGS])
1783 parms->o_flags = gre_flags_to_tnl_flags(
1784 nla_get_be16(data[IFLA_GRE_OFLAGS]));
1786 if (data[IFLA_GRE_IKEY])
1787 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1789 if (data[IFLA_GRE_OKEY])
1790 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1792 if (data[IFLA_GRE_LOCAL])
1793 parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
1795 if (data[IFLA_GRE_REMOTE])
1796 parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1798 if (data[IFLA_GRE_TTL])
1799 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
1801 if (data[IFLA_GRE_ENCAP_LIMIT])
1802 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
1804 if (data[IFLA_GRE_FLOWINFO])
1805 parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
1807 if (data[IFLA_GRE_FLAGS])
1808 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
1810 if (data[IFLA_GRE_FWMARK])
1811 parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1813 if (data[IFLA_GRE_COLLECT_METADATA])
1814 parms->collect_md = true;
1817 static int ip6gre_tap_init(struct net_device *dev)
1819 int ret;
1821 ret = ip6gre_tunnel_init_common(dev);
1822 if (ret)
1823 return ret;
1825 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1827 return 0;
1830 static const struct net_device_ops ip6gre_tap_netdev_ops = {
1831 .ndo_init = ip6gre_tap_init,
1832 .ndo_uninit = ip6gre_tunnel_uninit,
1833 .ndo_start_xmit = ip6gre_tunnel_xmit,
1834 .ndo_set_mac_address = eth_mac_addr,
1835 .ndo_validate_addr = eth_validate_addr,
1836 .ndo_change_mtu = ip6_tnl_change_mtu,
1837 .ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};
1841 static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
1843 int t_hlen;
1845 tunnel->tun_hlen = 8;
1846 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1847 erspan_hdr_len(tunnel->parms.erspan_ver);
1849 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1850 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1851 return t_hlen;
1854 static int ip6erspan_tap_init(struct net_device *dev)
1856 struct ip6_tnl *tunnel;
1857 int t_hlen;
1858 int ret;
1860 tunnel = netdev_priv(dev);
1862 tunnel->dev = dev;
1863 tunnel->net = dev_net(dev);
1864 strcpy(tunnel->parms.name, dev->name);
1866 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1867 if (!dev->tstats)
1868 return -ENOMEM;
1870 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1871 if (ret)
1872 goto cleanup_alloc_pcpu_stats;
1874 ret = gro_cells_init(&tunnel->gro_cells, dev);
1875 if (ret)
1876 goto cleanup_dst_cache_init;
1878 t_hlen = ip6erspan_calc_hlen(tunnel);
1879 dev->mtu = ETH_DATA_LEN - t_hlen;
1880 if (dev->type == ARPHRD_ETHER)
1881 dev->mtu -= ETH_HLEN;
1882 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1883 dev->mtu -= 8;
1885 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1886 ip6erspan_tnl_link_config(tunnel, 1);
1888 return 0;
1890 cleanup_dst_cache_init:
1891 dst_cache_destroy(&tunnel->dst_cache);
1892 cleanup_alloc_pcpu_stats:
1893 free_percpu(dev->tstats);
1894 dev->tstats = NULL;
1895 return ret;
1898 static const struct net_device_ops ip6erspan_netdev_ops = {
1899 .ndo_init = ip6erspan_tap_init,
1900 .ndo_uninit = ip6erspan_tunnel_uninit,
1901 .ndo_start_xmit = ip6erspan_tunnel_xmit,
1902 .ndo_set_mac_address = eth_mac_addr,
1903 .ndo_validate_addr = eth_validate_addr,
1904 .ndo_change_mtu = ip6_tnl_change_mtu,
1905 .ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};
1909 static void ip6gre_tap_setup(struct net_device *dev)
1912 ether_setup(dev);
1914 dev->max_mtu = 0;
1915 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1916 dev->needs_free_netdev = true;
1917 dev->priv_destructor = ip6gre_dev_free;
1919 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1920 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1921 netif_keep_dst(dev);
bool is_ip6gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &ip6gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_ip6gretap_dev);
1930 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
1931 struct ip_tunnel_encap *ipencap)
1933 bool ret = false;
1935 memset(ipencap, 0, sizeof(*ipencap));
1937 if (!data)
1938 return ret;
1940 if (data[IFLA_GRE_ENCAP_TYPE]) {
1941 ret = true;
1942 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1945 if (data[IFLA_GRE_ENCAP_FLAGS]) {
1946 ret = true;
1947 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1950 if (data[IFLA_GRE_ENCAP_SPORT]) {
1951 ret = true;
1952 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1955 if (data[IFLA_GRE_ENCAP_DPORT]) {
1956 ret = true;
1957 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1960 return ret;
1963 static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
1964 struct nlattr *tb[], struct nlattr *data[],
1965 struct netlink_ext_ack *extack)
1967 struct ip6_tnl *nt;
1968 struct ip_tunnel_encap ipencap;
1969 int err;
1971 nt = netdev_priv(dev);
1973 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
1974 int err = ip6_tnl_encap_setup(nt, &ipencap);
1976 if (err < 0)
1977 return err;
1980 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1981 eth_hw_addr_random(dev);
1983 nt->dev = dev;
1984 nt->net = dev_net(dev);
1986 err = register_netdevice(dev);
1987 if (err)
1988 goto out;
1990 if (tb[IFLA_MTU])
1991 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1993 dev_hold(dev);
1995 out:
1996 return err;
1999 static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
2000 struct nlattr *tb[], struct nlattr *data[],
2001 struct netlink_ext_ack *extack)
2003 struct ip6_tnl *nt = netdev_priv(dev);
2004 struct net *net = dev_net(dev);
2005 struct ip6gre_net *ign;
2006 int err;
2008 ip6gre_netlink_parms(data, &nt->parms);
2009 ign = net_generic(net, ip6gre_net_id);
2011 if (nt->parms.collect_md) {
2012 if (rtnl_dereference(ign->collect_md_tun))
2013 return -EEXIST;
2014 } else {
2015 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
2016 return -EEXIST;
2019 err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
2020 if (!err) {
2021 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
2022 ip6gre_tunnel_link_md(ign, nt);
2023 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
2025 return err;
2028 static struct ip6_tnl *
2029 ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
2030 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
2031 struct netlink_ext_ack *extack)
2033 struct ip6_tnl *t, *nt = netdev_priv(dev);
2034 struct net *net = nt->net;
2035 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2036 struct ip_tunnel_encap ipencap;
2038 if (dev == ign->fb_tunnel_dev)
2039 return ERR_PTR(-EINVAL);
2041 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
2042 int err = ip6_tnl_encap_setup(nt, &ipencap);
2044 if (err < 0)
2045 return ERR_PTR(err);
2048 ip6gre_netlink_parms(data, p_p);
2050 t = ip6gre_tunnel_locate(net, p_p, 0);
2052 if (t) {
2053 if (t->dev != dev)
2054 return ERR_PTR(-EEXIST);
2055 } else {
2056 t = nt;
2059 return t;
2062 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2063 struct nlattr *data[],
2064 struct netlink_ext_ack *extack)
2066 struct ip6_tnl *t = netdev_priv(dev);
2067 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2068 struct __ip6_tnl_parm p;
2070 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2071 if (IS_ERR(t))
2072 return PTR_ERR(t);
2074 ip6gre_tunnel_unlink_md(ign, t);
2075 ip6gre_tunnel_unlink(ign, t);
2076 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
2077 ip6gre_tunnel_link_md(ign, t);
2078 ip6gre_tunnel_link(ign, t);
2079 return 0;
2082 static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
2084 struct net *net = dev_net(dev);
2085 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2087 if (dev != ign->fb_tunnel_dev)
2088 unregister_netdevice_queue(dev, head);
2091 static size_t ip6gre_get_size(const struct net_device *dev)
2093 return
2094 /* IFLA_GRE_LINK */
2095 nla_total_size(4) +
2096 /* IFLA_GRE_IFLAGS */
2097 nla_total_size(2) +
2098 /* IFLA_GRE_OFLAGS */
2099 nla_total_size(2) +
2100 /* IFLA_GRE_IKEY */
2101 nla_total_size(4) +
2102 /* IFLA_GRE_OKEY */
2103 nla_total_size(4) +
2104 /* IFLA_GRE_LOCAL */
2105 nla_total_size(sizeof(struct in6_addr)) +
2106 /* IFLA_GRE_REMOTE */
2107 nla_total_size(sizeof(struct in6_addr)) +
2108 /* IFLA_GRE_TTL */
2109 nla_total_size(1) +
2110 /* IFLA_GRE_ENCAP_LIMIT */
2111 nla_total_size(1) +
2112 /* IFLA_GRE_FLOWINFO */
2113 nla_total_size(4) +
2114 /* IFLA_GRE_FLAGS */
2115 nla_total_size(4) +
2116 /* IFLA_GRE_ENCAP_TYPE */
2117 nla_total_size(2) +
2118 /* IFLA_GRE_ENCAP_FLAGS */
2119 nla_total_size(2) +
2120 /* IFLA_GRE_ENCAP_SPORT */
2121 nla_total_size(2) +
2122 /* IFLA_GRE_ENCAP_DPORT */
2123 nla_total_size(2) +
2124 /* IFLA_GRE_COLLECT_METADATA */
2125 nla_total_size(0) +
2126 /* IFLA_GRE_FWMARK */
2127 nla_total_size(4) +
2128 /* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
2133 static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2135 struct ip6_tnl *t = netdev_priv(dev);
2136 struct __ip6_tnl_parm *p = &t->parms;
2137 __be16 o_flags = p->o_flags;
2139 if (p->erspan_ver == 1 || p->erspan_ver == 2) {
2140 if (!p->collect_md)
2141 o_flags |= TUNNEL_KEY;
2143 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2144 goto nla_put_failure;
2146 if (p->erspan_ver == 1) {
2147 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2148 goto nla_put_failure;
2149 } else {
2150 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2151 goto nla_put_failure;
2152 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2153 goto nla_put_failure;
2157 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2158 nla_put_be16(skb, IFLA_GRE_IFLAGS,
2159 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2160 nla_put_be16(skb, IFLA_GRE_OFLAGS,
2161 gre_tnl_flags_to_gre_flags(o_flags)) ||
2162 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2163 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2164 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
2165 nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
2166 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
2167 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
2168 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
2169 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
2170 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
2171 goto nla_put_failure;
2173 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
2174 t->encap.type) ||
2175 nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
2176 t->encap.sport) ||
2177 nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
2178 t->encap.dport) ||
2179 nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
2180 t->encap.flags))
2181 goto nla_put_failure;
2183 if (p->collect_md) {
2184 if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
2185 goto nla_put_failure;
2188 return 0;
2190 nla_put_failure:
2191 return -EMSGSIZE;
2194 static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
2195 [IFLA_GRE_LINK] = { .type = NLA_U32 },
2196 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
2197 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
2198 [IFLA_GRE_IKEY] = { .type = NLA_U32 },
2199 [IFLA_GRE_OKEY] = { .type = NLA_U32 },
2200 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
2201 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
2202 [IFLA_GRE_TTL] = { .type = NLA_U8 },
2203 [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
2204 [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
2205 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
2206 [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
2207 [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
2208 [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
2209 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
2210 [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
2211 [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
2212 [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
2213 [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
2214 [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};
2218 static void ip6erspan_tap_setup(struct net_device *dev)
2220 ether_setup(dev);
2222 dev->max_mtu = 0;
2223 dev->netdev_ops = &ip6erspan_netdev_ops;
2224 dev->needs_free_netdev = true;
2225 dev->priv_destructor = ip6gre_dev_free;
2227 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2228 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2229 netif_keep_dst(dev);
2232 static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
2233 struct nlattr *tb[], struct nlattr *data[],
2234 struct netlink_ext_ack *extack)
2236 struct ip6_tnl *nt = netdev_priv(dev);
2237 struct net *net = dev_net(dev);
2238 struct ip6gre_net *ign;
2239 int err;
2241 ip6gre_netlink_parms(data, &nt->parms);
2242 ip6erspan_set_version(data, &nt->parms);
2243 ign = net_generic(net, ip6gre_net_id);
2245 if (nt->parms.collect_md) {
2246 if (rtnl_dereference(ign->collect_md_tun_erspan))
2247 return -EEXIST;
2248 } else {
2249 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
2250 return -EEXIST;
2253 err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
2254 if (!err) {
2255 ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
2256 ip6erspan_tunnel_link_md(ign, nt);
2257 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
2259 return err;
2262 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
2264 ip6gre_tnl_link_config_common(t);
2265 ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
2268 static int ip6erspan_tnl_change(struct ip6_tnl *t,
2269 const struct __ip6_tnl_parm *p, int set_mtu)
2271 ip6gre_tnl_copy_tnl_parm(t, p);
2272 ip6erspan_tnl_link_config(t, set_mtu);
2273 return 0;
2276 static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
2277 struct nlattr *data[],
2278 struct netlink_ext_ack *extack)
2280 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
2281 struct __ip6_tnl_parm p;
2282 struct ip6_tnl *t;
2284 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2285 if (IS_ERR(t))
2286 return PTR_ERR(t);
2288 ip6erspan_set_version(data, &p);
2289 ip6gre_tunnel_unlink_md(ign, t);
2290 ip6gre_tunnel_unlink(ign, t);
2291 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
2292 ip6erspan_tunnel_link_md(ign, t);
2293 ip6gre_tunnel_link(ign, t);
2294 return 0;
2297 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
2298 .kind = "ip6gre",
2299 .maxtype = IFLA_GRE_MAX,
2300 .policy = ip6gre_policy,
2301 .priv_size = sizeof(struct ip6_tnl),
2302 .setup = ip6gre_tunnel_setup,
2303 .validate = ip6gre_tunnel_validate,
2304 .newlink = ip6gre_newlink,
2305 .changelink = ip6gre_changelink,
2306 .dellink = ip6gre_dellink,
2307 .get_size = ip6gre_get_size,
2308 .fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};
2312 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
2313 .kind = "ip6gretap",
2314 .maxtype = IFLA_GRE_MAX,
2315 .policy = ip6gre_policy,
2316 .priv_size = sizeof(struct ip6_tnl),
2317 .setup = ip6gre_tap_setup,
2318 .validate = ip6gre_tap_validate,
2319 .newlink = ip6gre_newlink,
2320 .changelink = ip6gre_changelink,
2321 .get_size = ip6gre_get_size,
2322 .fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};
2326 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
2327 .kind = "ip6erspan",
2328 .maxtype = IFLA_GRE_MAX,
2329 .policy = ip6gre_policy,
2330 .priv_size = sizeof(struct ip6_tnl),
2331 .setup = ip6erspan_tap_setup,
2332 .validate = ip6erspan_tap_validate,
2333 .newlink = ip6erspan_newlink,
2334 .changelink = ip6erspan_changelink,
2335 .get_size = ip6gre_get_size,
2336 .fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};
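
/* Devices of these kinds are normally created via iproute2, e.g.
 * (exact syntax may vary between iproute2 versions):
 *
 *   ip link add ip6gre1 type ip6gre local 2001:db8::1 remote 2001:db8::2
 *   ip link add ip6gretap1 type ip6gretap local 2001:db8::1 remote 2001:db8::2
 *   ip link add ip6erspan1 type ip6erspan seq key 10 \
 *	local 2001:db8::1 remote 2001:db8::2 erspan 123
 *
 * The MODULE_ALIAS_RTNL_LINK() entries below let such requests autoload
 * this module.
 */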
/*
 *	And now the module's code and kernel interface.
 */
2344 static int __init ip6gre_init(void)
2346 int err;
2348 pr_info("GRE over IPv6 tunneling driver\n");
2350 err = register_pernet_device(&ip6gre_net_ops);
2351 if (err < 0)
2352 return err;
2354 err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
2355 if (err < 0) {
2356 pr_info("%s: can't add protocol\n", __func__);
2357 goto add_proto_failed;
2360 err = rtnl_link_register(&ip6gre_link_ops);
2361 if (err < 0)
2362 goto rtnl_link_failed;
2364 err = rtnl_link_register(&ip6gre_tap_ops);
2365 if (err < 0)
2366 goto tap_ops_failed;
2368 err = rtnl_link_register(&ip6erspan_tap_ops);
2369 if (err < 0)
2370 goto erspan_link_failed;
2372 out:
2373 return err;
2375 erspan_link_failed:
2376 rtnl_link_unregister(&ip6gre_tap_ops);
2377 tap_ops_failed:
2378 rtnl_link_unregister(&ip6gre_link_ops);
2379 rtnl_link_failed:
2380 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
2381 add_proto_failed:
2382 unregister_pernet_device(&ip6gre_net_ops);
2383 goto out;
2386 static void __exit ip6gre_fini(void)
2388 rtnl_link_unregister(&ip6gre_tap_ops);
2389 rtnl_link_unregister(&ip6gre_link_ops);
2390 rtnl_link_unregister(&ip6erspan_tap_ops);
2391 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
2392 unregister_pernet_device(&ip6gre_net_ops);
2395 module_init(ip6gre_init);
2396 module_exit(ip6gre_fini);
2397 MODULE_LICENSE("GPL");
2398 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
2399 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
2400 MODULE_ALIAS_RTNL_LINK("ip6gre");
2401 MODULE_ALIAS_RTNL_LINK("ip6gretap");
2402 MODULE_ALIAS_RTNL_LINK("ip6erspan");
2403 MODULE_ALIAS_NETDEV("ip6gre0");