ip6_tunnel: disable dst caching if tunnel is dual-stack
net/ipv6/ip6_tunnel.c
1 /*
2 * IPv6 tunneling device
3 * Linux INET6 implementation
5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
9 * Based on:
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
12 * RFC 2473
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
29 #include <linux/if.h>
30 #include <linux/in.h>
31 #include <linux/ip.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
45 #include <asm/uaccess.h>
46 #include <linux/atomic.h>
48 #include <net/icmp.h>
49 #include <net/ip.h>
50 #include <net/ip_tunnels.h>
51 #include <net/ipv6.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
55 #include <net/xfrm.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
61 MODULE_AUTHOR("Ville Nuorvala");
62 MODULE_DESCRIPTION("IPv6 tunneling device");
63 MODULE_LICENSE("GPL");
64 MODULE_ALIAS_RTNL_LINK("ip6tnl");
65 MODULE_ALIAS_NETDEV("ip6tnl0");
67 #define HASH_SIZE_SHIFT 5
68 #define HASH_SIZE (1 << HASH_SIZE_SHIFT)
70 static bool log_ecn_error = true;
71 module_param(log_ecn_error, bool, 0644);
72 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
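/*
 * HASH() below folds the two tunnel end-point addresses into a bucket
 * index: ipv6_addr_hash() reduces each in6_addr to a u32, the two values
 * are XORed, and hash_32() mixes the result down to HASH_SIZE_SHIFT bits,
 * selecting one of the HASH_SIZE (32) hash-table buckets.
 */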
74 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
76 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
78 return hash_32(hash, HASH_SIZE_SHIFT);
81 static int ip6_tnl_dev_init(struct net_device *dev);
82 static void ip6_tnl_dev_setup(struct net_device *dev);
83 static struct rtnl_link_ops ip6_link_ops __read_mostly;
85 static int ip6_tnl_net_id __read_mostly;
86 struct ip6_tnl_net {
87 /* the IPv6 tunnel fallback device */
88 struct net_device *fb_tnl_dev;
89 /* lists for storing tunnels in use */
90 struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
91 struct ip6_tnl __rcu *tnls_wc[1];
92 struct ip6_tnl __rcu **tnls[2];
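/*
 * ip6_get_stats() folds the per-cpu software counters in dev->tstats into
 * the device's netdev stats.  Each per-cpu copy is read under its
 * u64_stats seqcount so 64-bit counters are never read torn on 32-bit
 * hosts.
 */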
95 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
97 struct pcpu_sw_netstats tmp, sum = { 0 };
98 int i;
100 for_each_possible_cpu(i) {
101 unsigned int start;
102 const struct pcpu_sw_netstats *tstats =
103 per_cpu_ptr(dev->tstats, i);
105 do {
106 start = u64_stats_fetch_begin_irq(&tstats->syncp);
107 tmp.rx_packets = tstats->rx_packets;
108 tmp.rx_bytes = tstats->rx_bytes;
109 tmp.tx_packets = tstats->tx_packets;
110 tmp.tx_bytes = tstats->tx_bytes;
111 } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
113 sum.rx_packets += tmp.rx_packets;
114 sum.rx_bytes += tmp.rx_bytes;
115 sum.tx_packets += tmp.tx_packets;
116 sum.tx_bytes += tmp.tx_bytes;
118 dev->stats.rx_packets = sum.rx_packets;
119 dev->stats.rx_bytes = sum.rx_bytes;
120 dev->stats.tx_packets = sum.tx_packets;
121 dev->stats.tx_bytes = sum.tx_bytes;
122 return &dev->stats;
126  * Locking: hash tables are protected by RCU and RTNL
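/*
 * Per-cpu dst cache: each CPU keeps a cached route (idst->dst) together
 * with the rt6 cookie it was taken under, protected by a seqlock.
 * ip6_tnl_dst_get() re-reads the pair until the seqlock is stable, takes
 * a reference, and re-validates the entry via dst->ops->check() against
 * the stored cookie, dropping it if the route has become obsolete.
 * ip6_tnl_dst_reset() clears the cache on every CPU whenever the tunnel
 * parameters change.
 */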
129 static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
130 struct dst_entry *dst)
132 write_seqlock_bh(&idst->lock);
133 dst_release(rcu_dereference_protected(
134 idst->dst,
135 lockdep_is_held(&idst->lock.lock)));
136 if (dst) {
137 dst_hold(dst);
138 idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
139 } else {
140 idst->cookie = 0;
142 rcu_assign_pointer(idst->dst, dst);
143 write_sequnlock_bh(&idst->lock);
146 struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
148 struct ip6_tnl_dst *idst;
149 struct dst_entry *dst;
150 unsigned int seq;
151 u32 cookie;
153 idst = raw_cpu_ptr(t->dst_cache);
155 rcu_read_lock();
156 do {
157 seq = read_seqbegin(&idst->lock);
158 dst = rcu_dereference(idst->dst);
159 cookie = idst->cookie;
160 } while (read_seqretry(&idst->lock, seq));
162 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
163 dst = NULL;
164 rcu_read_unlock();
166 if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
167 ip6_tnl_per_cpu_dst_set(idst, NULL);
168 dst_release(dst);
169 dst = NULL;
171 return dst;
173 EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
175 void ip6_tnl_dst_reset(struct ip6_tnl *t)
177 int i;
179 for_each_possible_cpu(i)
180 ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
182 EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
184 void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
186 ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
189 EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
191 void ip6_tnl_dst_destroy(struct ip6_tnl *t)
193 if (!t->dst_cache)
194 return;
196 ip6_tnl_dst_reset(t);
197 free_percpu(t->dst_cache);
199 EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
201 int ip6_tnl_dst_init(struct ip6_tnl *t)
203 int i;
205 t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
206 if (!t->dst_cache)
207 return -ENOMEM;
209 for_each_possible_cpu(i)
210 seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
212 return 0;
214 EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
217 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
218 * @remote: the address of the tunnel exit-point
219 * @local: the address of the tunnel entry-point
221 * Return:
222 * tunnel matching given end-points if found,
223 * else fallback tunnel if its device is up,
224 * else %NULL
227 #define for_each_ip6_tunnel_rcu(start) \
228 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
230 static struct ip6_tnl *
231 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
233 unsigned int hash = HASH(remote, local);
234 struct ip6_tnl *t;
235 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
236 struct in6_addr any;
238 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
239 if (ipv6_addr_equal(local, &t->parms.laddr) &&
240 ipv6_addr_equal(remote, &t->parms.raddr) &&
241 (t->dev->flags & IFF_UP))
242 return t;
245 memset(&any, 0, sizeof(any));
246 hash = HASH(&any, local);
247 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
248 if (ipv6_addr_equal(local, &t->parms.laddr) &&
249 ipv6_addr_any(&t->parms.raddr) &&
250 (t->dev->flags & IFF_UP))
251 return t;
254 hash = HASH(remote, &any);
255 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
256 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
257 ipv6_addr_any(&t->parms.laddr) &&
258 (t->dev->flags & IFF_UP))
259 return t;
262 t = rcu_dereference(ip6n->tnls_wc[0]);
263 if (t && (t->dev->flags & IFF_UP))
264 return t;
266 return NULL;
270 * ip6_tnl_bucket - get head of list matching given tunnel parameters
271 * @p: parameters containing tunnel end-points
273 * Description:
274 * ip6_tnl_bucket() returns the head of the list matching the
275 * &struct in6_addr entries laddr and raddr in @p.
277 * Return: head of IPv6 tunnel list
280 static struct ip6_tnl __rcu **
281 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
283 const struct in6_addr *remote = &p->raddr;
284 const struct in6_addr *local = &p->laddr;
285 unsigned int h = 0;
286 int prio = 0;
288 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
289 prio = 1;
290 h = HASH(remote, local);
292 return &ip6n->tnls[prio][h];
296 * ip6_tnl_link - add tunnel to hash table
297 * @t: tunnel to be added
300 static void
301 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
303 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
305         rcu_assign_pointer(t->next, rtnl_dereference(*tp));
306 rcu_assign_pointer(*tp, t);
310 * ip6_tnl_unlink - remove tunnel from hash table
311 * @t: tunnel to be removed
314 static void
315 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
317 struct ip6_tnl __rcu **tp;
318 struct ip6_tnl *iter;
320 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
321 (iter = rtnl_dereference(*tp)) != NULL;
322 tp = &iter->next) {
323 if (t == iter) {
324 rcu_assign_pointer(*tp, t->next);
325 break;
330 static void ip6_dev_free(struct net_device *dev)
332 struct ip6_tnl *t = netdev_priv(dev);
334 ip6_tnl_dst_destroy(t);
335 free_percpu(dev->tstats);
336 free_netdev(dev);
339 static int ip6_tnl_create2(struct net_device *dev)
341 struct ip6_tnl *t = netdev_priv(dev);
342 struct net *net = dev_net(dev);
343 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
344 int err;
346 t = netdev_priv(dev);
348 dev->rtnl_link_ops = &ip6_link_ops;
349 err = register_netdevice(dev);
350 if (err < 0)
351 goto out;
353 strcpy(t->parms.name, dev->name);
355 dev_hold(dev);
356 ip6_tnl_link(ip6n, t);
357 return 0;
359 out:
360 return err;
364 * ip6_tnl_create - create a new tunnel
365 * @p: tunnel parameters
366 * @pt: pointer to new tunnel
368 * Description:
369 * Create tunnel matching given parameters.
371 * Return:
372 * created tunnel or error pointer
375 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
377 struct net_device *dev;
378 struct ip6_tnl *t;
379 char name[IFNAMSIZ];
380 int err = -ENOMEM;
382 if (p->name[0])
383 strlcpy(name, p->name, IFNAMSIZ);
384 else
385 sprintf(name, "ip6tnl%%d");
387 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
388 ip6_tnl_dev_setup);
389 if (!dev)
390 goto failed;
392 dev_net_set(dev, net);
394 t = netdev_priv(dev);
395 t->parms = *p;
396 t->net = dev_net(dev);
397 err = ip6_tnl_create2(dev);
398 if (err < 0)
399 goto failed_free;
401 return t;
403 failed_free:
404 ip6_dev_free(dev);
405 failed:
406 return ERR_PTR(err);
410 * ip6_tnl_locate - find or create tunnel matching given parameters
411 * @p: tunnel parameters
412 * @create: != 0 if allowed to create new tunnel if no match found
414 * Description:
415 * ip6_tnl_locate() first tries to locate an existing tunnel
416  *   based on @p. If this is unsuccessful, but @create is set, a new
417 * tunnel device is created and registered for use.
419 * Return:
420 * matching tunnel or error pointer
423 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
424 struct __ip6_tnl_parm *p, int create)
426 const struct in6_addr *remote = &p->raddr;
427 const struct in6_addr *local = &p->laddr;
428 struct ip6_tnl __rcu **tp;
429 struct ip6_tnl *t;
430 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
432 for (tp = ip6_tnl_bucket(ip6n, p);
433 (t = rtnl_dereference(*tp)) != NULL;
434 tp = &t->next) {
435 if (ipv6_addr_equal(local, &t->parms.laddr) &&
436 ipv6_addr_equal(remote, &t->parms.raddr)) {
437 if (create)
438 return ERR_PTR(-EEXIST);
440 return t;
443 if (!create)
444 return ERR_PTR(-ENODEV);
445 return ip6_tnl_create(net, p);
449 * ip6_tnl_dev_uninit - tunnel device uninitializer
450 * @dev: the device to be destroyed
452 * Description:
453 * ip6_tnl_dev_uninit() removes tunnel from its list
456 static void
457 ip6_tnl_dev_uninit(struct net_device *dev)
459 struct ip6_tnl *t = netdev_priv(dev);
460 struct net *net = t->net;
461 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
463 if (dev == ip6n->fb_tnl_dev)
464 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
465 else
466 ip6_tnl_unlink(ip6n, t);
467 ip6_tnl_dst_reset(t);
468 dev_put(dev);
472  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
473 * @skb: received socket buffer
475 * Return:
476 * 0 if none was found,
477 * else index to encapsulation limit
480 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
482 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
483 unsigned int nhoff = raw - skb->data;
484 unsigned int off = nhoff + sizeof(*ipv6h);
485 u8 next, nexthdr = ipv6h->nexthdr;
487 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
488 struct ipv6_opt_hdr *hdr;
489 u16 optlen;
491 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
492 break;
494 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
495 if (nexthdr == NEXTHDR_FRAGMENT) {
496 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
497 if (frag_hdr->frag_off)
498 break;
499 optlen = 8;
500 } else if (nexthdr == NEXTHDR_AUTH) {
501 optlen = (hdr->hdrlen + 2) << 2;
502 } else {
503 optlen = ipv6_optlen(hdr);
505 /* cache hdr->nexthdr, since pskb_may_pull() might
506 * invalidate hdr
508 next = hdr->nexthdr;
509 if (nexthdr == NEXTHDR_DEST) {
510 u16 i = 2;
512                         /* Remember: hdr is no longer valid at this point. */
513 if (!pskb_may_pull(skb, off + optlen))
514 break;
516 while (1) {
517 struct ipv6_tlv_tnl_enc_lim *tel;
519 /* No more room for encapsulation limit */
520 if (i + sizeof(*tel) > optlen)
521 break;
523 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
524 /* return index of option if found and valid */
525 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
526 tel->length == 1)
527 return i + off - nhoff;
528 /* else jump to next option */
529 if (tel->type)
530 i += tel->length + 2;
531 else
532 i++;
535 nexthdr = next;
536 off += optlen;
538 return 0;
540 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
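/*
 * For reference, the RFC 2473 Tunnel Encapsulation Limit option that
 * ip6_tnl_parse_tlv_enc_lim() searches for is a one-byte TLV carried in a
 * destination options header:
 *
 *   option type    IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *   option length  1
 *   option data    remaining encapsulation limit (tel->encap_limit)
 *
 * The function walks the extension-header chain and returns the offset of
 * that TLV relative to @raw, or 0 if it is not present.
 */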
543 * ip6_tnl_err - tunnel error handler
545 * Description:
546 * ip6_tnl_err() should handle errors in the tunnel according
547 * to the specifications in RFC 2473.
550 static int
551 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
552 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
554 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
555 struct ip6_tnl *t;
556 int rel_msg = 0;
557 u8 rel_type = ICMPV6_DEST_UNREACH;
558 u8 rel_code = ICMPV6_ADDR_UNREACH;
559 u8 tproto;
560 __u32 rel_info = 0;
561 __u16 len;
562 int err = -ENOENT;
564 /* If the packet doesn't contain the original IPv6 header we are
565 in trouble since we might need the source address for further
566 processing of the error. */
568 rcu_read_lock();
569 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
570 if (!t)
571 goto out;
573 tproto = ACCESS_ONCE(t->parms.proto);
574 if (tproto != ipproto && tproto != 0)
575 goto out;
577 err = 0;
579 switch (*type) {
580 __u32 teli;
581 struct ipv6_tlv_tnl_enc_lim *tel;
582 __u32 mtu;
583 case ICMPV6_DEST_UNREACH:
584 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
585 t->parms.name);
586 rel_msg = 1;
587 break;
588 case ICMPV6_TIME_EXCEED:
589 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
590 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
591 t->parms.name);
592 rel_msg = 1;
594 break;
595 case ICMPV6_PARAMPROB:
596 teli = 0;
597 if ((*code) == ICMPV6_HDR_FIELD)
598 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
600 if (teli && teli == *info - 2) {
601 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
602 if (tel->encap_limit == 0) {
603 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
604 t->parms.name);
605 rel_msg = 1;
607 } else {
608 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
609 t->parms.name);
611 break;
612 case ICMPV6_PKT_TOOBIG:
613 mtu = *info - offset;
614 if (mtu < IPV6_MIN_MTU)
615 mtu = IPV6_MIN_MTU;
616 t->dev->mtu = mtu;
618 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
619 if (len > mtu) {
620 rel_type = ICMPV6_PKT_TOOBIG;
621 rel_code = 0;
622 rel_info = mtu;
623 rel_msg = 1;
625 break;
628 *type = rel_type;
629 *code = rel_code;
630 *info = rel_info;
631 *msg = rel_msg;
633 out:
634 rcu_read_unlock();
635 return err;
638 static int
639 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
640 u8 type, u8 code, int offset, __be32 info)
642 int rel_msg = 0;
643 u8 rel_type = type;
644 u8 rel_code = code;
645 __u32 rel_info = ntohl(info);
646 int err;
647 struct sk_buff *skb2;
648 const struct iphdr *eiph;
649 struct rtable *rt;
650 struct flowi4 fl4;
652 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
653 &rel_msg, &rel_info, offset);
654 if (err < 0)
655 return err;
657 if (rel_msg == 0)
658 return 0;
660 switch (rel_type) {
661 case ICMPV6_DEST_UNREACH:
662 if (rel_code != ICMPV6_ADDR_UNREACH)
663 return 0;
664 rel_type = ICMP_DEST_UNREACH;
665 rel_code = ICMP_HOST_UNREACH;
666 break;
667 case ICMPV6_PKT_TOOBIG:
668 if (rel_code != 0)
669 return 0;
670 rel_type = ICMP_DEST_UNREACH;
671 rel_code = ICMP_FRAG_NEEDED;
672 break;
673 case NDISC_REDIRECT:
674 rel_type = ICMP_REDIRECT;
675 rel_code = ICMP_REDIR_HOST;
676 default:
677 return 0;
680 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
681 return 0;
683 skb2 = skb_clone(skb, GFP_ATOMIC);
684 if (!skb2)
685 return 0;
687 skb_dst_drop(skb2);
689 skb_pull(skb2, offset);
690 skb_reset_network_header(skb2);
691 eiph = ip_hdr(skb2);
693 /* Try to guess incoming interface */
694 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
695 eiph->saddr, 0,
696 0, 0,
697 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
698 if (IS_ERR(rt))
699 goto out;
701 skb2->dev = rt->dst.dev;
703 /* route "incoming" packet */
704 if (rt->rt_flags & RTCF_LOCAL) {
705 ip_rt_put(rt);
706 rt = NULL;
707 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
708 eiph->daddr, eiph->saddr,
709 0, 0,
710 IPPROTO_IPIP,
711 RT_TOS(eiph->tos), 0);
712 if (IS_ERR(rt) ||
713 rt->dst.dev->type != ARPHRD_TUNNEL) {
714 if (!IS_ERR(rt))
715 ip_rt_put(rt);
716 goto out;
718 skb_dst_set(skb2, &rt->dst);
719 } else {
720 ip_rt_put(rt);
721 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
722 skb2->dev) ||
723 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
724 goto out;
727 /* change mtu on this route */
728 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
729 if (rel_info > dst_mtu(skb_dst(skb2)))
730 goto out;
732 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
734 if (rel_type == ICMP_REDIRECT)
735 skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
737 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
739 out:
740 kfree_skb(skb2);
741 return 0;
744 static int
745 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
746 u8 type, u8 code, int offset, __be32 info)
748 int rel_msg = 0;
749 u8 rel_type = type;
750 u8 rel_code = code;
751 __u32 rel_info = ntohl(info);
752 int err;
754 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
755 &rel_msg, &rel_info, offset);
756 if (err < 0)
757 return err;
759 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
760 struct rt6_info *rt;
761 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
763 if (!skb2)
764 return 0;
766 skb_dst_drop(skb2);
767 skb_pull(skb2, offset);
768 skb_reset_network_header(skb2);
770 /* Try to guess incoming interface */
771 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
772 NULL, 0, 0);
774 if (rt && rt->dst.dev)
775 skb2->dev = rt->dst.dev;
777 icmpv6_send(skb2, rel_type, rel_code, rel_info);
779 ip6_rt_put(rt);
781 kfree_skb(skb2);
784 return 0;
787 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
788 const struct ipv6hdr *ipv6h,
789 struct sk_buff *skb)
791 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
793 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
794 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
796 return IP6_ECN_decapsulate(ipv6h, skb);
799 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
800 const struct ipv6hdr *ipv6h,
801 struct sk_buff *skb)
803 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
804 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
806 return IP6_ECN_decapsulate(ipv6h, skb);
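/*
 * ip6_tnl_get_cap() derives the tunnel's capability flags from its
 * end-point addresses: a wildcard local or remote address makes the
 * capability a per-packet decision (IP6_TNL_F_CAP_PER_PACKET); otherwise,
 * provided neither address is loopback and link-local addresses are only
 * used when an underlying link (p->link) is set, a unicast local address
 * grants IP6_TNL_F_CAP_XMIT and a unicast remote address grants
 * IP6_TNL_F_CAP_RCV.
 */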
809 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
810 const struct in6_addr *laddr,
811 const struct in6_addr *raddr)
813 struct __ip6_tnl_parm *p = &t->parms;
814 int ltype = ipv6_addr_type(laddr);
815 int rtype = ipv6_addr_type(raddr);
816 __u32 flags = 0;
818 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
819 flags = IP6_TNL_F_CAP_PER_PACKET;
820 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
821 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
822 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
823 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
824 if (ltype&IPV6_ADDR_UNICAST)
825 flags |= IP6_TNL_F_CAP_XMIT;
826 if (rtype&IPV6_ADDR_UNICAST)
827 flags |= IP6_TNL_F_CAP_RCV;
829 return flags;
831 EXPORT_SYMBOL(ip6_tnl_get_cap);
833 /* called with rcu_read_lock() */
834 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
835 const struct in6_addr *laddr,
836 const struct in6_addr *raddr)
838 struct __ip6_tnl_parm *p = &t->parms;
839 int ret = 0;
840 struct net *net = t->net;
842 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
843 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
844 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
845 struct net_device *ldev = NULL;
847 if (p->link)
848 ldev = dev_get_by_index_rcu(net, p->link);
850 if ((ipv6_addr_is_multicast(laddr) ||
851 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
852 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
853 ret = 1;
855 return ret;
857 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
860 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
861 * @skb: received socket buffer
862 * @protocol: ethernet protocol ID
863 * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
865 * Return: 0
868 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
869 __u8 ipproto,
870 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
871 const struct ipv6hdr *ipv6h,
872 struct sk_buff *skb))
874 struct ip6_tnl *t;
875 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
876 u8 tproto;
877 int err;
879 rcu_read_lock();
880 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
881 if (t) {
882 struct pcpu_sw_netstats *tstats;
884 tproto = ACCESS_ONCE(t->parms.proto);
885 if (tproto != ipproto && tproto != 0) {
886 rcu_read_unlock();
887 goto discard;
890 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
891 rcu_read_unlock();
892 goto discard;
895 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
896 t->dev->stats.rx_dropped++;
897 rcu_read_unlock();
898 goto discard;
900 skb->mac_header = skb->network_header;
901 skb_reset_network_header(skb);
902 skb->protocol = htons(protocol);
903 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
905 __skb_tunnel_rx(skb, t->dev, t->net);
907 err = dscp_ecn_decapsulate(t, ipv6h, skb);
908 if (unlikely(err)) {
909 if (log_ecn_error)
910 net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
911 &ipv6h->saddr,
912 ipv6_get_dsfield(ipv6h));
913 if (err > 1) {
914 ++t->dev->stats.rx_frame_errors;
915 ++t->dev->stats.rx_errors;
916 rcu_read_unlock();
917 goto discard;
921 tstats = this_cpu_ptr(t->dev->tstats);
922 u64_stats_update_begin(&tstats->syncp);
923 tstats->rx_packets++;
924 tstats->rx_bytes += skb->len;
925 u64_stats_update_end(&tstats->syncp);
927 netif_rx(skb);
929 rcu_read_unlock();
930 return 0;
932 rcu_read_unlock();
933 return 1;
935 discard:
936 kfree_skb(skb);
937 return 0;
940 static int ip4ip6_rcv(struct sk_buff *skb)
942 return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
943 ip4ip6_dscp_ecn_decapsulate);
946 static int ip6ip6_rcv(struct sk_buff *skb)
948 return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
949 ip6ip6_dscp_ecn_decapsulate);
952 struct ipv6_tel_txoption {
953 struct ipv6_txoptions ops;
954 __u8 dst_opt[8];
957 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
959 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
961 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
962 opt->dst_opt[3] = 1;
963 opt->dst_opt[4] = encap_limit;
964 opt->dst_opt[5] = IPV6_TLV_PADN;
965 opt->dst_opt[6] = 1;
967 opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
968 opt->ops.opt_nflen = 8;
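/*
 * init_tel_txopt() above builds the 8-byte destination options header
 * that carries the encapsulation limit on transmit.  dst_opt[] is laid
 * out as:
 *
 *   [0] next header    (filled in later by ipv6_push_nfrag_opts)
 *   [1] hdr ext len    0, i.e. 8 bytes total
 *   [2] option type    IPV6_TLV_TNL_ENCAP_LIMIT
 *   [3] option length  1
 *   [4] option data    encap_limit value
 *   [5] option type    IPV6_TLV_PADN
 *   [6] option length  1
 *   [7] padding        0
 */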
972 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
973 * @t: the outgoing tunnel device
974 * @hdr: IPv6 header from the incoming packet
976 * Description:
977 * Avoid trivial tunneling loop by checking that tunnel exit-point
978 * doesn't match source of incoming packet.
980 * Return:
981 * 1 if conflict,
982  *   0 otherwise
985 static inline bool
986 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
988 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
991 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
992 const struct in6_addr *laddr,
993 const struct in6_addr *raddr)
995 struct __ip6_tnl_parm *p = &t->parms;
996 int ret = 0;
997 struct net *net = t->net;
999 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
1000 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
1001 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
1002 struct net_device *ldev = NULL;
1004 rcu_read_lock();
1005 if (p->link)
1006 ldev = dev_get_by_index_rcu(net, p->link);
1008 if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
1009 pr_warn("%s xmit: Local address not yet configured!\n",
1010 p->name);
1011 else if (!ipv6_addr_is_multicast(raddr) &&
1012 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
1013 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1014 p->name);
1015 else
1016 ret = 1;
1017 rcu_read_unlock();
1019 return ret;
1021 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1024 * ip6_tnl_xmit2 - encapsulate packet and send
1025 * @skb: the outgoing socket buffer
1026 * @dev: the outgoing tunnel device
1027 * @dsfield: dscp code for outer header
1028 * @fl: flow of tunneled packet
1029 * @encap_limit: encapsulation limit
1030 * @pmtu: Path MTU is stored if packet is too big
1032 * Description:
1033 * Build new header and do some sanity checks on the packet before sending
1034 * it.
1036 * Return:
1037 * 0 on success
1038 * -1 fail
1038  *   %-EMSGSIZE message too big; the MTU is stored in @pmtu in this case.
1042 static int ip6_tnl_xmit2(struct sk_buff *skb,
1043 struct net_device *dev,
1044 __u8 dsfield,
1045 struct flowi6 *fl6,
1046 int encap_limit,
1047 __u32 *pmtu)
1049 struct ip6_tnl *t = netdev_priv(dev);
1050 struct net *net = t->net;
1051 struct net_device_stats *stats = &t->dev->stats;
1052 struct ipv6hdr *ipv6h;
1053 struct ipv6_tel_txoption opt;
1054 struct dst_entry *dst = NULL, *ndst = NULL;
1055 struct net_device *tdev;
1056 bool use_cache = false;
1057 int mtu;
1058 unsigned int max_headroom = sizeof(struct ipv6hdr);
1059 u8 proto;
1060 int err = -1;
1062 /* NBMA tunnel */
1063 if (ipv6_addr_any(&t->parms.raddr)) {
1064 if (skb->protocol == htons(ETH_P_IPV6)) {
1065 struct in6_addr *addr6;
1066 struct neighbour *neigh;
1067 int addr_type;
1069 if (!skb_dst(skb))
1070 goto tx_err_link_failure;
1072 neigh = dst_neigh_lookup(skb_dst(skb),
1073 &ipv6_hdr(skb)->daddr);
1074 if (!neigh)
1075 goto tx_err_link_failure;
1077 addr6 = (struct in6_addr *)&neigh->primary_key;
1078 addr_type = ipv6_addr_type(addr6);
1080 if (addr_type == IPV6_ADDR_ANY)
1081 addr6 = &ipv6_hdr(skb)->daddr;
1083 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1084 neigh_release(neigh);
1086 } else if (t->parms.proto != 0 && !(t->parms.flags &
1087 (IP6_TNL_F_USE_ORIG_TCLASS |
1088 IP6_TNL_F_USE_ORIG_FWMARK))) {
1089 /* enable the cache only if neither the outer protocol nor the
1090 * routing decision depends on the current inner header value
1092 use_cache = true;
1095 if (use_cache)
1096 dst = ip6_tnl_dst_get(t);
1098 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1099 goto tx_err_link_failure;
1101 if (!dst) {
1102 dst = ip6_route_output(net, NULL, fl6);
1104 if (dst->error)
1105 goto tx_err_link_failure;
1106 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1107 if (IS_ERR(dst)) {
1108 err = PTR_ERR(dst);
1109 dst = NULL;
1110 goto tx_err_link_failure;
1112 ndst = dst;
1115 tdev = dst->dev;
1117 if (tdev == dev) {
1118 stats->collisions++;
1119 net_warn_ratelimited("%s: Local routing loop detected!\n",
1120 t->parms.name);
1121 goto tx_err_dst_release;
1123 mtu = dst_mtu(dst) - sizeof(*ipv6h);
1124 if (encap_limit >= 0) {
1125 max_headroom += 8;
1126 mtu -= 8;
1128 if (mtu < IPV6_MIN_MTU)
1129 mtu = IPV6_MIN_MTU;
1130 if (skb_dst(skb))
1131 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1132 if (skb->len > mtu) {
1133 *pmtu = mtu;
1134 err = -EMSGSIZE;
1135 goto tx_err_dst_release;
1138 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1141 * Okay, now see if we can stuff it in the buffer as-is.
1143 max_headroom += LL_RESERVED_SPACE(tdev);
1145 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1146 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1147 struct sk_buff *new_skb;
1149 new_skb = skb_realloc_headroom(skb, max_headroom);
1150 if (!new_skb)
1151 goto tx_err_dst_release;
1153 if (skb->sk)
1154 skb_set_owner_w(new_skb, skb->sk);
1155 consume_skb(skb);
1156 skb = new_skb;
1159 if (use_cache && ndst)
1160 ip6_tnl_dst_set(t, ndst);
1161 skb_dst_set(skb, dst);
1163 skb->transport_header = skb->network_header;
1165 proto = fl6->flowi6_proto;
1166 if (encap_limit >= 0) {
1167 init_tel_txopt(&opt, encap_limit);
1168 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
1171 if (likely(!skb->encapsulation)) {
1172 skb_reset_inner_headers(skb);
1173 skb->encapsulation = 1;
1176 skb_push(skb, sizeof(struct ipv6hdr));
1177 skb_reset_network_header(skb);
1178 ipv6h = ipv6_hdr(skb);
1179 ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
1180 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1181 ipv6h->hop_limit = t->parms.hop_limit;
1182 ipv6h->nexthdr = proto;
1183 ipv6h->saddr = fl6->saddr;
1184 ipv6h->daddr = fl6->daddr;
1185 ip6tunnel_xmit(NULL, skb, dev);
1186 return 0;
1187 tx_err_link_failure:
1188 stats->tx_carrier_errors++;
1189 dst_link_failure(skb);
1190 tx_err_dst_release:
1191 dst_release(dst);
1192 return err;
1195 static inline int
1196 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1198 struct ip6_tnl *t = netdev_priv(dev);
1199 const struct iphdr *iph = ip_hdr(skb);
1200 int encap_limit = -1;
1201 struct flowi6 fl6;
1202 __u8 dsfield;
1203 __u32 mtu;
1204 u8 tproto;
1205 int err;
1207 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1209 tproto = ACCESS_ONCE(t->parms.proto);
1210 if (tproto != IPPROTO_IPIP && tproto != 0)
1211 return -1;
1213 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1214 encap_limit = t->parms.encap_limit;
1216 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1217 fl6.flowi6_proto = IPPROTO_IPIP;
1219 dsfield = ipv4_get_dsfield(iph);
1221 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1222 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1223 & IPV6_TCLASS_MASK;
1224 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1225 fl6.flowi6_mark = skb->mark;
1227 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1228 if (err != 0) {
1229 /* XXX: send ICMP error even if DF is not set. */
1230 if (err == -EMSGSIZE)
1231 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1232 htonl(mtu));
1233 return -1;
1236 return 0;
1239 static inline int
1240 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1242 struct ip6_tnl *t = netdev_priv(dev);
1243 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1244 int encap_limit = -1;
1245 __u16 offset;
1246 struct flowi6 fl6;
1247 __u8 dsfield;
1248 __u32 mtu;
1249 u8 tproto;
1250 int err;
1252 tproto = ACCESS_ONCE(t->parms.proto);
1253 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1254 ip6_tnl_addr_conflict(t, ipv6h))
1255 return -1;
1257 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1258 if (offset > 0) {
1259 struct ipv6_tlv_tnl_enc_lim *tel;
1260 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1261 if (tel->encap_limit == 0) {
1262 icmpv6_send(skb, ICMPV6_PARAMPROB,
1263 ICMPV6_HDR_FIELD, offset + 2);
1264 return -1;
1266 encap_limit = tel->encap_limit - 1;
1267 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1268 encap_limit = t->parms.encap_limit;
1270 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1271 fl6.flowi6_proto = IPPROTO_IPV6;
1273 dsfield = ipv6_get_dsfield(ipv6h);
1274 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1275 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
1276 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1277 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1278 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1279 fl6.flowi6_mark = skb->mark;
1281 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1282 if (err != 0) {
1283 if (err == -EMSGSIZE)
1284 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1285 return -1;
1288 return 0;
1291 static netdev_tx_t
1292 ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1294 struct ip6_tnl *t = netdev_priv(dev);
1295 struct net_device_stats *stats = &t->dev->stats;
1296 int ret;
1298 switch (skb->protocol) {
1299 case htons(ETH_P_IP):
1300 ret = ip4ip6_tnl_xmit(skb, dev);
1301 break;
1302 case htons(ETH_P_IPV6):
1303 ret = ip6ip6_tnl_xmit(skb, dev);
1304 break;
1305 default:
1306 goto tx_err;
1309 if (ret < 0)
1310 goto tx_err;
1312 return NETDEV_TX_OK;
1314 tx_err:
1315 stats->tx_errors++;
1316 stats->tx_dropped++;
1317 kfree_skb(skb);
1318 return NETDEV_TX_OK;
1321 static void ip6_tnl_link_config(struct ip6_tnl *t)
1323 struct net_device *dev = t->dev;
1324 struct __ip6_tnl_parm *p = &t->parms;
1325 struct flowi6 *fl6 = &t->fl.u.ip6;
1327 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1328 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1330 /* Set up flowi template */
1331 fl6->saddr = p->laddr;
1332 fl6->daddr = p->raddr;
1333 fl6->flowi6_oif = p->link;
1334 fl6->flowlabel = 0;
1336 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1337 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1338 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1339 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1341 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1342 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1344 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1345 dev->flags |= IFF_POINTOPOINT;
1346 else
1347 dev->flags &= ~IFF_POINTOPOINT;
1349 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1350 int strict = (ipv6_addr_type(&p->raddr) &
1351 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1353 struct rt6_info *rt = rt6_lookup(t->net,
1354 &p->raddr, &p->laddr,
1355 p->link, strict);
1357 if (!rt)
1358 return;
1360 if (rt->dst.dev) {
1361 dev->hard_header_len = rt->dst.dev->hard_header_len +
1362 sizeof(struct ipv6hdr);
1364 dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr);
1365 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1366 dev->mtu -= 8;
1368 if (dev->mtu < IPV6_MIN_MTU)
1369 dev->mtu = IPV6_MIN_MTU;
1371 ip6_rt_put(rt);
1376 * ip6_tnl_change - update the tunnel parameters
1377 * @t: tunnel to be changed
1378 * @p: tunnel configuration parameters
1380 * Description:
1381 * ip6_tnl_change() updates the tunnel parameters
1384 static int
1385 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1387 t->parms.laddr = p->laddr;
1388 t->parms.raddr = p->raddr;
1389 t->parms.flags = p->flags;
1390 t->parms.hop_limit = p->hop_limit;
1391 t->parms.encap_limit = p->encap_limit;
1392 t->parms.flowinfo = p->flowinfo;
1393 t->parms.link = p->link;
1394 t->parms.proto = p->proto;
1395 ip6_tnl_dst_reset(t);
1396 ip6_tnl_link_config(t);
1397 return 0;
1400 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1402 struct net *net = t->net;
1403 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1404 int err;
1406 ip6_tnl_unlink(ip6n, t);
1407 synchronize_net();
1408 err = ip6_tnl_change(t, p);
1409 ip6_tnl_link(ip6n, t);
1410 netdev_state_change(t->dev);
1411 return err;
1414 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1416         /* for the default tnl0 device, allow changing only the proto */
1417 t->parms.proto = p->proto;
1418 netdev_state_change(t->dev);
1419 return 0;
1422 static void
1423 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1425 p->laddr = u->laddr;
1426 p->raddr = u->raddr;
1427 p->flags = u->flags;
1428 p->hop_limit = u->hop_limit;
1429 p->encap_limit = u->encap_limit;
1430 p->flowinfo = u->flowinfo;
1431 p->link = u->link;
1432 p->proto = u->proto;
1433 memcpy(p->name, u->name, sizeof(u->name));
1436 static void
1437 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1439 u->laddr = p->laddr;
1440 u->raddr = p->raddr;
1441 u->flags = p->flags;
1442 u->hop_limit = p->hop_limit;
1443 u->encap_limit = p->encap_limit;
1444 u->flowinfo = p->flowinfo;
1445 u->link = p->link;
1446 u->proto = p->proto;
1447 memcpy(u->name, p->name, sizeof(u->name));
1451 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1452 * @dev: virtual device associated with tunnel
1453 * @ifr: parameters passed from userspace
1454 * @cmd: command to be performed
1456 * Description:
1457 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1458 * from userspace.
1460 * The possible commands are the following:
1461 * %SIOCGETTUNNEL: get tunnel parameters for device
1462 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1463 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1464 * %SIOCDELTUNNEL: delete tunnel
1466 * The fallback device "ip6tnl0", created during module
1467 * initialization, can be used for creating other tunnel devices.
1469 * Return:
1470 * 0 on success,
1471 * %-EFAULT if unable to copy data to or from userspace,
1472  *   %-EPERM if current process does not have %CAP_NET_ADMIN set,
1473 * %-EINVAL if passed tunnel parameters are invalid,
1474  *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
1475  *   %-ENODEV if attempting to change or delete a nonexistent device
1478 static int
1479 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1481 int err = 0;
1482 struct ip6_tnl_parm p;
1483 struct __ip6_tnl_parm p1;
1484 struct ip6_tnl *t = netdev_priv(dev);
1485 struct net *net = t->net;
1486 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1488 switch (cmd) {
1489 case SIOCGETTUNNEL:
1490 if (dev == ip6n->fb_tnl_dev) {
1491 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1492 err = -EFAULT;
1493 break;
1495 ip6_tnl_parm_from_user(&p1, &p);
1496 t = ip6_tnl_locate(net, &p1, 0);
1497 if (IS_ERR(t))
1498 t = netdev_priv(dev);
1499 } else {
1500 memset(&p, 0, sizeof(p));
1502 ip6_tnl_parm_to_user(&p, &t->parms);
1503 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1504 err = -EFAULT;
1506 break;
1507 case SIOCADDTUNNEL:
1508 case SIOCCHGTUNNEL:
1509 err = -EPERM;
1510 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1511 break;
1512 err = -EFAULT;
1513 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1514 break;
1515 err = -EINVAL;
1516 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1517 p.proto != 0)
1518 break;
1519 ip6_tnl_parm_from_user(&p1, &p);
1520 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1521 if (cmd == SIOCCHGTUNNEL) {
1522 if (!IS_ERR(t)) {
1523 if (t->dev != dev) {
1524 err = -EEXIST;
1525 break;
1527 } else
1528 t = netdev_priv(dev);
1529 if (dev == ip6n->fb_tnl_dev)
1530 err = ip6_tnl0_update(t, &p1);
1531 else
1532 err = ip6_tnl_update(t, &p1);
1534 if (!IS_ERR(t)) {
1535 err = 0;
1536 ip6_tnl_parm_to_user(&p, &t->parms);
1537 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1538 err = -EFAULT;
1540 } else {
1541 err = PTR_ERR(t);
1543 break;
1544 case SIOCDELTUNNEL:
1545 err = -EPERM;
1546 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1547 break;
1549 if (dev == ip6n->fb_tnl_dev) {
1550 err = -EFAULT;
1551 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1552 break;
1553 err = -ENOENT;
1554 ip6_tnl_parm_from_user(&p1, &p);
1555 t = ip6_tnl_locate(net, &p1, 0);
1556 if (IS_ERR(t))
1557 break;
1558 err = -EPERM;
1559 if (t->dev == ip6n->fb_tnl_dev)
1560 break;
1561 dev = t->dev;
1563 err = 0;
1564 unregister_netdevice(dev);
1565 break;
1566 default:
1567 err = -EINVAL;
1569 return err;
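/*
 * Minimal userspace sketch (not part of this file) of driving the ioctl
 * interface above: the request is issued on the fallback device "ip6tnl0"
 * with a struct ip6_tnl_parm (from <linux/ip6_tunnel.h>) passed through
 * ifr_ifru.ifru_data.  The device name "mytun" and the addresses are
 * illustrative only.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <linux/if_tunnel.h>
 *	#include <linux/ip6_tunnel.h>
 *
 *	static int add_ip6ip6_tunnel(void)
 *	{
 *		struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *		struct ifreq ifr = { };
 *		int fd, err;
 *
 *		fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *		if (fd < 0)
 *			return -1;
 *		strcpy(p.name, "mytun");
 *		inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *		inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *		strcpy(ifr.ifr_name, "ip6tnl0");
 *		ifr.ifr_ifru.ifru_data = (void *)&p;
 *		err = ioctl(fd, SIOCADDTUNNEL, &ifr);
 *		close(fd);
 *		return err;
 *	}
 */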
1573 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1574 * @dev: virtual device associated with tunnel
1575 * @new_mtu: the new mtu
1577 * Return:
1578 * 0 on success,
1579 * %-EINVAL if mtu too small
1582 static int
1583 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1585 struct ip6_tnl *tnl = netdev_priv(dev);
1587 if (tnl->parms.proto == IPPROTO_IPIP) {
1588 if (new_mtu < 68)
1589 return -EINVAL;
1590 } else {
1591 if (new_mtu < IPV6_MIN_MTU)
1592 return -EINVAL;
1594 if (new_mtu > 0xFFF8 - dev->hard_header_len)
1595 return -EINVAL;
1596 dev->mtu = new_mtu;
1597 return 0;
1600 int ip6_tnl_get_iflink(const struct net_device *dev)
1602 struct ip6_tnl *t = netdev_priv(dev);
1604 return t->parms.link;
1606 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1608 static const struct net_device_ops ip6_tnl_netdev_ops = {
1609 .ndo_init = ip6_tnl_dev_init,
1610 .ndo_uninit = ip6_tnl_dev_uninit,
1611 .ndo_start_xmit = ip6_tnl_xmit,
1612 .ndo_do_ioctl = ip6_tnl_ioctl,
1613 .ndo_change_mtu = ip6_tnl_change_mtu,
1614 .ndo_get_stats = ip6_get_stats,
1615 .ndo_get_iflink = ip6_tnl_get_iflink,
1620 * ip6_tnl_dev_setup - setup virtual tunnel device
1621 * @dev: virtual device associated with tunnel
1623 * Description:
1624 * Initialize function pointers and device parameters
1627 static void ip6_tnl_dev_setup(struct net_device *dev)
1629 struct ip6_tnl *t;
1631 dev->netdev_ops = &ip6_tnl_netdev_ops;
1632 dev->destructor = ip6_dev_free;
1634 dev->type = ARPHRD_TUNNEL6;
1635 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
1636 dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr);
1637 t = netdev_priv(dev);
1638 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1639 dev->mtu -= 8;
1640 dev->flags |= IFF_NOARP;
1641 dev->addr_len = sizeof(struct in6_addr);
1642 netif_keep_dst(dev);
1643         /* This perm addr will be used as an interface identifier by IPv6 */
1644 dev->addr_assign_type = NET_ADDR_RANDOM;
1645 eth_random_addr(dev->perm_addr);
1650 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1651 * @dev: virtual device associated with tunnel
1654 static inline int
1655 ip6_tnl_dev_init_gen(struct net_device *dev)
1657 struct ip6_tnl *t = netdev_priv(dev);
1658 int ret;
1660 t->dev = dev;
1661 t->net = dev_net(dev);
1662 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1663 if (!dev->tstats)
1664 return -ENOMEM;
1666 ret = ip6_tnl_dst_init(t);
1667 if (ret) {
1668 free_percpu(dev->tstats);
1669 dev->tstats = NULL;
1670 return ret;
1673 return 0;
1677 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1678 * @dev: virtual device associated with tunnel
1681 static int ip6_tnl_dev_init(struct net_device *dev)
1683 struct ip6_tnl *t = netdev_priv(dev);
1684 int err = ip6_tnl_dev_init_gen(dev);
1686 if (err)
1687 return err;
1688 ip6_tnl_link_config(t);
1689 return 0;
1693 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1694 * @dev: fallback device
1696 * Return: 0
1699 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1701 struct ip6_tnl *t = netdev_priv(dev);
1702 struct net *net = dev_net(dev);
1703 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1705 t->parms.proto = IPPROTO_IPV6;
1706 dev_hold(dev);
1708 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1709 return 0;
1712 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1714 u8 proto;
1716 if (!data || !data[IFLA_IPTUN_PROTO])
1717 return 0;
1719 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1720 if (proto != IPPROTO_IPV6 &&
1721 proto != IPPROTO_IPIP &&
1722 proto != 0)
1723 return -EINVAL;
1725 return 0;
1728 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1729 struct __ip6_tnl_parm *parms)
1731 memset(parms, 0, sizeof(*parms));
1733 if (!data)
1734 return;
1736 if (data[IFLA_IPTUN_LINK])
1737 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1739 if (data[IFLA_IPTUN_LOCAL])
1740 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1742 if (data[IFLA_IPTUN_REMOTE])
1743 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1745 if (data[IFLA_IPTUN_TTL])
1746 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1748 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1749 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1751 if (data[IFLA_IPTUN_FLOWINFO])
1752 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1754 if (data[IFLA_IPTUN_FLAGS])
1755 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1757 if (data[IFLA_IPTUN_PROTO])
1758 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1761 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1762 struct nlattr *tb[], struct nlattr *data[])
1764 struct net *net = dev_net(dev);
1765 struct ip6_tnl *nt, *t;
1767 nt = netdev_priv(dev);
1768 ip6_tnl_netlink_parms(data, &nt->parms);
1770 t = ip6_tnl_locate(net, &nt->parms, 0);
1771 if (!IS_ERR(t))
1772 return -EEXIST;
1774 return ip6_tnl_create2(dev);
1777 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1778 struct nlattr *data[])
1780 struct ip6_tnl *t = netdev_priv(dev);
1781 struct __ip6_tnl_parm p;
1782 struct net *net = t->net;
1783 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1785 if (dev == ip6n->fb_tnl_dev)
1786 return -EINVAL;
1788 ip6_tnl_netlink_parms(data, &p);
1790 t = ip6_tnl_locate(net, &p, 0);
1791 if (!IS_ERR(t)) {
1792 if (t->dev != dev)
1793 return -EEXIST;
1794 } else
1795 t = netdev_priv(dev);
1797 return ip6_tnl_update(t, &p);
1800 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
1802 struct net *net = dev_net(dev);
1803 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1805 if (dev != ip6n->fb_tnl_dev)
1806 unregister_netdevice_queue(dev, head);
1809 static size_t ip6_tnl_get_size(const struct net_device *dev)
1811 return
1812 /* IFLA_IPTUN_LINK */
1813 nla_total_size(4) +
1814 /* IFLA_IPTUN_LOCAL */
1815 nla_total_size(sizeof(struct in6_addr)) +
1816 /* IFLA_IPTUN_REMOTE */
1817 nla_total_size(sizeof(struct in6_addr)) +
1818 /* IFLA_IPTUN_TTL */
1819 nla_total_size(1) +
1820 /* IFLA_IPTUN_ENCAP_LIMIT */
1821 nla_total_size(1) +
1822 /* IFLA_IPTUN_FLOWINFO */
1823 nla_total_size(4) +
1824 /* IFLA_IPTUN_FLAGS */
1825 nla_total_size(4) +
1826 /* IFLA_IPTUN_PROTO */
1827                 nla_total_size(1) + 0;
1831 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
1833 struct ip6_tnl *tunnel = netdev_priv(dev);
1834 struct __ip6_tnl_parm *parm = &tunnel->parms;
1836 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
1837 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
1838 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
1839 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
1840 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
1841 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
1842 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
1843 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
1844 goto nla_put_failure;
1845 return 0;
1847 nla_put_failure:
1848 return -EMSGSIZE;
1851 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
1853 struct ip6_tnl *tunnel = netdev_priv(dev);
1855 return tunnel->net;
1857 EXPORT_SYMBOL(ip6_tnl_get_link_net);
1859 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
1860 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
1861 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
1862 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
1863 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
1864 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
1865 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
1866 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
1867 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
1870 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
1871 .kind = "ip6tnl",
1872 .maxtype = IFLA_IPTUN_MAX,
1873 .policy = ip6_tnl_policy,
1874 .priv_size = sizeof(struct ip6_tnl),
1875 .setup = ip6_tnl_dev_setup,
1876 .validate = ip6_tnl_validate,
1877 .newlink = ip6_tnl_newlink,
1878 .changelink = ip6_tnl_changelink,
1879 .dellink = ip6_tnl_dellink,
1880 .get_size = ip6_tnl_get_size,
1881 .fill_info = ip6_tnl_fill_info,
1882 .get_link_net = ip6_tnl_get_link_net,
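/*
 * The rtnl_link_ops above back the "ip6tnl" link type, so the tunnel can
 * also be configured over netlink, e.g. with iproute2 (device name and
 * addresses illustrative):
 *
 *	ip link add mytun type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 encaplimit 4
 *	ip link set mytun up
 *
 * The attributes parsed in ip6_tnl_netlink_parms() (link, local/remote
 * address, ttl, encap limit, flowinfo, flags, proto) map directly to the
 * members of struct __ip6_tnl_parm filled in there.
 */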
1885 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1886 .handler = ip4ip6_rcv,
1887 .err_handler = ip4ip6_err,
1888 .priority = 1,
1891 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1892 .handler = ip6ip6_rcv,
1893 .err_handler = ip6ip6_err,
1894 .priority = 1,
1897 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
1899 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1900 struct net_device *dev, *aux;
1901 int h;
1902 struct ip6_tnl *t;
1903 LIST_HEAD(list);
1905 for_each_netdev_safe(net, dev, aux)
1906 if (dev->rtnl_link_ops == &ip6_link_ops)
1907 unregister_netdevice_queue(dev, &list);
1909 for (h = 0; h < HASH_SIZE; h++) {
1910 t = rtnl_dereference(ip6n->tnls_r_l[h]);
1911 while (t) {
1912 /* If dev is in the same netns, it has already
1913 * been added to the list by the previous loop.
1915 if (!net_eq(dev_net(t->dev), net))
1916 unregister_netdevice_queue(t->dev, &list);
1917 t = rtnl_dereference(t->next);
1921 unregister_netdevice_many(&list);
1924 static int __net_init ip6_tnl_init_net(struct net *net)
1926 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1927 struct ip6_tnl *t = NULL;
1928 int err;
1930 ip6n->tnls[0] = ip6n->tnls_wc;
1931 ip6n->tnls[1] = ip6n->tnls_r_l;
1933 err = -ENOMEM;
1934 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
1935 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
1937 if (!ip6n->fb_tnl_dev)
1938 goto err_alloc_dev;
1939 dev_net_set(ip6n->fb_tnl_dev, net);
1940 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
1941 /* FB netdevice is special: we have one, and only one per netns.
1942          * Allowing it to be moved to another netns is clearly unsafe.
1944 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
1946 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
1947 if (err < 0)
1948 goto err_register;
1950 err = register_netdev(ip6n->fb_tnl_dev);
1951 if (err < 0)
1952 goto err_register;
1954 t = netdev_priv(ip6n->fb_tnl_dev);
1956 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
1957 return 0;
1959 err_register:
1960 ip6_dev_free(ip6n->fb_tnl_dev);
1961 err_alloc_dev:
1962 return err;
1965 static void __net_exit ip6_tnl_exit_net(struct net *net)
1967 rtnl_lock();
1968 ip6_tnl_destroy_tunnels(net);
1969 rtnl_unlock();
1972 static struct pernet_operations ip6_tnl_net_ops = {
1973 .init = ip6_tnl_init_net,
1974 .exit = ip6_tnl_exit_net,
1975 .id = &ip6_tnl_net_id,
1976 .size = sizeof(struct ip6_tnl_net),
1980 * ip6_tunnel_init - register protocol and reserve needed resources
1982 * Return: 0 on success
1985 static int __init ip6_tunnel_init(void)
1987 int err;
1989 err = register_pernet_device(&ip6_tnl_net_ops);
1990 if (err < 0)
1991 goto out_pernet;
1993 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1994 if (err < 0) {
1995 pr_err("%s: can't register ip4ip6\n", __func__);
1996 goto out_ip4ip6;
1999 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2000 if (err < 0) {
2001 pr_err("%s: can't register ip6ip6\n", __func__);
2002 goto out_ip6ip6;
2004 err = rtnl_link_register(&ip6_link_ops);
2005 if (err < 0)
2006 goto rtnl_link_failed;
2008 return 0;
2010 rtnl_link_failed:
2011 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2012 out_ip6ip6:
2013 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2014 out_ip4ip6:
2015 unregister_pernet_device(&ip6_tnl_net_ops);
2016 out_pernet:
2017 return err;
2021 * ip6_tunnel_cleanup - free resources and unregister protocol
2024 static void __exit ip6_tunnel_cleanup(void)
2026 rtnl_link_unregister(&ip6_link_ops);
2027 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2028 pr_info("%s: can't deregister ip4ip6\n", __func__);
2030 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2031 pr_info("%s: can't deregister ip6ip6\n", __func__);
2033 unregister_pernet_device(&ip6_tnl_net_ops);
2036 module_init(ip6_tunnel_init);
2037 module_exit(ip6_tunnel_cleanup);