/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

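/* Tunnels live in a per-netns hash table (struct ip_tunnel_net), bucketed
 * by the tunnel key XORed with the remote endpoint address, reduced to
 * IP_TNL_HASH_BITS bits.
 */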
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
        return hash_32((__force u32)key ^ (__force u32)remote,
                       IP_TNL_HASH_BITS);
}

static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
{
        if (p->i_flags & TUNNEL_KEY) {
                if (flags & TUNNEL_KEY)
                        return key == p->i_key;
                else
                        /* key expected, none present */
                        return false;
        } else
                return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched by a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for the incoming packet.
*/
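/* Illustrative example (not part of the original file): with two tunnels
 * sharing the same local/remote addresses, A keyed 42 and B keyless, a
 * packet carrying key 42 matches A in the first pass below, a keyless
 * packet matches B, and a packet carrying key 7 matches neither and is
 * delivered to a collect_md tunnel if one exists, else to the fallback
 * device if it is up.
 */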
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, __be16 flags,
                                   __be32 remote, __be32 local,
                                   __be32 key)
{
        unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;

        hash = ip_tunnel_hash(key, remote);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else
                        cand = t;
        }

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
                    t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        hash = ip_tunnel_hash(key, 0);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
                    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
                        continue;

                if (!(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        if (flags & TUNNEL_NO_KEY)
                goto skip_key_lookup;

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
                    t->parms.iph.saddr != 0 ||
                    t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

skip_key_lookup:
        if (cand)
                return cand;

        t = rcu_dereference(itn->collect_md_tun);
        if (t && t->dev->flags & IFF_UP)
                return t;

        if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
                return netdev_priv(itn->fb_tunnel_dev);

        return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
                                    struct ip_tunnel_parm *parms)
{
        unsigned int h;
        __be32 remote;
        __be32 i_key = parms->i_key;

        if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
                remote = parms->iph.daddr;
        else
                remote = 0;

        if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
                i_key = 0;

        h = ip_tunnel_hash(i_key, remote);
        return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        struct hlist_head *head = ip_bucket(itn, &t->parms);

        if (t->collect_md)
                rcu_assign_pointer(itn->collect_md_tun, t);
        hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        if (t->collect_md)
                rcu_assign_pointer(itn->collect_md_tun, NULL);
        hlist_del_init_rcu(&t->hash_node);
}

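/* Unlike ip_tunnel_lookup(), which does best-match resolution on the
 * packet receive path, ip_tunnel_find() is an exact-match lookup used on
 * the configuration paths (ioctl and netlink) further below.
 */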
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
                                        struct ip_tunnel_parm *parms,
                                        int type)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        __be16 flags = parms->i_flags;
        int link = parms->link;
        struct ip_tunnel *t = NULL;
        struct hlist_head *head = ip_bucket(itn, parms);

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    link == t->parms.link &&
                    type == t->dev->type &&
                    ip_tunnel_key_match(&t->parms, flags, key))
                        break;
        }
        return t;
}

static struct net_device *__ip_tunnel_create(struct net *net,
                                             const struct rtnl_link_ops *ops,
                                             struct ip_tunnel_parm *parms)
{
        int err;
        struct ip_tunnel *tunnel;
        struct net_device *dev;
        char name[IFNAMSIZ];

        err = -E2BIG;
        if (parms->name[0]) {
                if (!dev_valid_name(parms->name))
                        goto failed;
                strlcpy(name, parms->name, IFNAMSIZ);
        } else {
                if (strlen(ops->kind) > (IFNAMSIZ - 3))
                        goto failed;
                strcpy(name, ops->kind);
                strcat(name, "%d");
        }

        ASSERT_RTNL();
        dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
        }
        dev_net_set(dev, net);

        dev->rtnl_link_ops = ops;

        tunnel = netdev_priv(dev);
        tunnel->parms = *parms;
        tunnel->net = net;

        err = register_netdevice(dev);
        if (err)
                goto failed_free;

        return dev;

failed_free:
        free_netdev(dev);
failed:
        return ERR_PTR(err);
}

static inline void init_tunnel_flow(struct flowi4 *fl4,
                                    int proto,
                                    __be32 daddr, __be32 saddr,
                                    __be32 key, __u8 tos, int oif,
                                    __u32 mark)
{
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = oif;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
        fl4->flowi4_proto = proto;
        fl4->fl4_gre_key = key;
        fl4->flowi4_mark = mark;
}

static int ip_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */
        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
                                 iph->saddr, tunnel->parms.o_key,
                                 RT_TOS(iph->tos), tunnel->parms.link,
                                 tunnel->fwmark);
                rt = ip_route_output_key(tunnel->net, &fl4);

                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;

                dst_cache_reset(&tunnel->dst_cache);
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }

        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);

        if (mtu < IPV4_MIN_MTU)
                mtu = IPV4_MIN_MTU;

        return mtu;
}

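/* A worked instance of the arithmetic above (illustrative numbers only):
 * for an unkeyed GRE tunnel over a 1500-byte Ethernet underlay,
 * t_hlen = 4 (basic GRE header) + 20 (outer IPv4 header), and the tunnel
 * device itself has no link-layer header, so the tunnel MTU comes out as
 * 1500 - 24 = 1476, the familiar GRE default.
 */
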
static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
{
        struct ip_tunnel *nt;
        struct net_device *dev;
        int t_hlen;

        BUG_ON(!itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);

        dev->mtu = ip_tunnel_bind_dev(dev);

        nt = netdev_priv(dev);
        t_hlen = nt->hlen + sizeof(struct iphdr);
        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
        ip_tunnel_add(itn, nt);
        return nt;
}

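/* Generic receive path shared by the IPv4 tunnel drivers: verify that the
 * checksum and sequence-number flags of the parsed header (tpi) agree
 * with the tunnel's configuration, decapsulate ECN, update the per-cpu
 * stats, and hand the skb to the tunnel device's GRO cell.
 */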
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
                  bool log_ecn_error)
{
        struct pcpu_sw_netstats *tstats;
        const struct iphdr *iph = ip_hdr(skb);
        int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
                tunnel->dev->stats.multicast++;
                skb->pkt_type = PACKET_BROADCAST;
        }
#endif

        if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
            ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
                tunnel->dev->stats.rx_crc_errors++;
                tunnel->dev->stats.rx_errors++;
                goto drop;
        }

        if (tunnel->parms.i_flags&TUNNEL_SEQ) {
                if (!(tpi->flags&TUNNEL_SEQ) ||
                    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
                        tunnel->dev->stats.rx_fifo_errors++;
                        tunnel->dev->stats.rx_errors++;
                        goto drop;
                }
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }

        skb_reset_network_header(skb);

        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++tunnel->dev->stats.rx_frame_errors;
                        ++tunnel->dev->stats.rx_errors;
                        goto drop;
                }
        }

        tstats = this_cpu_ptr(tunnel->dev->tstats);
        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

        if (tunnel->dev->type == ARPHRD_ETHER) {
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
        }

        if (tun_dst)
                skb_dst_set(skb, (struct dst_entry *)tun_dst);

        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;

drop:
        if (tun_dst)
                dst_release((struct dst_entry *)tun_dst);
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
                            unsigned int num)
{
        if (num >= MAX_IPTUN_ENCAP_OPS)
                return -ERANGE;

        return !cmpxchg((const struct ip_tunnel_encap_ops **)
                        &iptun_encaps[num],
                        NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);

int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
                            unsigned int num)
{
        int ret;

        if (num >= MAX_IPTUN_ENCAP_OPS)
                return -ERANGE;

        ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
                       &iptun_encaps[num],
                       ops, NULL) == ops) ? 0 : -1;

        synchronize_net();

        return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
                          struct ip_tunnel_encap *ipencap)
{
        int hlen;

        memset(&t->encap, 0, sizeof(t->encap));

        hlen = ip_encap_hlen(ipencap);
        if (hlen < 0)
                return hlen;

        t->encap.type = ipencap->type;
        t->encap.sport = ipencap->sport;
        t->encap.dport = ipencap->dport;
        t->encap.flags = ipencap->flags;

        t->encap_hlen = hlen;
        t->hlen = t->encap_hlen + t->tun_hlen;

        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);

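/* Compute the path MTU seen by the inner packet and propagate it: when a
 * DF-marked IPv4 packet (or an IPv6 packet) no longer fits, report
 * ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG back to the sender and fail the
 * transmission with -E2BIG.
 */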
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                           struct rtable *rt, __be16 df,
                           const struct iphdr *inner_iph)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
        int mtu;

        if (df)
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len
                                        - sizeof(struct iphdr) - tunnel->hlen;
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        skb_dst_update_pmtu(skb, mtu);

        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
                    (inner_iph->frag_off & htons(IP_DF)) &&
                    mtu < pkt_size) {
                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        return -E2BIG;
                }
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

                if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
                    mtu >= IPV6_MIN_MTU) {
                        if ((tunnel->parms.iph.daddr &&
                             !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
                                dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }

                if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
                    mtu < pkt_size) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        return -E2BIG;
                }
        }
#endif
        return 0;
}

void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        u32 headroom = sizeof(struct iphdr);
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        const struct iphdr *inner_iph;
        struct rtable *rt;
        struct flowi4 fl4;
        __be16 df = 0;
        u8 tos, ttl;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto tx_error;
        key = &tun_info->key;
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        tos = key->tos;
        if (tos == 1) {
                if (skb->protocol == htons(ETH_P_IP))
                        tos = inner_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
        }
        init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
                         RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
        if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
                goto tx_error;
        rt = ip_route_output_key(tunnel->net, &fl4);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }
        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = key->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }
        if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
                df = htons(IP_DF);
        else if (skb->protocol == htons(ETH_P_IP))
                df = inner_iph->frag_off & htons(IP_DF);
        headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
        if (headroom > dev->needed_headroom)
                dev->needed_headroom = headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                ip_rt_put(rt);
                goto tx_dropped;
        }
        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;
tx_error:
        dev->stats.tx_errors++;
        goto kfree;
tx_dropped:
        dev->stats.tx_dropped++;
kfree:
        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);

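/* Full transmit path for classical (ioctl/netlink-configured) tunnels:
 * resolve the outer destination (including the NBMA case, where it is
 * derived from the inner headers), inherit TOS/TTL from the inner packet
 * when requested, consult the per-tunnel dst cache, enforce PMTU, and
 * finally push the outer IP header via iptunnel_xmit().
 */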
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, u8 protocol)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        unsigned int inner_nhdr_len = 0;
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8 tos, ttl;
        __be16 df;
        struct rtable *rt;              /* Route to the other host */
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        bool connected;

        /* ensure we can access the inner net header, for several users below */
        if (skb->protocol == htons(ETH_P_IP))
                inner_nhdr_len = sizeof(struct iphdr);
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner_nhdr_len = sizeof(struct ipv6hdr);
        if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
                goto tx_error;

        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */

                if (!skb_dst(skb)) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, inner_iph->daddr);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        const struct in6_addr *addr6;
                        struct neighbour *neigh;
                        bool do_tx_error_icmp;
                        int addr_type;

                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
                        if (!neigh)
                                goto tx_error;

                        addr6 = (const struct in6_addr *)&neigh->primary_key;
                        addr_type = ipv6_addr_type(addr6);

                        if (addr_type == IPV6_ADDR_ANY) {
                                addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }

                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
                                do_tx_error_icmp = true;
                        else {
                                do_tx_error_icmp = false;
                                dst = addr6->s6_addr32[3];
                        }
                        neigh_release(neigh);
                        if (do_tx_error_icmp)
                                goto tx_error_icmp;
                }
#endif
                else
                        goto tx_error;

                connected = false;
        }

        tos = tnl_params->tos;
        if (tos & 0x1) {
                tos &= ~0x1;
                if (skb->protocol == htons(ETH_P_IP)) {
                        tos = inner_iph->tos;
                        connected = false;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
                        connected = false;
                }
        }

        init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
                         tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
                         tunnel->fwmark);

        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
                goto tx_error;

        rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
                         NULL;

        if (!rt) {
                rt = ip_route_output_key(tunnel->net, &fl4);

                if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error;
                }
                if (connected)
                        dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
                                          fl4.saddr);
        }

        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
                ip_rt_put(rt);
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;

                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }

        df = tnl_params->frag_off;
        if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
                df |= (inner_iph->frag_off&htons(IP_DF));

        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
        if (max_headroom > dev->needed_headroom)
                dev->needed_headroom = max_headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                ip_rt_put(rt);
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return;
        }

        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
        dst_link_failure(skb);
#endif
tx_error:
        dev->stats.tx_errors++;
        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

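/* Changing the endpoints or keys can move a tunnel to a different hash
 * bucket, so it is unlinked first and re-added once its parms have been
 * updated.
 */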
static void ip_tunnel_update(struct ip_tunnel_net *itn,
                             struct ip_tunnel *t,
                             struct net_device *dev,
                             struct ip_tunnel_parm *p,
                             bool set_mtu,
                             __u32 fwmark)
{
        ip_tunnel_del(itn, t);
        t->parms.iph.saddr = p->iph.saddr;
        t->parms.iph.daddr = p->iph.daddr;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->iph.saddr, 4);
                memcpy(dev->broadcast, &p->iph.daddr, 4);
        }
        ip_tunnel_add(itn, t);

        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
        t->parms.iph.frag_off = p->iph.frag_off;

        if (t->parms.link != p->link || t->fwmark != fwmark) {
                int mtu;

                t->parms.link = p->link;
                t->fwmark = fwmark;
                mtu = ip_tunnel_bind_dev(dev);
                if (set_mtu)
                        dev->mtu = mtu;
        }
        dst_cache_reset(&t->dst_cache);
        netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
        int err = 0;
        struct ip_tunnel *t = netdev_priv(dev);
        struct net *net = t->net;
        struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

        BUG_ON(!itn->fb_tunnel_dev);
        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == itn->fb_tunnel_dev) {
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (!t)
                                t = netdev_priv(dev);
                }
                memcpy(p, &t->parms, sizeof(*p));
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;
                if (p->iph.ttl)
                        p->iph.frag_off |= htons(IP_DF);
                if (!(p->i_flags & VTI_ISVTI)) {
                        if (!(p->i_flags & TUNNEL_KEY))
                                p->i_key = 0;
                        if (!(p->o_flags & TUNNEL_KEY))
                                p->o_key = 0;
                }

                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

                if (cmd == SIOCADDTUNNEL) {
                        if (!t) {
                                t = ip_tunnel_create(net, itn, p);
                                err = PTR_ERR_OR_ZERO(t);
                                break;
                        }

                        err = -EEXIST;
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned int nflags = 0;

                                if (ipv4_is_multicast(p->iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p->iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }

                                t = netdev_priv(dev);
                        }
                }

                if (t) {
                        err = 0;
                        ip_tunnel_update(itn, t, dev, p, true, 0);
                } else {
                        err = -ENOENT;
                }
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

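/* For context, userspace drives the ioctl interface above by passing a
 * struct ip_tunnel_parm through ifr_ifru.ifru_data of a struct ifreq
 * addressed at the fallback device, as iproute2 does. A minimal sketch
 * (illustrative only; device names and addresses are placeholders, and
 * error handling is omitted):
 *
 *      struct ip_tunnel_parm p = {0};
 *      struct ifreq ifr = {0};
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      strncpy(p.name, "tunl1", IFNAMSIZ);
 *      p.iph.version = 4;
 *      p.iph.ihl = 5;
 *      p.iph.protocol = IPPROTO_IPIP;
 *      p.iph.saddr = inet_addr("192.0.2.1");
 *      p.iph.daddr = inet_addr("198.51.100.2");
 *      strncpy(ifr.ifr_name, "tunl0", IFNAMSIZ);
 *      ifr.ifr_ifru.ifru_data = (void *)&p;
 *      ioctl(fd, SIOCADDTUNNEL, &ifr);
 */
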
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
        int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;

        if (new_mtu < ETH_MIN_MTU)
                return -EINVAL;

        if (new_mtu > max_mtu) {
                if (strict)
                        return -EINVAL;

                new_mtu = max_mtu;
        }

        dev->mtu = new_mtu;
        return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        gro_cells_destroy(&tunnel->gro_cells);
        dst_cache_destroy(&tunnel->dst_cache);
        free_percpu(dev->tstats);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn;

        itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

        if (itn->fb_tunnel_dev != dev) {
                ip_tunnel_del(itn, netdev_priv(dev));
                unregister_netdevice_queue(dev, head);
        }
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);

int ip_tunnel_get_iflink(const struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);

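/* Per-netns initialization: set up the hash table and, unless ops is
 * NULL, create the fallback ("fb") device that catches otherwise
 * unmatched traffic and serves as the entry point for the ioctl
 * interface.
 */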
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname)
{
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
        struct ip_tunnel_parm parms;
        unsigned int i;

        for (i = 0; i < IP_TNL_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&itn->tunnels[i]);

        if (!ops) {
                itn->fb_tunnel_dev = NULL;
                return 0;
        }

        memset(&parms, 0, sizeof(parms));
        if (devname)
                strlcpy(parms.name, devname, IFNAMSIZ);

        rtnl_lock();
        itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
                itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();

        return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                              struct rtnl_link_ops *ops)
{
        struct net *net = dev_net(itn->fb_tunnel_dev);
        struct net_device *dev, *aux;
        int h;

        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == ops)
                        unregister_netdevice_queue(dev, head);

        for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
                struct ip_tunnel *t;
                struct hlist_node *n;
                struct hlist_head *thead = &itn->tunnels[h];

                hlist_for_each_entry_safe(t, n, thead, hash_node)
                        /* If dev is in the same netns, it has already
                         * been added to the list by the previous loop.
                         */
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
        LIST_HEAD(list);

        rtnl_lock();
        ip_tunnel_destroy(itn, &list, ops);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

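/* Netlink creation path. Note that at most one collect_md (external
 * mode) tunnel may exist per netns, and duplicate classical tunnels are
 * rejected with -EEXIST.
 */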
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p, __u32 fwmark)
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct ip_tunnel_net *itn;
        int mtu;
        int err;

        nt = netdev_priv(dev);
        itn = net_generic(net, nt->ip_tnl_net_id);

        if (nt->collect_md) {
                if (rtnl_dereference(itn->collect_md_tun))
                        return -EEXIST;
        } else {
                if (ip_tunnel_find(itn, p, dev->type))
                        return -EEXIST;
        }

        nt->net = net;
        nt->parms = *p;
        nt->fwmark = fwmark;
        err = register_netdevice(dev);
        if (err)
                goto out;

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        mtu = ip_tunnel_bind_dev(dev);
        if (tb[IFLA_MTU]) {
                unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;

                dev->mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
                                 (unsigned int)(max - sizeof(struct iphdr)));
        } else {
                dev->mtu = mtu;
        }

        ip_tunnel_add(itn, nt);
out:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p, __u32 fwmark)
{
        struct ip_tunnel *t;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        if (dev == itn->fb_tunnel_dev)
                return -EINVAL;

        t = ip_tunnel_find(itn, p, dev->type);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = tunnel;

                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;

                        if (ipv4_is_multicast(p->iph.daddr))
                                nflags = IFF_BROADCAST;
                        else if (p->iph.daddr)
                                nflags = IFF_POINTOPOINT;

                        if ((dev->flags ^ nflags) &
                            (IFF_POINTOPOINT | IFF_BROADCAST))
                                return -EINVAL;
                }
        }

        ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        int err;

        dev->needs_free_netdev = true;
        dev->priv_destructor = ip_tunnel_dev_free;
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (err) {
                free_percpu(dev->tstats);
                return err;
        }

        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                dst_cache_destroy(&tunnel->dst_cache);
                free_percpu(dev->tstats);
                return err;
        }

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
        iph->version = 4;
        iph->ihl = 5;

        if (tunnel->collect_md) {
                dev->features |= NETIF_F_NETNS_LOCAL;
                netif_keep_dst(dev);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn;

        itn = net_generic(net, tunnel->ip_tnl_net_id);
        /* fb_tunnel_dev will be unregistered in the net-exit call. */
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(itn, netdev_priv(dev));

        dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do only the least required initialization here; the rest is done in the
 * tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");