/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}

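/* Key matching is symmetric: a tunnel configured with TUNNEL_KEY only
 * accepts packets carrying the same key, and a keyless tunnel only
 * accepts packets carrying no key at all.
 */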
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   We require an exact key match: if a key is present in the packet,
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for input.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

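/* Pick the hash bucket a tunnel with these parameters belongs to.
 * Multicast and wildcard destinations hash with remote == 0, and VTI
 * tunnels that do not use TUNNEL_KEY hash with i_key == 0, matching
 * the buckets probed by ip_tunnel_lookup() above.
 */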
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}

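/* Exact-match search over a single bucket, used on the control path
 * (ioctl/netlink) to find an already configured tunnel; unlike
 * ip_tunnel_lookup() there is no wildcard or fallback matching.
 */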
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}

	return t;
}

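/* Allocate and register a tunnel netdevice. The device name comes from
 * parms->name when set, otherwise "<kind>%d" is used and the kernel
 * picks the first free index at registration time.
 */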
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
			err = -E2BIG;
			goto failed;
		}
		strlcpy(name, ops->kind, IFNAMSIZ);
		strncat(name, "%d", 2);
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}

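/* Derive the tunnel MTU and needed_headroom from the underlying
 * device: route the tunnel destination when one is known, otherwise
 * fall back to the device bound via parms.link.
 */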
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < 68)
		mtu = 68;

	return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;
	int t_hlen;

	BUG_ON(!itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	t_hlen = nt->hlen + sizeof(struct iphdr);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
	ip_tunnel_add(itn, nt);
	return nt;
}

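/* Receive path common to all IPv4 tunnels. Validates the checksum and
 * sequence-number flags against the tunnel configuration, decapsulates
 * ECN, updates per-CPU stats and hands the packet to GRO. On any
 * error the skb is dropped and 0 is still returned.
 */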
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

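/* Encapsulation handlers (e.g. FOU-style UDP encapsulation) register
 * themselves in the global iptun_encaps[] array. cmpxchg() makes
 * registration atomic: a slot is claimed only if it is currently NULL,
 * and released only by its current owner.
 */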
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);

int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);

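/* Propagate the path MTU of the outer route to the inner flow, sending
 * ICMP (or ICMPv6) "fragmentation needed" back when a too-big packet
 * may not be fragmented.
 */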
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			    struct rtable *rt, __be16 df,
			    const struct iphdr *inner_iph)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}

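/* Transmit path for metadata-mode (collect_md) tunnels: tos, ttl, df
 * and the endpoints all come from the per-packet tunnel key rather
 * than from the device configuration.
 */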
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	u32 headroom = sizeof(struct iphdr);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	const struct iphdr *inner_iph;
	struct rtable *rt;
	struct flowi4 fl4;
	__be16 df = 0;
	u8 tos, ttl;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto tx_error;
	key = &tun_info->key;
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	tos = key->tos;
	if (tos == 1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}
	init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
			 RT_TOS(tos), tunnel->parms.link);
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
		goto tx_error;
	rt = ip_route_output_key(tunnel->net, &fl4);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}
	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = key->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}
	if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
		df = htons(IP_DF);
	else if (skb->protocol == htons(ETH_P_IP))
		df = inner_iph->frag_off & htons(IP_DF);
	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
	if (headroom > dev->needed_headroom)
		dev->needed_headroom = headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		goto tx_dropped;
	}
	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
		      key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
	return;
tx_error:
	dev->stats.tx_errors++;
	goto kfree;
tx_dropped:
	dev->stats.tx_dropped++;
kfree:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);

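/* Classical transmit path: the outer header is built from the tunnel's
 * configured parameters. A daddr of 0 means an NBMA tunnel whose
 * destination is recovered from the inner packet's routing state, and
 * routes of "connected" (fixed-destination) tunnels are kept in
 * tunnel->dst_cache.
 */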
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	bool connected;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
			 NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

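/* Re-key and re-address an existing tunnel. The device is unhashed,
 * updated and re-hashed since saddr/daddr/key determine its bucket;
 * a link change forces the MTU and headroom to be re-derived.
 */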
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

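/* 0xFFF8 is the largest 8-byte-aligned value that still fits the
 * 16-bit IPv4 total-length field; the usable tunnel MTU is that minus
 * the outer headers.
 */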
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;

	if (new_mtu < ETH_MIN_MTU)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);

int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);

int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing it to move to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

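/* Flush a dying netns: queue every device created by this tunnel type
 * for unregistration, then sweep the hash table for tunnels whose
 * device lives in another netns and so was missed by the first loop.
 */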
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->destructor	= ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version		= 4;
	iph->ihl		= 5;

	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(itn, netdev_priv(dev));

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do only the least required initialization; the rest of the init is
 * done in the tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");