/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}
/* Fallback tunnel: no source, no destination, no key, no options.
 *
 * Tunnel hash table:
 * We require an exact key match, i.e. if a key is present in the packet
 * it will match only a tunnel with the same key; if it is not present,
 * it will match only a keyless tunnel.
 *
 * All keyless packets, if not matched against a configured keyless tunnel,
 * will match the fallback tunnel.
 * Given src, dst and key, find the appropriate tunnel for the input packet.
 */
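/* Lookup precedence, as implemented below: first an exact (saddr, daddr,
 * key) match, then a match on daddr only, then on the local/multicast
 * address, then on key alone.  Within each pass a tunnel bound to the
 * matching link is preferred over a candidate on another link.  If nothing
 * matches, the collect_md tunnel and finally the fallback device are tried.
 */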
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
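/* Select the hash bucket for a tunnel: hash on i_key and the unicast
 * destination address.  Multicast destinations hash like wildcard ones,
 * and VTI tunnels configured without TUNNEL_KEY ignore i_key here.
 */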
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}
static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}

	return t;
}
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
			err = -E2BIG;
			goto failed;
		}
		strlcpy(name, ops->kind, IFNAMSIZ);
		strncat(name, "%d", 2);
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}
static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < 68)
		mtu = 68;

	return mtu;
}
static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;

	BUG_ON(!itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	ip_tunnel_add(itn, nt);
	return nt;
}
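/* Receive path common to the IP-in-IP style tunnels built on this library:
 * validate the TUNNEL_CSUM and TUNNEL_SEQ expectations against the parsed
 * header, decapsulate ECN, update per-CPU stats and hand the inner packet
 * to the tunnel device via GRO cells.
 */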
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
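/* Path-MTU bookkeeping for the encapsulated flow: derive the tunnel MTU
 * from the outer route (or the inner dst), propagate it with update_pmtu(),
 * and signal ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG back to the sender when
 * an oversized, non-GSO packet cannot be fragmented.
 */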
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			   struct rtable *rt, __be16 df,
			   const struct iphdr *inner_iph)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
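/* Transmit path for metadata-based (collect_md) tunnels: the outer
 * addresses, TOS, TTL and DF come from the per-packet tunnel info attached
 * to the skb rather than from the netdevice configuration.
 */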
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	u32 headroom = sizeof(struct iphdr);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	const struct iphdr *inner_iph;
	struct rtable *rt;
	struct flowi4 fl4;
	__be16 df = 0;
	u8 tos, ttl;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto tx_error;
	key = &tun_info->key;
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	tos = key->tos;
	if (tos == 1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}
	init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
			 RT_TOS(tos), tunnel->parms.link);
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
		goto tx_error;
	rt = ip_route_output_key(tunnel->net, &fl4);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}
	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = key->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}
	if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
		df = htons(IP_DF);
	else if (skb->protocol == htons(ETH_P_IP))
		df = inner_iph->frag_off & htons(IP_DF);
	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
	if (headroom > dev->needed_headroom)
		dev->needed_headroom = headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		goto tx_dropped;
	}
	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
		      key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
	return;

tx_error:
	dev->stats.tx_errors++;
	goto kfree;
tx_dropped:
	dev->stats.tx_dropped++;
kfree:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
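/* Transmit path for classically configured tunnels: resolve the outer
 * route (using the per-tunnel dst cache when the destination is fixed),
 * honour PMTU, then build and send the outer IP header.
 */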
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	bool connected;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
			 NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}
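/* Legacy ioctl interface (SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/
 * SIOCDELTUNNEL) shared by the tunnel drivers built on this library.
 */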
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
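/* MTU changes are bounded by the room left for the outer headers;
 * non-strict callers are clamped to the maximum instead of rejected.
 */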
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;

	if (new_mtu < 68)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
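/* Per-namespace setup: initialise the hash table and, when an rtnl_link_ops
 * is given, create the fallback device that catches otherwise unmatched
 * packets for this tunnel type.
 */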
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
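/* Netlink link creation/modification helpers used by the individual tunnel
 * drivers; only one collect_md tunnel may exist per namespace.
 */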
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
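/* ndo_init/ndo_uninit helpers: allocate per-CPU stats, the dst cache and
 * GRO cells at init time and tear them down via ip_tunnel_dev_free().
 */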
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->destructor	= ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version		= 4;
	iph->ihl		= 5;

	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(itn, netdev_priv(dev));

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
/* Do the least required initialization; the rest of the init is done in the
 * tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);
MODULE_LICENSE("GPL");