/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
        return hash_32((__force u32)key ^ (__force u32)remote,
                       IP_TNL_HASH_BITS);
}

static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
{
        if (p->i_flags & TUNNEL_KEY) {
                if (flags & TUNNEL_KEY)
                        return key == p->i_key;
                else
                        /* key expected, none present */
                        return false;
        } else
                return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched to a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for the input packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, __be16 flags,
                                   __be32 remote, __be32 local,
                                   __be32 key)
{
        unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;

        hash = ip_tunnel_hash(key, remote);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else
                        cand = t;
        }

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
                    t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        hash = ip_tunnel_hash(key, 0);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
                    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
                        continue;

                if (!(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        if (flags & TUNNEL_NO_KEY)
                goto skip_key_lookup;

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
                    t->parms.iph.saddr != 0 ||
                    t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

skip_key_lookup:
        if (cand)
                return cand;

        t = rcu_dereference(itn->collect_md_tun);
        if (t && t->dev->flags & IFF_UP)
                return t;

        if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
                return netdev_priv(itn->fb_tunnel_dev);

        return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
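
/* Illustrative sketch (not part of the upstream file): a decapsulating
 * protocol handler would typically resolve the tunnel for an inbound
 * packet roughly like this, under rcu_read_lock(), before handing the
 * skb to ip_tunnel_rcv():
 *
 *      struct ip_tunnel *t;
 *
 *      t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 *                           iph->saddr, iph->daddr, tpi->key);
 *      if (t)
 *              return ip_tunnel_rcv(t, skb, tpi, NULL, log_ecn_error);
 *
 * Note the argument order: "remote" is the peer, i.e. the outer source
 * address of the received packet.
 */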

static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
                                    struct ip_tunnel_parm *parms)
{
        unsigned int h;
        __be32 remote;
        __be32 i_key = parms->i_key;

        if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
                remote = parms->iph.daddr;
        else
                remote = 0;

        if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
                i_key = 0;

        h = ip_tunnel_hash(i_key, remote);
        return &itn->tunnels[h];
}
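
/* Note (added commentary): ip_bucket() and ip_tunnel_lookup() must agree
 * on the hash inputs. A multicast or unset daddr hashes with remote == 0,
 * which is the same bucket the lookup's wildcard passes probe; e.g. a
 * keyless tunnel with no fixed destination lands in bucket
 * ip_tunnel_hash(0, 0), where the fallback probes expect to find it.
 */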

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        struct hlist_head *head = ip_bucket(itn, &t->parms);

        if (t->collect_md)
                rcu_assign_pointer(itn->collect_md_tun, t);
        hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        if (t->collect_md)
                rcu_assign_pointer(itn->collect_md_tun, NULL);
        hlist_del_init_rcu(&t->hash_node);
}

static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
                                        struct ip_tunnel_parm *parms,
                                        int type)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        __be16 flags = parms->i_flags;
        int link = parms->link;
        struct ip_tunnel *t = NULL;
        struct hlist_head *head = ip_bucket(itn, parms);

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    link == t->parms.link &&
                    type == t->dev->type &&
                    ip_tunnel_key_match(&t->parms, flags, key))
                        break;
        }
        return t;
}

static struct net_device *__ip_tunnel_create(struct net *net,
                                             const struct rtnl_link_ops *ops,
                                             struct ip_tunnel_parm *parms)
{
        int err;
        struct ip_tunnel *tunnel;
        struct net_device *dev;
        char name[IFNAMSIZ];

        err = -E2BIG;
        if (parms->name[0]) {
                if (!dev_valid_name(parms->name))
                        goto failed;
                strlcpy(name, parms->name, IFNAMSIZ);
        } else {
                if (strlen(ops->kind) > (IFNAMSIZ - 3))
                        goto failed;
                strlcpy(name, ops->kind, IFNAMSIZ);
                strncat(name, "%d", 2);
        }

        ASSERT_RTNL();
        dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
        }
        dev_net_set(dev, net);

        dev->rtnl_link_ops = ops;

        tunnel = netdev_priv(dev);
        tunnel->parms = *parms;
        tunnel->net = net;

        err = register_netdevice(dev);
        if (err)
                goto failed_free;

        return dev;

failed_free:
        free_netdev(dev);
failed:
        return ERR_PTR(err);
}

static inline void init_tunnel_flow(struct flowi4 *fl4,
                                    int proto,
                                    __be32 daddr, __be32 saddr,
                                    __be32 key, __u8 tos, int oif)
{
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = oif;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
        fl4->flowi4_proto = proto;
        fl4->fl4_gre_key = key;
}

static int ip_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */
        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
                                 iph->saddr, tunnel->parms.o_key,
                                 RT_TOS(iph->tos), tunnel->parms.link);
                rt = ip_route_output_key(tunnel->net, &fl4);

                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;

                dst_cache_reset(&tunnel->dst_cache);
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }

        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);

        if (mtu < IPV4_MIN_MTU)
                mtu = IPV4_MIN_MTU;

        return mtu;
}
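
/* Worked example of the arithmetic above (illustrative): for a GRE
 * tunnel with a plain 4-byte GRE header (tunnel->hlen == 4) over a
 * 1500-byte Ethernet underlay, t_hlen = 4 + 20 = 24, and with
 * hard_header_len == 0 on the point-to-point tunnel device the
 * advertised tunnel MTU becomes 1500 - 24 = 1476, the classic GRE MTU.
 */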

static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
{
        struct ip_tunnel *nt;
        struct net_device *dev;

        BUG_ON(!itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);

        dev->mtu = ip_tunnel_bind_dev(dev);

        nt = netdev_priv(dev);
        ip_tunnel_add(itn, nt);
        return nt;
}

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
                  bool log_ecn_error)
{
        struct pcpu_sw_netstats *tstats;
        const struct iphdr *iph = ip_hdr(skb);
        int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
                tunnel->dev->stats.multicast++;
                skb->pkt_type = PACKET_BROADCAST;
        }
#endif

        if ((!(tpi->flags & TUNNEL_CSUM) && (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
            ((tpi->flags & TUNNEL_CSUM) && !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
                tunnel->dev->stats.rx_crc_errors++;
                tunnel->dev->stats.rx_errors++;
                goto drop;
        }

        if (tunnel->parms.i_flags & TUNNEL_SEQ) {
                if (!(tpi->flags & TUNNEL_SEQ) ||
                    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
                        tunnel->dev->stats.rx_fifo_errors++;
                        tunnel->dev->stats.rx_errors++;
                        goto drop;
                }
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }

        skb_reset_network_header(skb);

        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++tunnel->dev->stats.rx_frame_errors;
                        ++tunnel->dev->stats.rx_errors;
                        goto drop;
                }
        }

        tstats = this_cpu_ptr(tunnel->dev->tstats);
        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

        if (tunnel->dev->type == ARPHRD_ETHER) {
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
        }

        if (tun_dst)
                skb_dst_set(skb, (struct dst_entry *)tun_dst);

        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
                            unsigned int num)
{
        if (num >= MAX_IPTUN_ENCAP_OPS)
                return -ERANGE;

        return !cmpxchg((const struct ip_tunnel_encap_ops **)
                        &iptun_encaps[num],
                        NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);

int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
                            unsigned int num)
{
        int ret;

        if (num >= MAX_IPTUN_ENCAP_OPS)
                return -ERANGE;

        ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
                       &iptun_encaps[num],
                       ops, NULL) == ops) ? 0 : -1;

        synchronize_net();

        return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
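
/* Illustrative sketch (assumed registration pattern, modelled on what
 * encapsulation modules such as FOU do): a provider fills an
 * ip_tunnel_encap_ops with its header-length and header-build callbacks
 * and claims one of the MAX_IPTUN_ENCAP_OPS slots. The names example_*
 * below are hypothetical:
 *
 *      static const struct ip_tunnel_encap_ops example_encap_ops = {
 *              .encap_hlen     = example_encap_hlen,
 *              .build_header   = example_build_header,
 *      };
 *
 *      err = ip_tunnel_encap_add_ops(&example_encap_ops, TUNNEL_ENCAP_FOU);
 *
 * The cmpxchg() above makes registration first-come-first-served: a slot
 * already claimed by another module is refused with -1.
 */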

int ip_tunnel_encap_setup(struct ip_tunnel *t,
                          struct ip_tunnel_encap *ipencap)
{
        int hlen;

        memset(&t->encap, 0, sizeof(t->encap));

        hlen = ip_encap_hlen(ipencap);
        if (hlen < 0)
                return hlen;

        t->encap.type = ipencap->type;
        t->encap.sport = ipencap->sport;
        t->encap.dport = ipencap->dport;
        t->encap.flags = ipencap->flags;

        t->encap_hlen = hlen;
        t->hlen = t->encap_hlen + t->tun_hlen;

        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
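
/* Example of the length bookkeeping above (illustrative): for FOU over
 * UDP, ip_encap_hlen() resolves to sizeof(struct udphdr) == 8, so a GRE
 * tunnel with tun_hlen == 4 ends up with t->hlen == 8 + 4 == 12 bytes of
 * headers between the outer IP header and the inner packet.
 */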

static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                           struct rtable *rt, __be16 df,
                           const struct iphdr *inner_iph)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
        int mtu;

        if (df)
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len
                                        - sizeof(struct iphdr) - tunnel->hlen;
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
                    (inner_iph->frag_off & htons(IP_DF)) &&
                    mtu < pkt_size) {
                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        return -E2BIG;
                }
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

                if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
                           mtu >= IPV6_MIN_MTU) {
                        if ((tunnel->parms.iph.daddr &&
                            !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
                                dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }

                if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
                    mtu < pkt_size) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        return -E2BIG;
                }
        }
#endif
        return 0;
}

void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        u32 headroom = sizeof(struct iphdr);
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        const struct iphdr *inner_iph;
        struct rtable *rt;
        struct flowi4 fl4;
        __be16 df = 0;
        u8 tos, ttl;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto tx_error;
        key = &tun_info->key;
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        tos = key->tos;
        if (tos == 1) {
                if (skb->protocol == htons(ETH_P_IP))
                        tos = inner_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
        }
        init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
                         RT_TOS(tos), tunnel->parms.link);
        if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
                goto tx_error;
        rt = ip_route_output_key(tunnel->net, &fl4);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }
        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = key->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }
        if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
                df = htons(IP_DF);
        else if (skb->protocol == htons(ETH_P_IP))
                df = inner_iph->frag_off & htons(IP_DF);
        headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
        if (headroom > dev->needed_headroom)
                dev->needed_headroom = headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                ip_rt_put(rt);
                goto tx_dropped;
        }
        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;
tx_error:
        dev->stats.tx_errors++;
        goto kfree;
tx_dropped:
        dev->stats.tx_dropped++;
kfree:
        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
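
/* Usage note (illustrative): ip_md_tunnel_xmit() is the transmit path
 * for metadata-mode (collect_md) devices, where per-packet tunnel
 * parameters come from the skb's tunnel metadata rather than from
 * tunnel->parms. A driver's ndo_start_xmit for such a device can reduce
 * to something like:
 *
 *      if (tunnel->collect_md) {
 *              ip_md_tunnel_xmit(skb, dev, IPPROTO_IPIP);
 *              return NETDEV_TX_OK;
 *      }
 */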

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, u8 protocol)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8     tos, ttl;
        __be16 df;
        struct rtable *rt;              /* Route to the other host */
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        bool connected;

        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */

                if (!skb_dst(skb)) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, inner_iph->daddr);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        const struct in6_addr *addr6;
                        struct neighbour *neigh;
                        bool do_tx_error_icmp;
                        int addr_type;

                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
                        if (!neigh)
                                goto tx_error;

                        addr6 = (const struct in6_addr *)&neigh->primary_key;
                        addr_type = ipv6_addr_type(addr6);

                        if (addr_type == IPV6_ADDR_ANY) {
                                addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }

                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
                                do_tx_error_icmp = true;
                        else {
                                do_tx_error_icmp = false;
                                dst = addr6->s6_addr32[3];
                        }
                        neigh_release(neigh);
                        if (do_tx_error_icmp)
                                goto tx_error_icmp;
                }
#endif
                else
                        goto tx_error;

                connected = false;
        }

        tos = tnl_params->tos;
        if (tos & 0x1) {
                tos &= ~0x1;
                if (skb->protocol == htons(ETH_P_IP)) {
                        tos = inner_iph->tos;
                        connected = false;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
                        connected = false;
                }
        }

        init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
                         tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
                goto tx_error;

        rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
                         NULL;

        if (!rt) {
                rt = ip_route_output_key(tunnel->net, &fl4);

                if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error;
                }
                if (connected)
                        dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
                                          fl4.saddr);
        }

        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
                ip_rt_put(rt);
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;

                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }

        df = tnl_params->frag_off;
        if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
                df |= (inner_iph->frag_off & htons(IP_DF));

        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
        if (max_headroom > dev->needed_headroom)
                dev->needed_headroom = max_headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                ip_rt_put(rt);
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return;
        }

        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
                      df, !net_eq(tunnel->net, dev_net(dev)));
        return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
        dst_link_failure(skb);
#endif
tx_error:
        dev->stats.tx_errors++;
        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

static void ip_tunnel_update(struct ip_tunnel_net *itn,
                             struct ip_tunnel *t,
                             struct net_device *dev,
                             struct ip_tunnel_parm *p,
                             bool set_mtu)
{
        ip_tunnel_del(itn, t);
        t->parms.iph.saddr = p->iph.saddr;
        t->parms.iph.daddr = p->iph.daddr;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->iph.saddr, 4);
                memcpy(dev->broadcast, &p->iph.daddr, 4);
        }
        ip_tunnel_add(itn, t);

        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
        t->parms.iph.frag_off = p->iph.frag_off;

        if (t->parms.link != p->link) {
                int mtu;

                t->parms.link = p->link;
                mtu = ip_tunnel_bind_dev(dev);
                if (set_mtu)
                        dev->mtu = mtu;
        }
        dst_cache_reset(&t->dst_cache);
        netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
        int err = 0;
        struct ip_tunnel *t = netdev_priv(dev);
        struct net *net = t->net;
        struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

        BUG_ON(!itn->fb_tunnel_dev);
        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == itn->fb_tunnel_dev) {
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (!t)
                                t = netdev_priv(dev);
                }
                memcpy(p, &t->parms, sizeof(*p));
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;
                if (p->iph.ttl)
                        p->iph.frag_off |= htons(IP_DF);
                if (!(p->i_flags & VTI_ISVTI)) {
                        if (!(p->i_flags & TUNNEL_KEY))
                                p->i_key = 0;
                        if (!(p->o_flags & TUNNEL_KEY))
                                p->o_key = 0;
                }

                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

                if (cmd == SIOCADDTUNNEL) {
                        if (!t) {
                                t = ip_tunnel_create(net, itn, p);
                                err = PTR_ERR_OR_ZERO(t);
                                break;
                        }

                        err = -EEXIST;
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned int nflags = 0;

                                if (ipv4_is_multicast(p->iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p->iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags ^ nflags) &
                                    (IFF_POINTOPOINT | IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }

                                t = netdev_priv(dev);
                        }
                }

                if (t) {
                        err = 0;
                        ip_tunnel_update(itn, t, dev, p, true);
                } else {
                        err = -ENOENT;
                }
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (!t)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
        int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;

        if (new_mtu < 68)
                return -EINVAL;

        if (new_mtu > max_mtu) {
                if (strict)
                        return -EINVAL;

                new_mtu = max_mtu;
        }

        dev->mtu = new_mtu;
        return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
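
/* Example of the bound above (illustrative): 0xFFF8 is the 16-bit IP
 * total-length limit rounded down to a multiple of 8, i.e. 65528, so
 * fragment offsets stay representable. For a tunnel with t_hlen == 24
 * and no link-layer header, max_mtu = 65528 - 24 = 65504; requests above
 * that either fail (strict mode, as ip_tunnel_change_mtu() uses) or are
 * clamped to max_mtu.
 */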

static void ip_tunnel_dev_free(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        gro_cells_destroy(&tunnel->gro_cells);
        dst_cache_destroy(&tunnel->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
}
*dev
, struct list_head
*head
)
966 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
967 struct ip_tunnel_net
*itn
;
969 itn
= net_generic(tunnel
->net
, tunnel
->ip_tnl_net_id
);
971 if (itn
->fb_tunnel_dev
!= dev
) {
972 ip_tunnel_del(itn
, netdev_priv(dev
));
973 unregister_netdevice_queue(dev
, head
);
976 EXPORT_SYMBOL_GPL(ip_tunnel_dellink
);

struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);

int ip_tunnel_get_iflink(const struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);

int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname)
{
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
        struct ip_tunnel_parm parms;
        unsigned int i;

        for (i = 0; i < IP_TNL_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&itn->tunnels[i]);

        if (!ops) {
                itn->fb_tunnel_dev = NULL;
                return 0;
        }

        memset(&parms, 0, sizeof(parms));
        if (devname)
                strlcpy(parms.name, devname, IFNAMSIZ);

        rtnl_lock();
        itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
                itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();

        return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                              struct rtnl_link_ops *ops)
{
        struct net *net = dev_net(itn->fb_tunnel_dev);
        struct net_device *dev, *aux;
        int h;

        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == ops)
                        unregister_netdevice_queue(dev, head);

        for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
                struct ip_tunnel *t;
                struct hlist_node *n;
                struct hlist_head *thead = &itn->tunnels[h];

                hlist_for_each_entry_safe(t, n, thead, hash_node)
                        /* If dev is in the same netns, it has already
                         * been added to the list by the previous loop.
                         */
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
        LIST_HEAD(list);

        rtnl_lock();
        ip_tunnel_destroy(itn, &list, ops);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p)
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct ip_tunnel_net *itn;
        int mtu;
        int err;

        nt = netdev_priv(dev);
        itn = net_generic(net, nt->ip_tnl_net_id);

        if (nt->collect_md) {
                if (rtnl_dereference(itn->collect_md_tun))
                        return -EEXIST;
        } else {
                if (ip_tunnel_find(itn, p, dev->type))
                        return -EEXIST;
        }

        nt->net = net;
        nt->parms = *p;
        err = register_netdevice(dev);
        if (err)
                goto out;

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        mtu = ip_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        ip_tunnel_add(itn, nt);
out:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p)
{
        struct ip_tunnel *t;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        if (dev == itn->fb_tunnel_dev)
                return -EINVAL;

        t = ip_tunnel_find(itn, p, dev->type);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = tunnel;

                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;

                        if (ipv4_is_multicast(p->iph.daddr))
                                nflags = IFF_BROADCAST;
                        else if (p->iph.daddr)
                                nflags = IFF_POINTOPOINT;

                        if ((dev->flags ^ nflags) &
                            (IFF_POINTOPOINT | IFF_BROADCAST))
                                return -EINVAL;
                }
        }

        ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        int err;

        dev->destructor = ip_tunnel_dev_free;
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (err) {
                free_percpu(dev->tstats);
                return err;
        }

        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                dst_cache_destroy(&tunnel->dst_cache);
                free_percpu(dev->tstats);
                return err;
        }

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
        iph->version = 4;
        iph->ihl = 5;

        if (tunnel->collect_md) {
                dev->features |= NETIF_F_NETNS_LOCAL;
                netif_keep_dst(dev);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
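
/* Illustrative sketch (assumed driver wiring, not part of this file):
 * a tunnel driver typically plugs these helpers straight into its
 * net_device_ops; the example_* names below are hypothetical:
 *
 *      static const struct net_device_ops example_netdev_ops = {
 *              .ndo_init        = ip_tunnel_init,
 *              .ndo_uninit      = ip_tunnel_uninit,
 *              .ndo_start_xmit  = example_xmit,
 *              .ndo_do_ioctl    = example_ioctl,
 *              .ndo_change_mtu  = ip_tunnel_change_mtu,
 *              .ndo_get_stats64 = ip_tunnel_get_stats64,
 *              .ndo_get_iflink  = ip_tunnel_get_iflink,
 *      };
 *
 * where example_xmit() ends in ip_tunnel_xmit() and example_ioctl()
 * forwards to ip_tunnel_ioctl().
 */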

void ip_tunnel_uninit(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn;

        itn = net_generic(net, tunnel->ip_tnl_net_id);
        /* fb_tunnel_dev will be unregistered in net-exit call. */
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(itn, netdev_priv(dev));

        dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do least required initialization, rest of init is done in tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");