/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}
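/* Compare a received packet's (flags, key) against a tunnel's configured
 * i_flags/i_key: a keyed tunnel accepts only packets carrying the same key,
 * and a keyless tunnel accepts only keyless packets.
 */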
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}
/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a keyed tunnel with the same key; if no key is
   present, it will match only a keyless tunnel.

   All keyless packets that do not match a configured keyless tunnel
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an input packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
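/* Pick the hash bucket a tunnel is stored in: multicast destinations hash as
 * "no remote", and a VTI tunnel without TUNNEL_KEY hashes with a zero key,
 * so that such tunnels land where ip_tunnel_lookup() expects to find them.
 */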
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}
static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}
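/* Exact-match lookup used on the control path (ioctl/netlink), as opposed to
 * ip_tunnel_lookup(), which applies the wildcard rules of the receive path.
 */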
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}
	return t;
}
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	err = -E2BIG;
	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			goto failed;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3))
			goto failed;
		strcpy(name, ops->kind);
		strcat(name, "%d");
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}
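/* Fill the IPv4 flow key used to route the encapsulated (outer) packet. */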
static inline void init_tunnel_flow(struct flowi4 *fl4,
				    int proto,
				    __be32 daddr, __be32 saddr,
				    __be32 key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
}
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
				 iph->saddr, tunnel->parms.o_key,
				 RT_TOS(iph->tos), tunnel->parms.link);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < IPV4_MIN_MTU)
		mtu = IPV4_MIN_MTU;

	return mtu;
}
static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;

	BUG_ON(!itn->fb_tunnel_dev);
	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	dev->mtu = ip_tunnel_bind_dev(dev);

	nt = netdev_priv(dev);
	ip_tunnel_add(itn, nt);
	return nt;
}
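/* Receive path: validate checksum and sequence-number flags against the
 * tunnel configuration, undo ECN encapsulation, update per-CPU stats and
 * hand the inner packet to GRO on the tunnel device.
 */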
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
static int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}
const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
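/* Cache the encapsulation parameters and the resulting extra header length
 * on the tunnel, so the transmit path does not recompute them per packet.
 */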
int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
		    u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (t->encap.type == TUNNEL_ENCAP_NONE)
		return 0;

	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[t->encap.type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, &t->encap, protocol, fl4);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap);
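/* Path MTU handling on transmit: propagate the tunnel route's MTU to the
 * inner dst and, if the inner packet does not fit, send an ICMP
 * "fragmentation needed" (IPv4) or ICMPv6 "packet too big" error.
 */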
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			   struct rtable *rt, __be16 df,
			   const struct iphdr *inner_iph)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
	int mtu;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
		    mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
		    mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
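/* Transmit path: resolve the outer destination (including NBMA tunnels with
 * no fixed daddr), route the encapsulated packet, enforce PMTU, then build
 * and send the outer IP header via iptunnel_xmit().
 */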
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	unsigned int inner_nhdr_len = 0;
	const struct iphdr *inner_iph;
	struct flowi4 fl4;
	u8     tos, ttl;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	unsigned int max_headroom;	/* The extra header space needed */
	__be32 dst;
	int err;
	bool connected;

	/* ensure we can access the inner net header, for several users below */
	if (skb->protocol == htons(ETH_P_IP))
		inner_nhdr_len = sizeof(struct iphdr);
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_nhdr_len = sizeof(struct ipv6hdr);
	if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
		goto tx_error;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
			 NULL;

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP))
		df |= (inner_iph->frag_off & htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol,
			    tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
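/* Changing endpoints or keys can move a tunnel to a different hash bucket,
 * so the tunnel is unhashed, updated and rehashed.
 */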
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link) {
		int mtu;

		t->parms.link = p->link;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	BUG_ON(!itn->fb_tunnel_dev);
	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
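/* 0xFFF8 is the largest 8-byte-aligned IPv4 packet size; the usable tunnel
 * MTU is that minus the link-layer and tunnel headers.
 */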
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
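/* Unregister every tunnel device hashed in this namespace's tunnel net,
 * including tunnels whose device lives in a different namespace, which the
 * first loop below does not cover.
 */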
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
	LIST_HEAD(list);

	rtnl_lock();
	ip_tunnel_destroy(itn, &list, ops);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	err = register_netdevice(dev);
	if (err)
		goto out;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	ip_tunnel_add(itn, nt);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
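/* Netlink changelink: reject changes that would clash with an existing
 * tunnel or alter the point-to-point/broadcast nature of a non-Ethernet
 * device, then apply the new parameters.
 */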
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->destructor = ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(itn, netdev_priv(dev));

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
/* Do the least required initialization here; the rest is done in the
 * tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");
1185 MODULE_LICENSE("GPL");