/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
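/* Tunnels live in a per-netns hash table (itn->tunnels). The bucket is
 * derived from the tunnel key XORed with the remote address, so receive-path
 * lookups only scan tunnels that share both values.
 */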
static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
                                   __be32 key, __be32 remote)
{
        return hash_32((__force u32)key ^ (__force u32)remote,
                       IP_TNL_HASH_BITS);
}
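/* The RX/TX packet and byte counters are kept per cpu in dev->tstats;
 * ip_tunnel_get_stats64() below folds them into one rtnl_link_stats64,
 * re-reading a CPU's counters via the u64_stats seqcount whenever a writer
 * raced with the read, so each per-cpu snapshot is internally consistent.
 */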
/* Often modified stats are per cpu, others are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                                                struct rtnl_link_stats64 *tot)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }

        tot->multicast = dev->stats.multicast;

        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->rx_errors = dev->stats.rx_errors;

        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
        tot->tx_errors = dev->stats.tx_errors;

        tot->collisions = dev->stats.collisions;

        return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
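/* Key matching rule: a tunnel configured with TUNNEL_KEY only accepts packets
 * carrying that exact key, while a keyless tunnel only accepts packets that
 * carry no key at all.
 */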
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
{
        if (p->i_flags & TUNNEL_KEY) {
                if (flags & TUNNEL_KEY)
                        return key == p->i_key;
                else
                        /* key expected, none present */
                        return false;
        } else
                return !(flags & TUNNEL_KEY);
}
/* Fallback tunnel: no source, no destination, no key, no options

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an input packet.
*/
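/* Illustrative call from a receive path (a sketch only; the caller's local
 * names are hypothetical, argument order as in the prototype below):
 *
 *      tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 *                                iph->saddr, iph->daddr, tpi->key);
 *
 * The caller must be inside an rcu_read_lock() section, since the hash
 * chains are walked with hlist_for_each_entry_rcu().
 */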
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, __be16 flags,
                                   __be32 remote, __be32 local,
                                   __be32 key)
{
        unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;

        hash = ip_tunnel_hash(itn, key, remote);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else
                        cand = t;
        }

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
                    t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        hash = ip_tunnel_hash(itn, key, 0);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
                    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
                        continue;

                if (!(t->dev->flags & IFF_UP))
                        continue;

                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        if (flags & TUNNEL_NO_KEY)
                goto skip_key_lookup;

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
                    t->parms.iph.saddr != 0 ||
                    t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

skip_key_lookup:
        if (cand)
                return cand;

        if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
                return netdev_priv(itn->fb_tunnel_dev);

        return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
                                    struct ip_tunnel_parm *parms)
{
        unsigned int h;
        __be32 remote;

        if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
                remote = parms->iph.daddr;
        else
                remote = 0;

        h = ip_tunnel_hash(itn, parms->i_key, remote);
        return &itn->tunnels[h];
}
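/* Insertion and removal use the RCU hlist primitives so ip_tunnel_lookup()
 * can walk the chains without a lock; writers are expected to hold RTNL.
 */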
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        struct hlist_head *head = ip_bucket(itn, &t->parms);

        hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel *t)
{
        hlist_del_init_rcu(&t->hash_node);
}
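/* Unlike ip_tunnel_lookup(), which is a best-match helper for the packet
 * receive path, ip_tunnel_find() requires an exact match on local/remote
 * address, key, link and device type; it is used when configuring tunnels.
 */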
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
                                        struct ip_tunnel_parm *parms,
                                        int type)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        int link = parms->link;
        struct ip_tunnel *t = NULL;
        struct hlist_head *head = ip_bucket(itn, parms);

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    key == t->parms.i_key &&
                    link == t->parms.link &&
                    type == t->dev->type)
                        break;
        }

        return t;
}
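/* Allocate and register a tunnel netdevice. The name comes from parms->name
 * when set, otherwise from the rtnl_link_ops kind plus a "%d" template that
 * the core expands to a free index at registration time. Returns the device
 * on success or an ERR_PTR() on failure.
 */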
static struct net_device *__ip_tunnel_create(struct net *net,
                                             const struct rtnl_link_ops *ops,
                                             struct ip_tunnel_parm *parms)
{
        int err;
        struct ip_tunnel *tunnel;
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (parms->name[0])
                strlcpy(name, parms->name, IFNAMSIZ);
        else {
                if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
                        err = -E2BIG;
                        goto failed;
                }
                strlcpy(name, ops->kind, IFNAMSIZ);
                strncat(name, "%d", 2);
        }

        ASSERT_RTNL();
        dev = alloc_netdev(ops->priv_size, name, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
        }
        dev_net_set(dev, net);

        dev->rtnl_link_ops = ops;

        tunnel = netdev_priv(dev);
        tunnel->parms = *parms;

        err = register_netdevice(dev);
        if (err)
                goto failed_free;

        return dev;

failed_free:
        free_netdev(dev);
failed:
        return ERR_PTR(err);
}
static inline struct rtable *ip_route_output_tunnel(struct net *net,
                                                    struct flowi4 *fl4,
                                                    int proto,
                                                    __be32 daddr, __be32 saddr,
                                                    __be32 key, __u8 tos, int oif)
{
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = oif;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
        fl4->flowi4_proto = proto;
        fl4->fl4_gre_key = key;
        return ip_route_output_key(net, fl4);
}
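/* Pick an MTU and needed_headroom for the tunnel device by looking at the
 * device the encapsulated packets will leave through. As a rough, purely
 * illustrative example: a GRE tunnel carrying a 4-byte key has
 * tunnel->hlen = 8, so t_hlen = 8 + 20 = 28, and over a 1500-byte Ethernet
 * link the tunnel MTU works out to about 1472.
 */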
static int ip_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */
        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                rt = ip_route_output_tunnel(tunnel->net, &fl4,
                                            tunnel->parms.iph.protocol,
                                            iph->daddr, iph->saddr,
                                            tunnel->parms.o_key,
                                            RT_TOS(iph->tos),
                                            tunnel->parms.link);
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;

        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);

        if (mtu < 68)
                mtu = 68;

        return mtu;
}
static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
{
        struct ip_tunnel *nt, *fbt;
        struct net_device *dev;

        BUG_ON(!itn->fb_tunnel_dev);
        fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return NULL;

        dev->mtu = ip_tunnel_bind_dev(dev);

        nt = netdev_priv(dev);
        ip_tunnel_add(itn, nt);
        return nt;
}
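/* Common receive path for IP tunnels: validate the checksum and sequence
 * number flags against the tunnel configuration, undo ECN encapsulation,
 * bump the per-cpu rx counters and hand the packet to gro_cells for delivery
 * through the tunnel netdevice.
 */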
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
        struct pcpu_tstats *tstats;
        const struct iphdr *iph = ip_hdr(skb);
        int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
                tunnel->dev->stats.multicast++;
                skb->pkt_type = PACKET_BROADCAST;
        }
#endif

        if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
            ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
                tunnel->dev->stats.rx_crc_errors++;
                tunnel->dev->stats.rx_errors++;
                goto drop;
        }

        if (tunnel->parms.i_flags&TUNNEL_SEQ) {
                if (!(tpi->flags&TUNNEL_SEQ) ||
                    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
                        tunnel->dev->stats.rx_fifo_errors++;
                        tunnel->dev->stats.rx_errors++;
                        goto drop;
                }
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }

        skb_reset_network_header(skb);

        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++tunnel->dev->stats.rx_frame_errors;
                        ++tunnel->dev->stats.rx_errors;
                        goto drop;
                }
        }

        tstats = this_cpu_ptr(tunnel->dev->tstats);
        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

        if (tunnel->dev->type == ARPHRD_ETHER) {
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
        }

        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
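/* Compare the packet against the path MTU of the route the encapsulated
 * packet will take and, when it does not fit, report back to the sender
 * (ICMP "fragmentation needed" for IPv4, "packet too big" for IPv6).
 */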
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                           struct rtable *rt, __be16 df)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
        int mtu;

        if (df)
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len
                                        - sizeof(struct iphdr) - tunnel->hlen;
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
                    (df & htons(IP_DF)) && mtu < pkt_size) {
                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        return -E2BIG;
                }
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

                if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
                    mtu >= IPV6_MIN_MTU) {
                        if ((tunnel->parms.iph.daddr &&
                             !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
                                dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }

                if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
                    mtu < pkt_size) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        return -E2BIG;
                }
        }
#endif
        return 0;
}
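/* Common transmit path: resolve the outer destination (falling back to the
 * inner headers for NBMA tunnels with no configured destination), route the
 * outer packet, enforce PMTU, then build and send the encapsulating IP
 * header via iptunnel_xmit(). tos/ttl/df are taken from tnl_params or
 * inherited from the inner packet, depending on the tunnel configuration.
 */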
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, const u8 protocol)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8 tos, ttl;
        __be16 df;
        struct rtable *rt;              /* Route to the other host */
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;

        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);

        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */

                if (skb_dst(skb) == NULL) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, inner_iph->daddr);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        const struct in6_addr *addr6;
                        struct neighbour *neigh;
                        bool do_tx_error_icmp;
                        int addr_type;

                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
                        if (neigh == NULL)
                                goto tx_error;

                        addr6 = (const struct in6_addr *)&neigh->primary_key;
                        addr_type = ipv6_addr_type(addr6);

                        if (addr_type == IPV6_ADDR_ANY) {
                                addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }

                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
                                do_tx_error_icmp = true;
                        else {
                                do_tx_error_icmp = false;
                                dst = addr6->s6_addr32[3];
                        }
                        neigh_release(neigh);
                        if (do_tx_error_icmp)
                                goto tx_error_icmp;
                }
#endif
                else
                        goto tx_error;
        }

        tos = tnl_params->tos;
        if (tos & 0x1) {
                tos &= ~0x1;
                if (skb->protocol == htons(ETH_P_IP))
                        tos = inner_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
        }

        rt = ip_route_output_tunnel(tunnel->net, &fl4,
                                    protocol,
                                    dst, tnl_params->saddr,
                                    tunnel->parms.o_key,
                                    RT_TOS(tos),
                                    tunnel->parms.link);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
                ip_rt_put(rt);
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;

                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }

        df = tnl_params->frag_off;
        if (skb->protocol == htons(ETH_P_IP))
                df |= (inner_iph->frag_off&htons(IP_DF));

        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len;
        if (max_headroom > dev->needed_headroom)
                dev->needed_headroom = max_headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                dev->stats.tx_dropped++;
                dev_kfree_skb(skb);
                return;
        }

        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
                            tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

        return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
        dst_link_failure(skb);
#endif
tx_error:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
static void ip_tunnel_update(struct ip_tunnel_net *itn,
                             struct ip_tunnel *t,
                             struct net_device *dev,
                             struct ip_tunnel_parm *p,
                             bool set_mtu)
{
        ip_tunnel_del(t);
        t->parms.iph.saddr = p->iph.saddr;
        t->parms.iph.daddr = p->iph.daddr;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->iph.saddr, 4);
                memcpy(dev->broadcast, &p->iph.daddr, 4);
        }
        ip_tunnel_add(itn, t);

        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
        t->parms.iph.frag_off = p->iph.frag_off;

        if (t->parms.link != p->link) {
                int mtu;

                t->parms.link = p->link;
                mtu = ip_tunnel_bind_dev(dev);
                if (set_mtu)
                        dev->mtu = mtu;
        }
        netdev_state_change(dev);
}
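/* Legacy ioctl-based configuration (SIOCGETTUNNEL/SIOCADDTUNNEL/
 * SIOCCHGTUNNEL/SIOCDELTUNNEL), used by ip-tunnel style userspace tools
 * alongside the rtnl_link_ops interface implemented further below.
 */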
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
        int err = 0;
        struct ip_tunnel *t;
        struct net *net = dev_net(dev);
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        BUG_ON(!itn->fb_tunnel_dev);
        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == itn->fb_tunnel_dev)
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(p, &t->parms, sizeof(*p));
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;
                if (p->iph.ttl)
                        p->iph.frag_off |= htons(IP_DF);
                if (!(p->i_flags&TUNNEL_KEY))
                        p->i_key = 0;
                if (!(p->o_flags&TUNNEL_KEY))
                        p->o_key = 0;

                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

                if (!t && (cmd == SIOCADDTUNNEL))
                        t = ip_tunnel_create(net, itn, p);

                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned int nflags = 0;

                                if (ipv4_is_multicast(p->iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p->iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }

                                t = netdev_priv(dev);
                        }
                }

                if (t) {
                        err = 0;
                        ip_tunnel_update(itn, t, dev, p, true);
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (t == NULL)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        if (new_mtu < 68 ||
            new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        gro_cells_destroy(&tunnel->gro_cells);
        free_percpu(dev->tstats);
        free_netdev(dev);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn;

        itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

        if (itn->fb_tunnel_dev != dev) {
                ip_tunnel_del(netdev_priv(dev));
                unregister_netdevice_queue(dev, head);
        }
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
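/* Per-netns setup: initialize the hash table and, when an ops/devname pair is
 * given, create the fallback ("fb") tunnel device that catches otherwise
 * unmatched packets (see ip_tunnel_lookup() above).
 */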
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname)
{
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
        struct ip_tunnel_parm parms;
        int i;

        for (i = 0; i < IP_TNL_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&itn->tunnels[i]);

        if (!ops) {
                itn->fb_tunnel_dev = NULL;
                return 0;
        }

        memset(&parms, 0, sizeof(parms));
        if (devname)
                strlcpy(parms.name, devname, IFNAMSIZ);

        rtnl_lock();
        itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
                itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();

        return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                              struct rtnl_link_ops *ops)
{
        struct net *net = dev_net(itn->fb_tunnel_dev);
        struct net_device *dev, *aux;
        int h;

        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == ops)
                        unregister_netdevice_queue(dev, head);

        for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
                struct ip_tunnel *t;
                struct hlist_node *n;
                struct hlist_head *thead = &itn->tunnels[h];

                hlist_for_each_entry_safe(t, n, thead, hash_node)
                        /* If dev is in the same netns, it has already
                         * been added to the list by the previous loop.
                         */
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
        LIST_HEAD(list);

        rtnl_lock();
        ip_tunnel_destroy(itn, &list, ops);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
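/* rtnl_link_ops helpers shared by the protocol modules (e.g. GRE): newlink
 * registers a freshly allocated tunnel device and hashes it, changelink
 * re-keys/re-addresses an existing one via ip_tunnel_update().
 */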
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p)
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct ip_tunnel_net *itn;
        int mtu;
        int err;

        nt = netdev_priv(dev);
        itn = net_generic(net, nt->ip_tnl_net_id);

        if (ip_tunnel_find(itn, p, dev->type))
                return -EEXIST;

        nt->parms = *p;
        err = register_netdevice(dev);
        if (err)
                goto out;

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        mtu = ip_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        ip_tunnel_add(itn, nt);

out:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p)
{
        struct ip_tunnel *t;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        if (dev == itn->fb_tunnel_dev)
                return -EINVAL;

        t = ip_tunnel_find(itn, p, dev->type);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = tunnel;

                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;

                        if (ipv4_is_multicast(p->iph.daddr))
                                nflags = IFF_BROADCAST;
                        else if (p->iph.daddr)
                                nflags = IFF_POINTOPOINT;

                        if ((dev->flags ^ nflags) &
                            (IFF_POINTOPOINT | IFF_BROADCAST))
                                return -EINVAL;
                }
        }

        ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
int ip_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        int err;

        dev->destructor = ip_tunnel_dev_free;
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                free_percpu(dev->tstats);
                return err;
        }

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
        iph->version = 4;
        iph->ihl = 5;

        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn;

        itn = net_generic(net, tunnel->ip_tnl_net_id);
        /* fb_tunnel_dev will be unregistered in the net exit path. */
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(netdev_priv(dev));
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
/* Do the least required initialization; the rest of the init is done
 * in the tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");