// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/ip_tunnels.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
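/* Once this module is loaded it provides the "ip6tnl" rtnl link type and
 * the fallback device "ip6tnl0" named by the aliases above.  As a hedged
 * usage illustration (iproute2 invocation assumed, not defined in this
 * file), additional tunnels can then be created from userspace with:
 *
 *	ip link add name tun6 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2
 *	ip link set tun6 up
 */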
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
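/* A hedged illustration of how the parameter above is typically set; the
 * module name and sysfs path follow the usual module_param() conventions
 * and are assumptions, not something spelled out in this file:
 *
 *	modprobe ip6_tunnel log_ecn_error=0
 *	echo 0 > /sys/module/ip6_tunnel/parameters/log_ecn_error
 */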
static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};
static inline int ip6_tnl_mpls_supported(void)
{
	return IS_ENABLED(CONFIG_MPLS);
}

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
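/* Note that for_each_ip6_tunnel_rcu() deliberately relies on a variable
 * named 't' in the enclosing scope and must run under rcu_read_lock().
 * A minimal caller sketch (hypothetical, not part of this file):
 *
 *	struct ip6_tnl *t;
 *
 *	rcu_read_lock();
 *	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 *		if (t->dev->flags & IFF_UP)
 *			do_something(t);
 *	}
 *	rcu_read_unlock();
 */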
/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @net: network namespace
 *   @link: ifindex of underlying interface
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, int link,
	       const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else
			cand = t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_any(&t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !ipv6_addr_any(&t->parms.laddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (cand)
		return cand;

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @ip6n: the private data for ip6_tnl in the netns
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/
static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}
/**
 * ip6_tnl_link - add tunnel to hash table
 *   @ip6n: the private data for ip6_tnl in the netns
 *   @t: tunnel to be added
 **/
static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @ip6n: the private data for ip6_tnl in the netns
 *   @t: tunnel to be removed
 **/
static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
}
static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}
/**
 * ip6_tnl_create - create a new tunnel
 *   @net: network namespace
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/
static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -E2BIG;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strscpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6tnl%%d");
	}
	err = -ENOMEM;
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}
/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @net: network namespace
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/
static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    p->link == t->parms.link) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}
/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/
static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	netdev_put(dev, &t->dev_tracker);
}
/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *   @raw: the ICMPv6 error message data
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = ipv6_authlen(hdr);
		} else {
			optlen = ipv6_optlen(hdr);
		}

		if (!pskb_may_pull(skb, off + optlen))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
		}
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = hdr->nexthdr;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
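/* For reference, the Tunnel Encapsulation Limit option that the parser
 * above looks for sits in a Destination Options header (RFC 2473,
 * section 4.1.1).  Padded to 8 octets it looks like this on the wire
 * (a worked illustration, mirroring what init_tel_txopt() builds on the
 * transmit side further below):
 *
 *	octet 0: next header
 *	octet 1: hdr ext len (0, i.e. 8 octets total)
 *	octet 2: option type IPV6_TLV_TNL_ENCAP_LIMIT (4)
 *	octet 3: option data length (1)
 *	octet 4: tunnel encapsulation limit value
 *	octets 5-7: PadN option (type 1, length 1, one zero octet)
 */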
459 /* ip6_tnl_err() should handle errors in the tunnel according to the
460 * specifications in RFC 2473.
463 ip6_tnl_err(struct sk_buff
*skb
, __u8 ipproto
, struct inet6_skb_parm
*opt
,
464 u8
*type
, u8
*code
, int *msg
, __u32
*info
, int offset
)
466 const struct ipv6hdr
*ipv6h
= (const struct ipv6hdr
*)skb
->data
;
467 struct net
*net
= dev_net(skb
->dev
);
468 u8 rel_type
= ICMPV6_DEST_UNREACH
;
469 u8 rel_code
= ICMPV6_ADDR_UNREACH
;
477 /* If the packet doesn't contain the original IPv6 header we are
478 in trouble since we might need the source address for further
479 processing of the error. */
482 t
= ip6_tnl_lookup(dev_net(skb
->dev
), skb
->dev
->ifindex
, &ipv6h
->daddr
, &ipv6h
->saddr
);
486 tproto
= READ_ONCE(t
->parms
.proto
);
487 if (tproto
!= ipproto
&& tproto
!= 0)
493 case ICMPV6_DEST_UNREACH
:
494 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
498 case ICMPV6_TIME_EXCEED
:
499 if ((*code
) == ICMPV6_EXC_HOPLIMIT
) {
500 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
505 case ICMPV6_PARAMPROB
: {
506 struct ipv6_tlv_tnl_enc_lim
*tel
;
510 if ((*code
) == ICMPV6_HDR_FIELD
)
511 teli
= ip6_tnl_parse_tlv_enc_lim(skb
, skb
->data
);
513 if (teli
&& teli
== *info
- 2) {
514 tel
= (struct ipv6_tlv_tnl_enc_lim
*) &skb
->data
[teli
];
515 if (tel
->encap_limit
== 0) {
516 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
521 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
526 case ICMPV6_PKT_TOOBIG
: {
529 ip6_update_pmtu(skb
, net
, htonl(*info
), 0, 0,
530 sock_net_uid(net
, NULL
));
531 mtu
= *info
- offset
;
532 if (mtu
< IPV6_MIN_MTU
)
534 len
= sizeof(*ipv6h
) + ntohs(ipv6h
->payload_len
);
536 rel_type
= ICMPV6_PKT_TOOBIG
;
544 ip6_redirect(skb
, net
, skb
->dev
->ifindex
, 0,
545 sock_net_uid(net
, NULL
));
560 ip4ip6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
561 u8 type
, u8 code
, int offset
, __be32 info
)
563 __u32 rel_info
= ntohl(info
);
564 const struct iphdr
*eiph
;
565 struct sk_buff
*skb2
;
566 int err
, rel_msg
= 0;
572 err
= ip6_tnl_err(skb
, IPPROTO_IPIP
, opt
, &rel_type
, &rel_code
,
573 &rel_msg
, &rel_info
, offset
);
581 case ICMPV6_DEST_UNREACH
:
582 if (rel_code
!= ICMPV6_ADDR_UNREACH
)
584 rel_type
= ICMP_DEST_UNREACH
;
585 rel_code
= ICMP_HOST_UNREACH
;
587 case ICMPV6_PKT_TOOBIG
:
590 rel_type
= ICMP_DEST_UNREACH
;
591 rel_code
= ICMP_FRAG_NEEDED
;
597 if (!pskb_may_pull(skb
, offset
+ sizeof(struct iphdr
)))
600 skb2
= skb_clone(skb
, GFP_ATOMIC
);
606 skb_pull(skb2
, offset
);
607 skb_reset_network_header(skb2
);
610 /* Try to guess incoming interface */
611 rt
= ip_route_output_ports(dev_net(skb
->dev
), &fl4
, NULL
, eiph
->saddr
,
612 0, 0, 0, IPPROTO_IPIP
,
613 eiph
->tos
& INET_DSCP_MASK
, 0);
617 skb2
->dev
= rt
->dst
.dev
;
620 /* route "incoming" packet */
621 if (rt
->rt_flags
& RTCF_LOCAL
) {
622 rt
= ip_route_output_ports(dev_net(skb
->dev
), &fl4
, NULL
,
623 eiph
->daddr
, eiph
->saddr
, 0, 0,
625 eiph
->tos
& INET_DSCP_MASK
, 0);
626 if (IS_ERR(rt
) || rt
->dst
.dev
->type
!= ARPHRD_TUNNEL6
) {
631 skb_dst_set(skb2
, &rt
->dst
);
633 if (ip_route_input(skb2
, eiph
->daddr
, eiph
->saddr
,
634 ip4h_dscp(eiph
), skb2
->dev
) ||
635 skb_dst(skb2
)->dev
->type
!= ARPHRD_TUNNEL6
)
639 /* change mtu on this route */
640 if (rel_type
== ICMP_DEST_UNREACH
&& rel_code
== ICMP_FRAG_NEEDED
) {
641 if (rel_info
> dst_mtu(skb_dst(skb2
)))
644 skb_dst_update_pmtu_no_confirm(skb2
, rel_info
);
647 icmp_send(skb2
, rel_type
, rel_code
, htonl(rel_info
));
655 ip6ip6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
656 u8 type
, u8 code
, int offset
, __be32 info
)
658 __u32 rel_info
= ntohl(info
);
659 int err
, rel_msg
= 0;
663 err
= ip6_tnl_err(skb
, IPPROTO_IPV6
, opt
, &rel_type
, &rel_code
,
664 &rel_msg
, &rel_info
, offset
);
668 if (rel_msg
&& pskb_may_pull(skb
, offset
+ sizeof(struct ipv6hdr
))) {
670 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
676 skb_pull(skb2
, offset
);
677 skb_reset_network_header(skb2
);
679 /* Try to guess incoming interface */
680 rt
= rt6_lookup(dev_net(skb
->dev
), &ipv6_hdr(skb2
)->saddr
,
683 if (rt
&& rt
->dst
.dev
)
684 skb2
->dev
= rt
->dst
.dev
;
686 icmpv6_send(skb2
, rel_type
, rel_code
, rel_info
);
697 mplsip6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
698 u8 type
, u8 code
, int offset
, __be32 info
)
700 __u32 rel_info
= ntohl(info
);
701 int err
, rel_msg
= 0;
705 err
= ip6_tnl_err(skb
, IPPROTO_MPLS
, opt
, &rel_type
, &rel_code
,
706 &rel_msg
, &rel_info
, offset
);
710 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl
*t
,
711 const struct ipv6hdr
*ipv6h
,
714 __u8 dsfield
= ipv6_get_dsfield(ipv6h
) & ~INET_ECN_MASK
;
716 if (t
->parms
.flags
& IP6_TNL_F_RCV_DSCP_COPY
)
717 ipv4_change_dsfield(ip_hdr(skb
), INET_ECN_MASK
, dsfield
);
719 return IP6_ECN_decapsulate(ipv6h
, skb
);
722 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl
*t
,
723 const struct ipv6hdr
*ipv6h
,
726 if (t
->parms
.flags
& IP6_TNL_F_RCV_DSCP_COPY
)
727 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h
), ipv6_hdr(skb
));
729 return IP6_ECN_decapsulate(ipv6h
, skb
);
732 static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl
*t
,
733 const struct ipv6hdr
*ipv6h
,
736 /* ECN is not supported in AF_MPLS */
740 __u32
ip6_tnl_get_cap(struct ip6_tnl
*t
,
741 const struct in6_addr
*laddr
,
742 const struct in6_addr
*raddr
)
744 struct __ip6_tnl_parm
*p
= &t
->parms
;
745 int ltype
= ipv6_addr_type(laddr
);
746 int rtype
= ipv6_addr_type(raddr
);
749 if (ltype
== IPV6_ADDR_ANY
|| rtype
== IPV6_ADDR_ANY
) {
750 flags
= IP6_TNL_F_CAP_PER_PACKET
;
751 } else if (ltype
& (IPV6_ADDR_UNICAST
|IPV6_ADDR_MULTICAST
) &&
752 rtype
& (IPV6_ADDR_UNICAST
|IPV6_ADDR_MULTICAST
) &&
753 !((ltype
|rtype
) & IPV6_ADDR_LOOPBACK
) &&
754 (!((ltype
|rtype
) & IPV6_ADDR_LINKLOCAL
) || p
->link
)) {
755 if (ltype
&IPV6_ADDR_UNICAST
)
756 flags
|= IP6_TNL_F_CAP_XMIT
;
757 if (rtype
&IPV6_ADDR_UNICAST
)
758 flags
|= IP6_TNL_F_CAP_RCV
;
762 EXPORT_SYMBOL(ip6_tnl_get_cap
);
764 /* called with rcu_read_lock() */
765 int ip6_tnl_rcv_ctl(struct ip6_tnl
*t
,
766 const struct in6_addr
*laddr
,
767 const struct in6_addr
*raddr
)
769 struct __ip6_tnl_parm
*p
= &t
->parms
;
771 struct net
*net
= t
->net
;
773 if ((p
->flags
& IP6_TNL_F_CAP_RCV
) ||
774 ((p
->flags
& IP6_TNL_F_CAP_PER_PACKET
) &&
775 (ip6_tnl_get_cap(t
, laddr
, raddr
) & IP6_TNL_F_CAP_RCV
))) {
776 struct net_device
*ldev
= NULL
;
779 ldev
= dev_get_by_index_rcu(net
, p
->link
);
781 if ((ipv6_addr_is_multicast(laddr
) ||
782 likely(ipv6_chk_addr_and_flags(net
, laddr
, ldev
, false,
783 0, IFA_F_TENTATIVE
))) &&
784 ((p
->flags
& IP6_TNL_F_ALLOW_LOCAL_REMOTE
) ||
785 likely(!ipv6_chk_addr_and_flags(net
, raddr
, ldev
, true,
786 0, IFA_F_TENTATIVE
))))
791 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl
);
793 static int __ip6_tnl_rcv(struct ip6_tnl
*tunnel
, struct sk_buff
*skb
,
794 const struct tnl_ptk_info
*tpi
,
795 struct metadata_dst
*tun_dst
,
796 int (*dscp_ecn_decapsulate
)(const struct ip6_tnl
*t
,
797 const struct ipv6hdr
*ipv6h
,
798 struct sk_buff
*skb
),
801 const struct ipv6hdr
*ipv6h
;
804 if (test_bit(IP_TUNNEL_CSUM_BIT
, tunnel
->parms
.i_flags
) !=
805 test_bit(IP_TUNNEL_CSUM_BIT
, tpi
->flags
)) {
806 DEV_STATS_INC(tunnel
->dev
, rx_crc_errors
);
807 DEV_STATS_INC(tunnel
->dev
, rx_errors
);
811 if (test_bit(IP_TUNNEL_SEQ_BIT
, tunnel
->parms
.i_flags
)) {
812 if (!test_bit(IP_TUNNEL_SEQ_BIT
, tpi
->flags
) ||
814 (s32
)(ntohl(tpi
->seq
) - tunnel
->i_seqno
) < 0)) {
815 DEV_STATS_INC(tunnel
->dev
, rx_fifo_errors
);
816 DEV_STATS_INC(tunnel
->dev
, rx_errors
);
819 tunnel
->i_seqno
= ntohl(tpi
->seq
) + 1;
822 skb
->protocol
= tpi
->proto
;
824 /* Warning: All skb pointers will be invalidated! */
825 if (tunnel
->dev
->type
== ARPHRD_ETHER
) {
826 if (!pskb_may_pull(skb
, ETH_HLEN
)) {
827 DEV_STATS_INC(tunnel
->dev
, rx_length_errors
);
828 DEV_STATS_INC(tunnel
->dev
, rx_errors
);
832 skb
->protocol
= eth_type_trans(skb
, tunnel
->dev
);
833 skb_postpull_rcsum(skb
, eth_hdr(skb
), ETH_HLEN
);
835 skb
->dev
= tunnel
->dev
;
836 skb_reset_mac_header(skb
);
839 /* Save offset of outer header relative to skb->head,
840 * because we are going to reset the network header to the inner header
841 * and might change skb->head.
843 nh
= skb_network_header(skb
) - skb
->head
;
845 skb_reset_network_header(skb
);
847 if (!pskb_inet_may_pull(skb
)) {
848 DEV_STATS_INC(tunnel
->dev
, rx_length_errors
);
849 DEV_STATS_INC(tunnel
->dev
, rx_errors
);
853 /* Get the outer header. */
854 ipv6h
= (struct ipv6hdr
*)(skb
->head
+ nh
);
856 memset(skb
->cb
, 0, sizeof(struct inet6_skb_parm
));
858 __skb_tunnel_rx(skb
, tunnel
->dev
, tunnel
->net
);
860 err
= dscp_ecn_decapsulate(tunnel
, ipv6h
, skb
);
863 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
865 ipv6_get_dsfield(ipv6h
));
867 DEV_STATS_INC(tunnel
->dev
, rx_frame_errors
);
868 DEV_STATS_INC(tunnel
->dev
, rx_errors
);
873 dev_sw_netstats_rx_add(tunnel
->dev
, skb
->len
);
875 skb_scrub_packet(skb
, !net_eq(tunnel
->net
, dev_net(tunnel
->dev
)));
878 skb_dst_set(skb
, (struct dst_entry
*)tun_dst
);
880 gro_cells_receive(&tunnel
->gro_cells
, skb
);
885 dst_release((struct dst_entry
*)tun_dst
);
890 int ip6_tnl_rcv(struct ip6_tnl
*t
, struct sk_buff
*skb
,
891 const struct tnl_ptk_info
*tpi
,
892 struct metadata_dst
*tun_dst
,
895 int (*dscp_ecn_decapsulate
)(const struct ip6_tnl
*t
,
896 const struct ipv6hdr
*ipv6h
,
897 struct sk_buff
*skb
);
899 dscp_ecn_decapsulate
= ip6ip6_dscp_ecn_decapsulate
;
900 if (tpi
->proto
== htons(ETH_P_IP
))
901 dscp_ecn_decapsulate
= ip4ip6_dscp_ecn_decapsulate
;
903 return __ip6_tnl_rcv(t
, skb
, tpi
, tun_dst
, dscp_ecn_decapsulate
,
906 EXPORT_SYMBOL(ip6_tnl_rcv
);
static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static const struct tnl_ptk_info tpi_mpls = {
	/* no tunnel info required for mplsip6. */
	.proto = htons(ETH_P_MPLS_UC),
};
923 static int ipxip6_rcv(struct sk_buff
*skb
, u8 ipproto
,
924 const struct tnl_ptk_info
*tpi
,
925 int (*dscp_ecn_decapsulate
)(const struct ip6_tnl
*t
,
926 const struct ipv6hdr
*ipv6h
,
927 struct sk_buff
*skb
))
930 const struct ipv6hdr
*ipv6h
= ipv6_hdr(skb
);
931 struct metadata_dst
*tun_dst
= NULL
;
935 t
= ip6_tnl_lookup(dev_net(skb
->dev
), skb
->dev
->ifindex
, &ipv6h
->saddr
, &ipv6h
->daddr
);
938 u8 tproto
= READ_ONCE(t
->parms
.proto
);
940 if (tproto
!= ipproto
&& tproto
!= 0)
942 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
944 ipv6h
= ipv6_hdr(skb
);
945 if (!ip6_tnl_rcv_ctl(t
, &ipv6h
->daddr
, &ipv6h
->saddr
))
947 if (iptunnel_pull_header(skb
, 0, tpi
->proto
, false))
949 if (t
->parms
.collect_md
) {
950 IP_TUNNEL_DECLARE_FLAGS(flags
) = { };
952 tun_dst
= ipv6_tun_rx_dst(skb
, flags
, 0, 0);
956 ret
= __ip6_tnl_rcv(t
, skb
, tpi
, tun_dst
, dscp_ecn_decapsulate
,
970 static int ip4ip6_rcv(struct sk_buff
*skb
)
972 return ipxip6_rcv(skb
, IPPROTO_IPIP
, &tpi_v4
,
973 ip4ip6_dscp_ecn_decapsulate
);
976 static int ip6ip6_rcv(struct sk_buff
*skb
)
978 return ipxip6_rcv(skb
, IPPROTO_IPV6
, &tpi_v6
,
979 ip6ip6_dscp_ecn_decapsulate
);
982 static int mplsip6_rcv(struct sk_buff
*skb
)
984 return ipxip6_rcv(skb
, IPPROTO_MPLS
, &tpi_mpls
,
985 mplsip6_dscp_ecn_decapsulate
);
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/
static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
1027 int ip6_tnl_xmit_ctl(struct ip6_tnl
*t
,
1028 const struct in6_addr
*laddr
,
1029 const struct in6_addr
*raddr
)
1031 struct __ip6_tnl_parm
*p
= &t
->parms
;
1033 struct net
*net
= t
->net
;
1035 if (t
->parms
.collect_md
)
1038 if ((p
->flags
& IP6_TNL_F_CAP_XMIT
) ||
1039 ((p
->flags
& IP6_TNL_F_CAP_PER_PACKET
) &&
1040 (ip6_tnl_get_cap(t
, laddr
, raddr
) & IP6_TNL_F_CAP_XMIT
))) {
1041 struct net_device
*ldev
= NULL
;
1045 ldev
= dev_get_by_index_rcu(net
, p
->link
);
1047 if (unlikely(!ipv6_chk_addr_and_flags(net
, laddr
, ldev
, false,
1048 0, IFA_F_TENTATIVE
)))
1049 pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
1051 else if (!(p
->flags
& IP6_TNL_F_ALLOW_LOCAL_REMOTE
) &&
1052 !ipv6_addr_is_multicast(raddr
) &&
1053 unlikely(ipv6_chk_addr_and_flags(net
, raddr
, ldev
,
1054 true, 0, IFA_F_TENTATIVE
)))
1055 pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
1063 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl
);
1066 * ip6_tnl_xmit - encapsulate packet and send
1067 * @skb: the outgoing socket buffer
1068 * @dev: the outgoing tunnel device
1069 * @dsfield: dscp code for outer header
1070 * @fl6: flow of tunneled packet
1071 * @encap_limit: encapsulation limit
1072 * @pmtu: Path MTU is stored if packet is too big
1073 * @proto: next header value
1076 * Build new header and do some sanity checks on the packet before sending
1082 * %-EMSGSIZE message too big. return mtu in this case.
1085 int ip6_tnl_xmit(struct sk_buff
*skb
, struct net_device
*dev
, __u8 dsfield
,
1086 struct flowi6
*fl6
, int encap_limit
, __u32
*pmtu
,
1089 struct ip6_tnl
*t
= netdev_priv(dev
);
1090 struct net
*net
= t
->net
;
1091 struct ipv6hdr
*ipv6h
;
1092 struct ipv6_tel_txoption opt
;
1093 struct dst_entry
*dst
= NULL
, *ndst
= NULL
;
1094 struct net_device
*tdev
;
1096 unsigned int eth_hlen
= t
->dev
->type
== ARPHRD_ETHER
? ETH_HLEN
: 0;
1097 unsigned int psh_hlen
= sizeof(struct ipv6hdr
) + t
->encap_hlen
;
1098 unsigned int max_headroom
= psh_hlen
;
1099 __be16 payload_protocol
;
1100 bool use_cache
= false;
1104 payload_protocol
= skb_protocol(skb
, true);
1106 if (t
->parms
.collect_md
) {
1107 hop_limit
= skb_tunnel_info(skb
)->key
.ttl
;
1110 hop_limit
= t
->parms
.hop_limit
;
1114 if (ipv6_addr_any(&t
->parms
.raddr
)) {
1115 if (payload_protocol
== htons(ETH_P_IPV6
)) {
1116 struct in6_addr
*addr6
;
1117 struct neighbour
*neigh
;
1121 goto tx_err_link_failure
;
1123 neigh
= dst_neigh_lookup(skb_dst(skb
),
1124 &ipv6_hdr(skb
)->daddr
);
1126 goto tx_err_link_failure
;
1128 addr6
= (struct in6_addr
*)&neigh
->primary_key
;
1129 addr_type
= ipv6_addr_type(addr6
);
1131 if (addr_type
== IPV6_ADDR_ANY
)
1132 addr6
= &ipv6_hdr(skb
)->daddr
;
1134 memcpy(&fl6
->daddr
, addr6
, sizeof(fl6
->daddr
));
1135 neigh_release(neigh
);
1136 } else if (payload_protocol
== htons(ETH_P_IP
)) {
1137 const struct rtable
*rt
= skb_rtable(skb
);
1140 goto tx_err_link_failure
;
1142 if (rt
->rt_gw_family
== AF_INET6
)
1143 memcpy(&fl6
->daddr
, &rt
->rt_gw6
, sizeof(fl6
->daddr
));
1145 } else if (t
->parms
.proto
!= 0 && !(t
->parms
.flags
&
1146 (IP6_TNL_F_USE_ORIG_TCLASS
|
1147 IP6_TNL_F_USE_ORIG_FWMARK
))) {
1148 /* enable the cache only if neither the outer protocol nor the
1149 * routing decision depends on the current inner header value
1155 dst
= dst_cache_get(&t
->dst_cache
);
1157 if (!ip6_tnl_xmit_ctl(t
, &fl6
->saddr
, &fl6
->daddr
))
1158 goto tx_err_link_failure
;
1162 /* add dsfield to flowlabel for route lookup */
1163 fl6
->flowlabel
= ip6_make_flowinfo(dsfield
, fl6
->flowlabel
);
1165 dst
= ip6_route_output(net
, NULL
, fl6
);
1168 goto tx_err_link_failure
;
1169 dst
= xfrm_lookup(net
, dst
, flowi6_to_flowi(fl6
), NULL
, 0);
1173 goto tx_err_link_failure
;
1175 if (t
->parms
.collect_md
&& ipv6_addr_any(&fl6
->saddr
) &&
1176 ipv6_dev_get_saddr(net
, ip6_dst_idev(dst
)->dev
,
1177 &fl6
->daddr
, 0, &fl6
->saddr
))
1178 goto tx_err_link_failure
;
1185 DEV_STATS_INC(dev
, collisions
);
1186 net_warn_ratelimited("%s: Local routing loop detected!\n",
1188 goto tx_err_dst_release
;
1190 mtu
= dst_mtu(dst
) - eth_hlen
- psh_hlen
- t
->tun_hlen
;
1191 if (encap_limit
>= 0) {
1195 mtu
= max(mtu
, skb
->protocol
== htons(ETH_P_IPV6
) ?
1196 IPV6_MIN_MTU
: IPV4_MIN_MTU
);
1198 skb_dst_update_pmtu_no_confirm(skb
, mtu
);
1199 if (skb
->len
- t
->tun_hlen
- eth_hlen
> mtu
&& !skb_is_gso(skb
)) {
1202 goto tx_err_dst_release
;
1205 if (t
->err_count
> 0) {
1206 if (time_before(jiffies
,
1207 t
->err_time
+ IP6TUNNEL_ERR_TIMEO
)) {
1210 dst_link_failure(skb
);
1216 skb_scrub_packet(skb
, !net_eq(t
->net
, dev_net(dev
)));
1219 * Okay, now see if we can stuff it in the buffer as-is.
1221 max_headroom
+= LL_RESERVED_SPACE(tdev
);
1223 if (skb_headroom(skb
) < max_headroom
|| skb_shared(skb
) ||
1224 (skb_cloned(skb
) && !skb_clone_writable(skb
, 0))) {
1225 struct sk_buff
*new_skb
;
1227 new_skb
= skb_realloc_headroom(skb
, max_headroom
);
1229 goto tx_err_dst_release
;
1232 skb_set_owner_w(new_skb
, skb
->sk
);
1237 if (t
->parms
.collect_md
) {
1238 if (t
->encap
.type
!= TUNNEL_ENCAP_NONE
)
1239 goto tx_err_dst_release
;
1241 if (use_cache
&& ndst
)
1242 dst_cache_set_ip6(&t
->dst_cache
, ndst
, &fl6
->saddr
);
1244 skb_dst_set(skb
, dst
);
1246 if (hop_limit
== 0) {
1247 if (payload_protocol
== htons(ETH_P_IP
))
1248 hop_limit
= ip_hdr(skb
)->ttl
;
1249 else if (payload_protocol
== htons(ETH_P_IPV6
))
1250 hop_limit
= ipv6_hdr(skb
)->hop_limit
;
1252 hop_limit
= ip6_dst_hoplimit(dst
);
1255 /* Calculate max headroom for all the headers and adjust
1256 * needed_headroom if necessary.
1258 max_headroom
= LL_RESERVED_SPACE(dst
->dev
) + sizeof(struct ipv6hdr
)
1259 + dst
->header_len
+ t
->hlen
;
1260 if (max_headroom
> READ_ONCE(dev
->needed_headroom
))
1261 WRITE_ONCE(dev
->needed_headroom
, max_headroom
);
1263 err
= ip6_tnl_encap(skb
, t
, &proto
, fl6
);
1267 if (encap_limit
>= 0) {
1268 init_tel_txopt(&opt
, encap_limit
);
1269 ipv6_push_frag_opts(skb
, &opt
.ops
, &proto
);
1272 skb_push(skb
, sizeof(struct ipv6hdr
));
1273 skb_reset_network_header(skb
);
1274 ipv6h
= ipv6_hdr(skb
);
1275 ip6_flow_hdr(ipv6h
, dsfield
,
1276 ip6_make_flowlabel(net
, skb
, fl6
->flowlabel
, true, fl6
));
1277 ipv6h
->hop_limit
= hop_limit
;
1278 ipv6h
->nexthdr
= proto
;
1279 ipv6h
->saddr
= fl6
->saddr
;
1280 ipv6h
->daddr
= fl6
->daddr
;
1281 ip6tunnel_xmit(NULL
, skb
, dev
);
1283 tx_err_link_failure
:
1284 DEV_STATS_INC(dev
, tx_carrier_errors
);
1285 dst_link_failure(skb
);
1290 EXPORT_SYMBOL(ip6_tnl_xmit
);
1293 ipxip6_tnl_xmit(struct sk_buff
*skb
, struct net_device
*dev
,
1296 struct ip6_tnl
*t
= netdev_priv(dev
);
1297 struct ipv6hdr
*ipv6h
;
1298 const struct iphdr
*iph
;
1299 int encap_limit
= -1;
1302 __u8 dsfield
, orig_dsfield
;
1307 tproto
= READ_ONCE(t
->parms
.proto
);
1308 if (tproto
!= protocol
&& tproto
!= 0)
1311 if (t
->parms
.collect_md
) {
1312 struct ip_tunnel_info
*tun_info
;
1313 const struct ip_tunnel_key
*key
;
1315 tun_info
= skb_tunnel_info(skb
);
1316 if (unlikely(!tun_info
|| !(tun_info
->mode
& IP_TUNNEL_INFO_TX
) ||
1317 ip_tunnel_info_af(tun_info
) != AF_INET6
))
1319 key
= &tun_info
->key
;
1320 memset(&fl6
, 0, sizeof(fl6
));
1321 fl6
.flowi6_proto
= protocol
;
1322 fl6
.saddr
= key
->u
.ipv6
.src
;
1323 fl6
.daddr
= key
->u
.ipv6
.dst
;
1324 fl6
.flowlabel
= key
->label
;
1329 orig_dsfield
= ipv4_get_dsfield(iph
);
1332 ipv6h
= ipv6_hdr(skb
);
1333 orig_dsfield
= ipv6_get_dsfield(ipv6h
);
1336 orig_dsfield
= dsfield
;
1340 if (!(t
->parms
.flags
& IP6_TNL_F_IGN_ENCAP_LIMIT
))
1341 encap_limit
= t
->parms
.encap_limit
;
1342 if (protocol
== IPPROTO_IPV6
) {
1343 offset
= ip6_tnl_parse_tlv_enc_lim(skb
,
1344 skb_network_header(skb
));
1345 /* ip6_tnl_parse_tlv_enc_lim() might have
1346 * reallocated skb->head
1349 struct ipv6_tlv_tnl_enc_lim
*tel
;
1351 tel
= (void *)&skb_network_header(skb
)[offset
];
1352 if (tel
->encap_limit
== 0) {
1353 icmpv6_ndo_send(skb
, ICMPV6_PARAMPROB
,
1354 ICMPV6_HDR_FIELD
, offset
+ 2);
1357 encap_limit
= tel
->encap_limit
- 1;
1361 memcpy(&fl6
, &t
->fl
.u
.ip6
, sizeof(fl6
));
1362 fl6
.flowi6_proto
= protocol
;
1364 if (t
->parms
.flags
& IP6_TNL_F_USE_ORIG_FWMARK
)
1365 fl6
.flowi6_mark
= skb
->mark
;
1367 fl6
.flowi6_mark
= t
->parms
.fwmark
;
1371 orig_dsfield
= ipv4_get_dsfield(iph
);
1372 if (t
->parms
.flags
& IP6_TNL_F_USE_ORIG_TCLASS
)
1373 dsfield
= orig_dsfield
;
1375 dsfield
= ip6_tclass(t
->parms
.flowinfo
);
1378 ipv6h
= ipv6_hdr(skb
);
1379 orig_dsfield
= ipv6_get_dsfield(ipv6h
);
1380 if (t
->parms
.flags
& IP6_TNL_F_USE_ORIG_TCLASS
)
1381 dsfield
= orig_dsfield
;
1383 dsfield
= ip6_tclass(t
->parms
.flowinfo
);
1384 if (t
->parms
.flags
& IP6_TNL_F_USE_ORIG_FLOWLABEL
)
1385 fl6
.flowlabel
|= ip6_flowlabel(ipv6h
);
1388 orig_dsfield
= dsfield
= ip6_tclass(t
->parms
.flowinfo
);
1393 fl6
.flowi6_uid
= sock_net_uid(dev_net(dev
), NULL
);
1394 dsfield
= INET_ECN_encapsulate(dsfield
, orig_dsfield
);
1396 if (iptunnel_handle_offloads(skb
, SKB_GSO_IPXIP6
))
1399 skb_set_inner_ipproto(skb
, protocol
);
1401 err
= ip6_tnl_xmit(skb
, dev
, dsfield
, &fl6
, encap_limit
, &mtu
,
1404 /* XXX: send ICMP error even if DF is not set. */
1405 if (err
== -EMSGSIZE
)
1408 icmp_ndo_send(skb
, ICMP_DEST_UNREACH
,
1409 ICMP_FRAG_NEEDED
, htonl(mtu
));
1412 icmpv6_ndo_send(skb
, ICMPV6_PKT_TOOBIG
, 0, mtu
);
1424 ip6_tnl_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1426 struct ip6_tnl
*t
= netdev_priv(dev
);
1430 if (!pskb_inet_may_pull(skb
))
1433 switch (skb
->protocol
) {
1434 case htons(ETH_P_IP
):
1435 ipproto
= IPPROTO_IPIP
;
1437 case htons(ETH_P_IPV6
):
1438 if (ip6_tnl_addr_conflict(t
, ipv6_hdr(skb
)))
1440 ipproto
= IPPROTO_IPV6
;
1442 case htons(ETH_P_MPLS_UC
):
1443 ipproto
= IPPROTO_MPLS
;
1449 ret
= ipxip6_tnl_xmit(skb
, dev
, ipproto
);
1453 return NETDEV_TX_OK
;
1456 DEV_STATS_INC(dev
, tx_errors
);
1457 DEV_STATS_INC(dev
, tx_dropped
);
1459 return NETDEV_TX_OK
;
1462 static void ip6_tnl_link_config(struct ip6_tnl
*t
)
1464 struct net_device
*dev
= t
->dev
;
1465 struct net_device
*tdev
= NULL
;
1466 struct __ip6_tnl_parm
*p
= &t
->parms
;
1467 struct flowi6
*fl6
= &t
->fl
.u
.ip6
;
1471 __dev_addr_set(dev
, &p
->laddr
, sizeof(struct in6_addr
));
1472 memcpy(dev
->broadcast
, &p
->raddr
, sizeof(struct in6_addr
));
1474 /* Set up flowi template */
1475 fl6
->saddr
= p
->laddr
;
1476 fl6
->daddr
= p
->raddr
;
1477 fl6
->flowi6_oif
= p
->link
;
1480 if (!(p
->flags
&IP6_TNL_F_USE_ORIG_TCLASS
))
1481 fl6
->flowlabel
|= IPV6_TCLASS_MASK
& p
->flowinfo
;
1482 if (!(p
->flags
&IP6_TNL_F_USE_ORIG_FLOWLABEL
))
1483 fl6
->flowlabel
|= IPV6_FLOWLABEL_MASK
& p
->flowinfo
;
1485 p
->flags
&= ~(IP6_TNL_F_CAP_XMIT
|IP6_TNL_F_CAP_RCV
|IP6_TNL_F_CAP_PER_PACKET
);
1486 p
->flags
|= ip6_tnl_get_cap(t
, &p
->laddr
, &p
->raddr
);
1488 if (p
->flags
&IP6_TNL_F_CAP_XMIT
&& p
->flags
&IP6_TNL_F_CAP_RCV
)
1489 dev
->flags
|= IFF_POINTOPOINT
;
1491 dev
->flags
&= ~IFF_POINTOPOINT
;
1494 t
->hlen
= t
->encap_hlen
+ t
->tun_hlen
;
1495 t_hlen
= t
->hlen
+ sizeof(struct ipv6hdr
);
1497 if (p
->flags
& IP6_TNL_F_CAP_XMIT
) {
1498 int strict
= (ipv6_addr_type(&p
->raddr
) &
1499 (IPV6_ADDR_MULTICAST
|IPV6_ADDR_LINKLOCAL
));
1501 struct rt6_info
*rt
= rt6_lookup(t
->net
,
1502 &p
->raddr
, &p
->laddr
,
1503 p
->link
, NULL
, strict
);
1509 if (!tdev
&& p
->link
)
1510 tdev
= __dev_get_by_index(t
->net
, p
->link
);
1513 dev
->needed_headroom
= tdev
->hard_header_len
+
1514 tdev
->needed_headroom
+ t_hlen
;
1515 mtu
= min_t(unsigned int, tdev
->mtu
, IP6_MAX_MTU
);
1518 if (!(t
->parms
.flags
& IP6_TNL_F_IGN_ENCAP_LIMIT
))
1521 if (mtu
< IPV6_MIN_MTU
)
1523 WRITE_ONCE(dev
->mtu
, mtu
);
1529 * ip6_tnl_change - update the tunnel parameters
1530 * @t: tunnel to be changed
1531 * @p: tunnel configuration parameters
1534 * ip6_tnl_change() updates the tunnel parameters
1538 ip6_tnl_change(struct ip6_tnl
*t
, const struct __ip6_tnl_parm
*p
)
1540 t
->parms
.laddr
= p
->laddr
;
1541 t
->parms
.raddr
= p
->raddr
;
1542 t
->parms
.flags
= p
->flags
;
1543 t
->parms
.hop_limit
= p
->hop_limit
;
1544 t
->parms
.encap_limit
= p
->encap_limit
;
1545 t
->parms
.flowinfo
= p
->flowinfo
;
1546 t
->parms
.link
= p
->link
;
1547 t
->parms
.proto
= p
->proto
;
1548 t
->parms
.fwmark
= p
->fwmark
;
1549 dst_cache_reset(&t
->dst_cache
);
1550 ip6_tnl_link_config(t
);
1553 static void ip6_tnl_update(struct ip6_tnl
*t
, struct __ip6_tnl_parm
*p
)
1555 struct net
*net
= t
->net
;
1556 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
1558 ip6_tnl_unlink(ip6n
, t
);
1560 ip6_tnl_change(t
, p
);
1561 ip6_tnl_link(ip6n
, t
);
1562 netdev_state_change(t
->dev
);
1565 static void ip6_tnl0_update(struct ip6_tnl
*t
, struct __ip6_tnl_parm
*p
)
1567 /* for default tnl0 device allow to change only the proto */
1568 t
->parms
.proto
= p
->proto
;
1569 netdev_state_change(t
->dev
);
1573 ip6_tnl_parm_from_user(struct __ip6_tnl_parm
*p
, const struct ip6_tnl_parm
*u
)
1575 p
->laddr
= u
->laddr
;
1576 p
->raddr
= u
->raddr
;
1577 p
->flags
= u
->flags
;
1578 p
->hop_limit
= u
->hop_limit
;
1579 p
->encap_limit
= u
->encap_limit
;
1580 p
->flowinfo
= u
->flowinfo
;
1582 p
->proto
= u
->proto
;
1583 memcpy(p
->name
, u
->name
, sizeof(u
->name
));
1587 ip6_tnl_parm_to_user(struct ip6_tnl_parm
*u
, const struct __ip6_tnl_parm
*p
)
1589 u
->laddr
= p
->laddr
;
1590 u
->raddr
= p
->raddr
;
1591 u
->flags
= p
->flags
;
1592 u
->hop_limit
= p
->hop_limit
;
1593 u
->encap_limit
= p
->encap_limit
;
1594 u
->flowinfo
= p
->flowinfo
;
1596 u
->proto
= p
->proto
;
1597 memcpy(u
->name
, p
->name
, sizeof(u
->name
));
1601 * ip6_tnl_siocdevprivate - configure ipv6 tunnels from userspace
1602 * @dev: virtual device associated with tunnel
1604 * @data: parameters passed from userspace
1605 * @cmd: command to be performed
1608 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1611 * The possible commands are the following:
1612 * %SIOCGETTUNNEL: get tunnel parameters for device
1613 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1614 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1615 * %SIOCDELTUNNEL: delete tunnel
1617 * The fallback device "ip6tnl0", created during module
1618 * initialization, can be used for creating other tunnel devices.
1622 * %-EFAULT if unable to copy data to or from userspace,
1623 * %-EPERM if current process hasn't %CAP_NET_ADMIN set
1624 * %-EINVAL if passed tunnel parameters are invalid,
1625 * %-EEXIST if changing a tunnel's parameters would cause a conflict
1626 * %-ENODEV if attempting to change or delete a nonexisting device
1630 ip6_tnl_siocdevprivate(struct net_device
*dev
, struct ifreq
*ifr
,
1631 void __user
*data
, int cmd
)
1634 struct ip6_tnl_parm p
;
1635 struct __ip6_tnl_parm p1
;
1636 struct ip6_tnl
*t
= netdev_priv(dev
);
1637 struct net
*net
= t
->net
;
1638 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
1640 memset(&p1
, 0, sizeof(p1
));
1644 if (dev
== ip6n
->fb_tnl_dev
) {
1645 if (copy_from_user(&p
, data
, sizeof(p
))) {
1649 ip6_tnl_parm_from_user(&p1
, &p
);
1650 t
= ip6_tnl_locate(net
, &p1
, 0);
1652 t
= netdev_priv(dev
);
1654 memset(&p
, 0, sizeof(p
));
1656 ip6_tnl_parm_to_user(&p
, &t
->parms
);
1657 if (copy_to_user(data
, &p
, sizeof(p
)))
1663 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1666 if (copy_from_user(&p
, data
, sizeof(p
)))
1669 if (p
.proto
!= IPPROTO_IPV6
&& p
.proto
!= IPPROTO_IPIP
&&
1672 ip6_tnl_parm_from_user(&p1
, &p
);
1673 t
= ip6_tnl_locate(net
, &p1
, cmd
== SIOCADDTUNNEL
);
1674 if (cmd
== SIOCCHGTUNNEL
) {
1676 if (t
->dev
!= dev
) {
1681 t
= netdev_priv(dev
);
1682 if (dev
== ip6n
->fb_tnl_dev
)
1683 ip6_tnl0_update(t
, &p1
);
1685 ip6_tnl_update(t
, &p1
);
1689 ip6_tnl_parm_to_user(&p
, &t
->parms
);
1690 if (copy_to_user(data
, &p
, sizeof(p
)))
1699 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1702 if (dev
== ip6n
->fb_tnl_dev
) {
1704 if (copy_from_user(&p
, data
, sizeof(p
)))
1707 ip6_tnl_parm_from_user(&p1
, &p
);
1708 t
= ip6_tnl_locate(net
, &p1
, 0);
1712 if (t
->dev
== ip6n
->fb_tnl_dev
)
1717 unregister_netdevice(dev
);
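/* Userspace usage sketch for the ioctl interface handled above (a hedged
 * illustration with made-up names and documentation addresses; error
 * handling omitted):
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr = { };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
 *	strncpy(p.name, "mytun", IFNAMSIZ - 1);
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */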
1726 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1727 * @dev: virtual device associated with tunnel
1728 * @new_mtu: the new mtu
1732 * %-EINVAL if mtu too small
1735 int ip6_tnl_change_mtu(struct net_device
*dev
, int new_mtu
)
1737 struct ip6_tnl
*tnl
= netdev_priv(dev
);
1740 t_hlen
= tnl
->hlen
+ sizeof(struct ipv6hdr
);
1741 if (tnl
->parms
.proto
== IPPROTO_IPV6
) {
1742 if (new_mtu
< IPV6_MIN_MTU
)
1745 if (new_mtu
< ETH_MIN_MTU
)
1748 if (tnl
->parms
.proto
== IPPROTO_IPV6
|| tnl
->parms
.proto
== 0) {
1749 if (new_mtu
> IP6_MAX_MTU
- dev
->hard_header_len
- t_hlen
)
1752 if (new_mtu
> IP_MAX_MTU
- dev
->hard_header_len
- t_hlen
)
1755 WRITE_ONCE(dev
->mtu
, new_mtu
);
1758 EXPORT_SYMBOL(ip6_tnl_change_mtu
);
1760 int ip6_tnl_get_iflink(const struct net_device
*dev
)
1762 struct ip6_tnl
*t
= netdev_priv(dev
);
1764 return READ_ONCE(t
->parms
.link
);
1766 EXPORT_SYMBOL(ip6_tnl_get_iflink
);
1768 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops
*ops
,
1771 if (num
>= MAX_IPTUN_ENCAP_OPS
)
1774 return !cmpxchg((const struct ip6_tnl_encap_ops
**)
1775 &ip6tun_encaps
[num
],
1776 NULL
, ops
) ? 0 : -1;
1778 EXPORT_SYMBOL(ip6_tnl_encap_add_ops
);
1780 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops
*ops
,
1785 if (num
>= MAX_IPTUN_ENCAP_OPS
)
1788 ret
= (cmpxchg((const struct ip6_tnl_encap_ops
**)
1789 &ip6tun_encaps
[num
],
1790 ops
, NULL
) == ops
) ? 0 : -1;
1796 EXPORT_SYMBOL(ip6_tnl_encap_del_ops
);
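/* A hedged sketch of how an encapsulation provider (a FOU/GUE style module)
 * would use the two helpers above; the callback members are elided because
 * their authoritative definition lives in struct ip6_tnl_encap_ops in
 * net/ip6_tunnel.h, not in this file:
 *
 *	static const struct ip6_tnl_encap_ops my_encap_ops = {
 *		... encap_hlen / header-building callbacks ...
 *	};
 *
 *	ip6_tnl_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip6_tnl_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 */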
1798 int ip6_tnl_encap_setup(struct ip6_tnl
*t
,
1799 struct ip_tunnel_encap
*ipencap
)
1803 memset(&t
->encap
, 0, sizeof(t
->encap
));
1805 hlen
= ip6_encap_hlen(ipencap
);
1809 t
->encap
.type
= ipencap
->type
;
1810 t
->encap
.sport
= ipencap
->sport
;
1811 t
->encap
.dport
= ipencap
->dport
;
1812 t
->encap
.flags
= ipencap
->flags
;
1814 t
->encap_hlen
= hlen
;
1815 t
->hlen
= t
->encap_hlen
+ t
->tun_hlen
;
1819 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup
);
static const struct net_device_ops ip6_tnl_netdev_ops = {
	.ndo_init	= ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_siocdevprivate = ip6_tnl_siocdevprivate,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)
/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/
static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}
1869 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1870 * @dev: virtual device associated with tunnel
1874 ip6_tnl_dev_init_gen(struct net_device
*dev
)
1876 struct ip6_tnl
*t
= netdev_priv(dev
);
1881 t
->net
= dev_net(dev
);
1883 ret
= dst_cache_init(&t
->dst_cache
, GFP_KERNEL
);
1887 ret
= gro_cells_init(&t
->gro_cells
, dev
);
1892 t
->hlen
= t
->encap_hlen
+ t
->tun_hlen
;
1893 t_hlen
= t
->hlen
+ sizeof(struct ipv6hdr
);
1895 dev
->type
= ARPHRD_TUNNEL6
;
1896 dev
->mtu
= ETH_DATA_LEN
- t_hlen
;
1897 if (!(t
->parms
.flags
& IP6_TNL_F_IGN_ENCAP_LIMIT
))
1899 dev
->min_mtu
= ETH_MIN_MTU
;
1900 dev
->max_mtu
= IP6_MAX_MTU
- dev
->hard_header_len
- t_hlen
;
1902 netdev_hold(dev
, &t
->dev_tracker
, GFP_KERNEL
);
1903 netdev_lockdep_set_classes(dev
);
1907 dst_cache_destroy(&t
->dst_cache
);
1913 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1914 * @dev: virtual device associated with tunnel
1917 static int ip6_tnl_dev_init(struct net_device
*dev
)
1919 struct ip6_tnl
*t
= netdev_priv(dev
);
1920 int err
= ip6_tnl_dev_init_gen(dev
);
1924 ip6_tnl_link_config(t
);
1925 if (t
->parms
.collect_md
)
1926 netif_keep_dst(dev
);
1931 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1932 * @dev: fallback device
1937 static int __net_init
ip6_fb_tnl_dev_init(struct net_device
*dev
)
1939 struct ip6_tnl
*t
= netdev_priv(dev
);
1940 struct net
*net
= dev_net(dev
);
1941 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
1943 t
->parms
.proto
= IPPROTO_IPV6
;
1945 rcu_assign_pointer(ip6n
->tnls_wc
[0], t
);
1949 static int ip6_tnl_validate(struct nlattr
*tb
[], struct nlattr
*data
[],
1950 struct netlink_ext_ack
*extack
)
1954 if (!data
|| !data
[IFLA_IPTUN_PROTO
])
1957 proto
= nla_get_u8(data
[IFLA_IPTUN_PROTO
]);
1958 if (proto
!= IPPROTO_IPV6
&&
1959 proto
!= IPPROTO_IPIP
&&
1966 static void ip6_tnl_netlink_parms(struct nlattr
*data
[],
1967 struct __ip6_tnl_parm
*parms
)
1969 memset(parms
, 0, sizeof(*parms
));
1974 if (data
[IFLA_IPTUN_LINK
])
1975 parms
->link
= nla_get_u32(data
[IFLA_IPTUN_LINK
]);
1977 if (data
[IFLA_IPTUN_LOCAL
])
1978 parms
->laddr
= nla_get_in6_addr(data
[IFLA_IPTUN_LOCAL
]);
1980 if (data
[IFLA_IPTUN_REMOTE
])
1981 parms
->raddr
= nla_get_in6_addr(data
[IFLA_IPTUN_REMOTE
]);
1983 if (data
[IFLA_IPTUN_TTL
])
1984 parms
->hop_limit
= nla_get_u8(data
[IFLA_IPTUN_TTL
]);
1986 if (data
[IFLA_IPTUN_ENCAP_LIMIT
])
1987 parms
->encap_limit
= nla_get_u8(data
[IFLA_IPTUN_ENCAP_LIMIT
]);
1989 if (data
[IFLA_IPTUN_FLOWINFO
])
1990 parms
->flowinfo
= nla_get_be32(data
[IFLA_IPTUN_FLOWINFO
]);
1992 if (data
[IFLA_IPTUN_FLAGS
])
1993 parms
->flags
= nla_get_u32(data
[IFLA_IPTUN_FLAGS
]);
1995 if (data
[IFLA_IPTUN_PROTO
])
1996 parms
->proto
= nla_get_u8(data
[IFLA_IPTUN_PROTO
]);
1998 if (data
[IFLA_IPTUN_COLLECT_METADATA
])
1999 parms
->collect_md
= true;
2001 if (data
[IFLA_IPTUN_FWMARK
])
2002 parms
->fwmark
= nla_get_u32(data
[IFLA_IPTUN_FWMARK
]);
2005 static int ip6_tnl_newlink(struct net
*src_net
, struct net_device
*dev
,
2006 struct nlattr
*tb
[], struct nlattr
*data
[],
2007 struct netlink_ext_ack
*extack
)
2009 struct net
*net
= dev_net(dev
);
2010 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
2011 struct ip_tunnel_encap ipencap
;
2012 struct ip6_tnl
*nt
, *t
;
2015 nt
= netdev_priv(dev
);
2017 if (ip_tunnel_netlink_encap_parms(data
, &ipencap
)) {
2018 err
= ip6_tnl_encap_setup(nt
, &ipencap
);
2023 ip6_tnl_netlink_parms(data
, &nt
->parms
);
2025 if (nt
->parms
.collect_md
) {
2026 if (rtnl_dereference(ip6n
->collect_md_tun
))
2029 t
= ip6_tnl_locate(net
, &nt
->parms
, 0);
2034 err
= ip6_tnl_create2(dev
);
2035 if (!err
&& tb
[IFLA_MTU
])
2036 ip6_tnl_change_mtu(dev
, nla_get_u32(tb
[IFLA_MTU
]));
2041 static int ip6_tnl_changelink(struct net_device
*dev
, struct nlattr
*tb
[],
2042 struct nlattr
*data
[],
2043 struct netlink_ext_ack
*extack
)
2045 struct ip6_tnl
*t
= netdev_priv(dev
);
2046 struct __ip6_tnl_parm p
;
2047 struct net
*net
= t
->net
;
2048 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
2049 struct ip_tunnel_encap ipencap
;
2051 if (dev
== ip6n
->fb_tnl_dev
)
2054 if (ip_tunnel_netlink_encap_parms(data
, &ipencap
)) {
2055 int err
= ip6_tnl_encap_setup(t
, &ipencap
);
2060 ip6_tnl_netlink_parms(data
, &p
);
2064 t
= ip6_tnl_locate(net
, &p
, 0);
2069 t
= netdev_priv(dev
);
2071 ip6_tnl_update(t
, &p
);
2075 static void ip6_tnl_dellink(struct net_device
*dev
, struct list_head
*head
)
2077 struct net
*net
= dev_net(dev
);
2078 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
2080 if (dev
!= ip6n
->fb_tnl_dev
)
2081 unregister_netdevice_queue(dev
, head
);
2084 static size_t ip6_tnl_get_size(const struct net_device
*dev
)
2087 /* IFLA_IPTUN_LINK */
2089 /* IFLA_IPTUN_LOCAL */
2090 nla_total_size(sizeof(struct in6_addr
)) +
2091 /* IFLA_IPTUN_REMOTE */
2092 nla_total_size(sizeof(struct in6_addr
)) +
2093 /* IFLA_IPTUN_TTL */
2095 /* IFLA_IPTUN_ENCAP_LIMIT */
2097 /* IFLA_IPTUN_FLOWINFO */
2099 /* IFLA_IPTUN_FLAGS */
2101 /* IFLA_IPTUN_PROTO */
2103 /* IFLA_IPTUN_ENCAP_TYPE */
2105 /* IFLA_IPTUN_ENCAP_FLAGS */
2107 /* IFLA_IPTUN_ENCAP_SPORT */
2109 /* IFLA_IPTUN_ENCAP_DPORT */
2111 /* IFLA_IPTUN_COLLECT_METADATA */
2113 /* IFLA_IPTUN_FWMARK */
2118 static int ip6_tnl_fill_info(struct sk_buff
*skb
, const struct net_device
*dev
)
2120 struct ip6_tnl
*tunnel
= netdev_priv(dev
);
2121 struct __ip6_tnl_parm
*parm
= &tunnel
->parms
;
2123 if (nla_put_u32(skb
, IFLA_IPTUN_LINK
, parm
->link
) ||
2124 nla_put_in6_addr(skb
, IFLA_IPTUN_LOCAL
, &parm
->laddr
) ||
2125 nla_put_in6_addr(skb
, IFLA_IPTUN_REMOTE
, &parm
->raddr
) ||
2126 nla_put_u8(skb
, IFLA_IPTUN_TTL
, parm
->hop_limit
) ||
2127 nla_put_u8(skb
, IFLA_IPTUN_ENCAP_LIMIT
, parm
->encap_limit
) ||
2128 nla_put_be32(skb
, IFLA_IPTUN_FLOWINFO
, parm
->flowinfo
) ||
2129 nla_put_u32(skb
, IFLA_IPTUN_FLAGS
, parm
->flags
) ||
2130 nla_put_u8(skb
, IFLA_IPTUN_PROTO
, parm
->proto
) ||
2131 nla_put_u32(skb
, IFLA_IPTUN_FWMARK
, parm
->fwmark
))
2132 goto nla_put_failure
;
2134 if (nla_put_u16(skb
, IFLA_IPTUN_ENCAP_TYPE
, tunnel
->encap
.type
) ||
2135 nla_put_be16(skb
, IFLA_IPTUN_ENCAP_SPORT
, tunnel
->encap
.sport
) ||
2136 nla_put_be16(skb
, IFLA_IPTUN_ENCAP_DPORT
, tunnel
->encap
.dport
) ||
2137 nla_put_u16(skb
, IFLA_IPTUN_ENCAP_FLAGS
, tunnel
->encap
.flags
))
2138 goto nla_put_failure
;
2140 if (parm
->collect_md
)
2141 if (nla_put_flag(skb
, IFLA_IPTUN_COLLECT_METADATA
))
2142 goto nla_put_failure
;
2150 struct net
*ip6_tnl_get_link_net(const struct net_device
*dev
)
2152 struct ip6_tnl
*tunnel
= netdev_priv(dev
);
2154 return READ_ONCE(tunnel
->net
);
2156 EXPORT_SYMBOL(ip6_tnl_get_link_net
);
static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
	.handler	= mplsip6_rcv,
	.err_handler	= mplsip6_err,
	.priority	= 1,
};
2208 static void __net_exit
ip6_tnl_destroy_tunnels(struct net
*net
, struct list_head
*list
)
2210 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
2211 struct net_device
*dev
, *aux
;
2215 for_each_netdev_safe(net
, dev
, aux
)
2216 if (dev
->rtnl_link_ops
== &ip6_link_ops
)
2217 unregister_netdevice_queue(dev
, list
);
2219 for (h
= 0; h
< IP6_TUNNEL_HASH_SIZE
; h
++) {
2220 t
= rtnl_dereference(ip6n
->tnls_r_l
[h
]);
2222 /* If dev is in the same netns, it has already
2223 * been added to the list by the previous loop.
2225 if (!net_eq(dev_net(t
->dev
), net
))
2226 unregister_netdevice_queue(t
->dev
, list
);
2227 t
= rtnl_dereference(t
->next
);
2231 t
= rtnl_dereference(ip6n
->tnls_wc
[0]);
2233 /* If dev is in the same netns, it has already
2234 * been added to the list by the previous loop.
2236 if (!net_eq(dev_net(t
->dev
), net
))
2237 unregister_netdevice_queue(t
->dev
, list
);
2238 t
= rtnl_dereference(t
->next
);
2242 static int __net_init
ip6_tnl_init_net(struct net
*net
)
2244 struct ip6_tnl_net
*ip6n
= net_generic(net
, ip6_tnl_net_id
);
2245 struct ip6_tnl
*t
= NULL
;
2248 ip6n
->tnls
[0] = ip6n
->tnls_wc
;
2249 ip6n
->tnls
[1] = ip6n
->tnls_r_l
;
2251 if (!net_has_fallback_tunnels(net
))
2254 ip6n
->fb_tnl_dev
= alloc_netdev(sizeof(struct ip6_tnl
), "ip6tnl0",
2255 NET_NAME_UNKNOWN
, ip6_tnl_dev_setup
);
2257 if (!ip6n
->fb_tnl_dev
)
2259 dev_net_set(ip6n
->fb_tnl_dev
, net
);
2260 ip6n
->fb_tnl_dev
->rtnl_link_ops
= &ip6_link_ops
;
2261 /* FB netdevice is special: we have one, and only one per netns.
2262 * Allowing to move it to another netns is clearly unsafe.
2264 ip6n
->fb_tnl_dev
->netns_local
= true;
2266 err
= ip6_fb_tnl_dev_init(ip6n
->fb_tnl_dev
);
2270 err
= register_netdev(ip6n
->fb_tnl_dev
);
2274 t
= netdev_priv(ip6n
->fb_tnl_dev
);
2276 strcpy(t
->parms
.name
, ip6n
->fb_tnl_dev
->name
);
2280 free_netdev(ip6n
->fb_tnl_dev
);
2285 static void __net_exit
ip6_tnl_exit_batch_rtnl(struct list_head
*net_list
,
2286 struct list_head
*dev_to_kill
)
2291 list_for_each_entry(net
, net_list
, exit_list
)
2292 ip6_tnl_destroy_tunnels(net
, dev_to_kill
);
static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};
/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/
static int __init ip6_tunnel_init(void)
{
	int err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}

	if (ip6_tnl_mpls_supported()) {
		err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
		if (err < 0) {
			pr_err("%s: can't register mplsip6\n", __func__);
			goto out_mplsip6;
		}
	}

	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	if (ip6_tnl_mpls_supported())
		xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
out_mplsip6:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}
/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/
static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	if (ip6_tnl_mpls_supported() &&
	    xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
		pr_info("%s: can't deregister mplsip6\n", __func__);

	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);