// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match: if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
 */
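/* The key hash below folds the 32-bit key with a nibble-shifted copy of
 * itself before masking down to the IP6_GRE_HASH_SIZE_SHIFT-bit table
 * index, so runs of nearby key values do not all collide in one bucket.
 */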
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}
#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
/* Given src, dst and key, find the tunnel appropriate for the incoming packet. */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;
	struct net_device *ndev;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(ign->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}
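/* Given the parameters of an outgoing tunnel, pick the hash bucket it
 * belongs to.  The bucket index encodes which addresses are specific:
 * bit 0 for a set local address, bit 1 for a set (non-multicast) remote
 * address, matching the tunnels_{wc,l,r,r_l} tables above.
 */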
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}
static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}
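/* Hash-chain writers below run under RTNL; readers walk the chains under
 * RCU, which is why new links are published with rcu_assign_pointer()
 * and writers read the chain with rtnl_dereference().
 */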
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t && create)
		return NULL;
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & TUNNEL_SEQ))
		dev->features |= NETIF_F_LLTX;

	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	if (ign->fb_tunnel_dev == dev)
		WRITE_ONCE(ign->fb_tunnel_dev, NULL);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}
static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return -EINVAL;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return -ENOENT;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return 0;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return 0;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return 0;
	}
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return 0;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return 0;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			__be64 tun_id;
			__be16 flags;

			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
static int ip6erspan_rcv(struct sk_buff *skb,
			 struct tnl_ptk_info *tpi,
			 int gre_hdr_len)
{
	struct erspan_base_hdr *ershdr;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
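/* Pick the GSO type for the outer GRE header: SKB_GSO_GRE_CSUM when the
 * tunnel adds a GRE checksum, plain SKB_GSO_GRE otherwise.
 */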
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}
static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
	ipv6h = ipv6_hdr(skb);

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}
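/* In collect_md mode a transmit must carry tunnel metadata supplied by
 * the caller (for example via a lightweight tunnel route or an eBPF
 * program); reject skbs whose tunnel info is missing or not marked for
 * transmit.
 */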
static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb)
{
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX)))
		return ERR_PTR(-EINVAL);

	return tun_info;
}
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
		return -ENOMEM;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		__be16 flags;

		tun_info = skb_tunnel_info_txcheck(skb);
		if (IS_ERR(tun_info) ||
		    unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		flags = key->tun_flags &
			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
		tunnel->tun_hlen = gre_calc_hlen(flags);

		gre_build_header(skb, tunnel->tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
						      : 0);

	} else {
		if (tunnel->parms.o_flags & TUNNEL_SEQ)
			tunnel->o_seqno++;

		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
				 protocol, tunnel->parms.o_key,
				 htonl(tunnel->o_seqno));
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	if (!t->parms.collect_md)
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return err;

	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
		stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
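/* ERSPAN transmit path.  Unlike plain GRE, an oversized payload is
 * trimmed to the device MTU and sent with the truncate bit set in the
 * ERSPAN header built below, rather than triggering fragmentation or an
 * ICMP error up front.
 */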
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip_tunnel_info *tun_info = NULL;
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct net_device_stats *stats;
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = false;
	struct flowi6 fl6;
	int err = -EINVAL;
	__be16 proto;
	__u32 mtu;
	int nhoff;
	int thoff;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	t->parms.o_flags &= ~TUNNEL_KEY;
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info_txcheck(skb);
		if (IS_ERR(tun_info) ||
		    unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
			goto tx_err;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
			goto tx_err;
		if (tun_info->options_len < sizeof(*md))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb,
					    ntohl(tun_id),
					    ntohl(md->u.index), truncate,
					    false);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb,
					       ntohl(tun_id),
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
		} else {
			goto tx_err;
		}
	} else {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1)
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
		else if (t->parms.erspan_ver == 2)
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
		else
			goto tx_err;

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
					   : htons(ETH_P_ERSPAN2);
	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		goto tx_err;
	}

	return NETDEV_TX_OK;

tx_err:
	stats = &t->dev->stats;
	if (!IS_ERR(tun_info))
		stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}
static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			unsigned short dst_len = rt->dst.dev->hard_header_len +
						 t_hlen;

			if (t->dev->header_ops)
				dev->hard_header_len = dst_len;
			else
				dev->needed_headroom = dst_len;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}
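/* Recompute the GRE header length from the output flags.  The base GRE
 * header is 4 bytes; the checksum, key and sequence-number fields add
 * 4 bytes each.  For example, o_flags = TUNNEL_KEY | TUNNEL_SEQ gives
 * tun_hlen = 4 + 4 + 4 = 12 bytes on top of any encap header.
 */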
static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);

	if (tunnel->dev->header_ops)
		tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	else
		tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;

	return t_hlen;
}
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	t->parms.fwmark = p->fwmark;
	t->parms.erspan_ver = p->erspan_ver;
	t->parms.index = p->index;
	t->parms.dir = p->dir;
	t->parms.hwid = p->hwid;
	dst_cache_reset(&t->dst_cache);
}
static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
				      const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
				    const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}
static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}
static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};
static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};
static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}
#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)
static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features		|= GRE6_FEATURES;
	dev->hw_features	|= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    nt->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	dev_hold(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}
static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;

	dev_hold(dev);
}
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
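/* Tear down in two passes: first queue every ip6gre/ip6gretap/ip6erspan
 * device registered in this netns, then walk the hash tables to catch
 * tunnels whose device has been moved into a different netns.
 */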
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}
static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *ndev;
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
			    NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
	if (!ndev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	ign->fb_tunnel_dev = ndev;
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ndev);
err_alloc_dev:
	return err;
}
static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch = ip6gre_exit_batch_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}
static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* ERSPAN Session ID only has 10-bit. Since we reuse
	 * 32-bit key field as ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
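/* The ERSPAN version defaults to 1 (type II) when IFLA_GRE_ERSPAN_VER is
 * not supplied; the index attribute is only meaningful for v1, and the
 * dir/hwid attributes only for v2.
 */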
static void ip6erspan_set_version(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	if (!data)
		return;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}
static void ip6gre_netlink_parms(struct nlattr *data[],
				struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;
}
static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}
static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init		= ip6gre_tap_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->parms.erspan_ver);

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}
static int ip6erspan_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int t_hlen;
	int ret;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6erspan_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6erspan_tnl_link_config(tunnel, 1);

	dev_hold(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}
static const struct net_device_ops ip6erspan_netdev_ops = {
	.ndo_init		= ip6erspan_tap_init,
	.ndo_uninit		= ip6erspan_tunnel_uninit,
	.ndo_start_xmit		= ip6erspan_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};
static void ip6gre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return 0;
}
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}
static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
		if (!p->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
			goto nla_put_failure;

		if (p->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]        = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]       = { .len = sizeof_field(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]      = { .len = sizeof_field(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]         = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE]   = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK]       = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}
static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ip6erspan_set_version(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}
static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}
static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6erspan_set_version(data, &p);
	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind		= "ip6erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6erspan_tap_setup,
	.validate	= ip6erspan_tap_validate,
	.newlink	= ip6erspan_newlink,
	.changelink	= ip6erspan_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
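/* For reference, devices backed by the rtnl_link_ops above are normally
 * created with iproute2.  A minimal sketch (device names and addresses
 * are examples only):
 *
 *   ip link add name gre6d type ip6gre \
 *           local fc00::1 remote fc00::2 hoplimit 64
 *   ip link add name gretap6d type ip6gretap \
 *           local fc00::1 remote fc00::2
 *   ip link add name erspan6d type ip6erspan seq key 10 \
 *           local fc00::1 remote fc00::2 erspan_ver 1 erspan 123
 */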
/*
 *	And now the modules code and kernel interface.
 */

static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	return err;
}
static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");