// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/ip_tunnels.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
 * We require an exact key match, i.e. if a key is present in the packet
 * it will match only a tunnel with the same key; if it is not present,
 * it will match only a keyless tunnel.
 *
 * All keyless packets, if not matched against a configured keyless tunnel,
 * will match the fallback tunnel.
 */
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))

static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
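
/* Bucket layout, judging by the macro names above: tunnels[3] holds
 * (remote,local) tunnels, tunnels[2] (remote,*), tunnels[1] (*,local)
 * and tunnels[0] the wildcard (*,*) entries.  The lookup below scans
 * them in that order, from most to least specific.
 */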
/* Given src, dst and key, find the appropriate tunnel for an input packet. */

static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	dev = ign->fb_tunnel_dev;
	if (dev && dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
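
/* Pick the hash bucket for a tunnel's own parameters: prio encodes which of
 * local/remote are set (see the tunnels_* aliases above), and the hash only
 * mixes in the remote address when it is a specific unicast address.
 */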
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}

static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}

static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}
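
/* Tunnels are kept on simple RCU-protected singly linked hash chains;
 * linking inserts at the head of the chosen bucket, unlinking walks the
 * chain under RTNL and swings the previous pointer past the entry.
 */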
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}

	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & TUNNEL_SEQ))
		dev->features |= NETIF_F_LLTX;

	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
}
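
/* ICMPv6 error handler: invoked for errors received in response to our
 * encapsulated packets.  It parses the inner GRE header to find the
 * originating tunnel and reacts per ICMPv6 type (unreachable, hop limit,
 * parameter problem, packet too big, redirect).
 */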
static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return -EINVAL;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return -ENOENT;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return 0;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return 0;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				break;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return 0;
	}
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return 0;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return 0;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}
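
/* Receive path for plain GRE-in-IPv6: look up the tunnel by outer addresses
 * and key; in collect_md mode attach a metadata dst carrying the tunnel id
 * before handing the packet to ip6_tnl_rcv().
 */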
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			__be64 tun_id;
			__be16 flags;

			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
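
/* ERSPAN receive path: after the GRE header an ERSPAN base header (v1 or
 * v2) follows; its version decides how much metadata is copied into the
 * tunnel info for collect_md devices.
 */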
static int ip6erspan_rcv(struct sk_buff *skb,
			 struct tnl_ptk_info *tpi,
			 int gre_hdr_len)
{
	struct erspan_base_hdr *ershdr;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}
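
/* Protocol handler for IPPROTO_GRE over IPv6: parse the GRE header, then
 * dispatch to the ERSPAN or plain GRE receive routine; unclaimed packets
 * get an ICMPv6 port-unreachable back.
 */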
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
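
/* The prepare_ip6gre_xmit_{ipv4,ipv6} helpers fill the flow (fl6), the
 * outer traffic class (dsfield) and the encapsulation limit for native
 * (non collect_md) transmits, honouring the IP6_TNL_F_USE_ORIG_* flags.
 */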
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}

static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
	ipv6h = ipv6_hdr(skb);

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}
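
/* Common transmit tail: pick the outer destination, push the GRE header
 * (from the per-packet tunnel key in collect_md mode, from the device
 * parameters otherwise) and hand the skb to ip6_tnl_xmit().
 */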
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
		return -ENOMEM;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		__be16 flags;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		flags = key->tun_flags &
			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
		tunnel->tun_hlen = gre_calc_hlen(flags);

		gre_build_header(skb, tunnel->tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
						      : 0);

	} else {
		if (tunnel->parms.o_flags & TUNNEL_SEQ)
			tunnel->o_seqno++;

		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
				 protocol, tunnel->parms.o_key,
				 htonl(tunnel->o_seqno));
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loops by checking that the tunnel exit-point
 *   doesn't match the source of the incoming packet.
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	if (!t->parms.collect_md)
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return err;

	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct net_device_stats *stats;
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = 0;
	struct flowi6 fl6;
	int err = -EINVAL;
	__be16 proto;
	__u32 mtu;
	int nhoff;
	int thoff;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	t->parms.o_flags &= ~TUNNEL_KEY;
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key,
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			goto tx_err;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
			goto tx_err;
		if (tun_info->options_len < sizeof(*md))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb, tun_id,
					    ntohl(md->u.index), truncate,
					    false);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb, tun_id,
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
		} else {
			goto tx_err;
		}
	} else {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1)
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
		else if (t->parms.erspan_ver == 2)
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
		else
			goto tx_err;

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
					   : htons(ETH_P_ERSPAN2);
	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		goto tx_err;
	}

	return NETDEV_TX_OK;

tx_err:
	stats = &t->dev->stats;
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
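
/* Link (re)configuration helpers: the common part seeds the flowi6 template
 * and the point-to-point flag from the tunnel parameters, the route part
 * sizes needed_headroom and the MTU from the path towards the remote
 * endpoint.
 */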
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}

static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->needed_headroom = rt->dst.dev->hard_header_len +
					       t_hlen;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}

static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	t->parms.fwmark = p->fwmark;
	t->parms.erspan_ver = p->erspan_ver;
	t->parms.index = p->index;
	t->parms.dir = p->dir;
	t->parms.hwid = p->hwid;
	dst_cache_reset(&t->dst_cache);
}

static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}

static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
				      const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
				    const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}
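
/* Legacy ioctl interface (SIOCGETTUNNEL/SIOCADDTUNNEL/SIOCCHGTUNNEL/
 * SIOCDELTUNNEL), kept alongside the rtnl_link_ops based netlink
 * configuration further below.
 */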
static int ip6gre_tunnel_ioctl(struct net_device *dev,
			       struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else {
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}

static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};

static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}
#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)

static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features		|= GRE6_FEATURES;
	dev->hw_features	|= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    nt->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}

static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;
}

static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;

		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  NET_NAME_UNKNOWN,
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch = ip6gre_exit_batch_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
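
/* Netlink validation: plain ip6gre rejects GRE_VERSION/GRE_ROUTING flags,
 * ip6gretap additionally checks the Ethernet address and remote address,
 * and ip6erspan further restricts the flags and checks the session ID,
 * index, direction and hwid ranges.
 */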
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}

static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* ERSPAN Session ID only has 10-bit. Since we reuse
	 * 32-bit key field as ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
static void ip6erspan_set_version(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	if (!data)
		return;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}

static void ip6gre_netlink_parms(struct nlattr *data[],
				 struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;
}
static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init		= ip6gre_tap_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->parms.erspan_ver);

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
	return t_hlen;
}

static int ip6erspan_tap_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int t_hlen;
	int ret;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6erspan_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip6erspan_tnl_link_config(tunnel, 1);

	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static const struct net_device_ops ip6erspan_netdev_ops = {
	.ndo_init		= ip6erspan_tap_init,
	.ndo_uninit		= ip6erspan_tunnel_uninit,
	.ndo_start_xmit		= ip6erspan_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static void ip6gre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}

static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}
static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}

static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		nla_total_size(4) +				/* IFLA_GRE_LINK */
		nla_total_size(2) +				/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +				/* IFLA_GRE_OFLAGS */
		nla_total_size(4) +				/* IFLA_GRE_IKEY */
		nla_total_size(4) +				/* IFLA_GRE_OKEY */
		nla_total_size(sizeof(struct in6_addr)) +	/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +	/* IFLA_GRE_REMOTE */
		nla_total_size(1) +				/* IFLA_GRE_TTL */
		nla_total_size(1) +				/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(4) +				/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +				/* IFLA_GRE_FLAGS */
		nla_total_size(2) +				/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +				/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +				/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +				/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(0) +				/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(4) +				/* IFLA_GRE_FWMARK */
		nla_total_size(4) +				/* IFLA_GRE_ERSPAN_INDEX */
		0;
}
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
		if (!p->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
			goto nla_put_failure;

		if (p->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]        = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]       = { .len = sizeof_field(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]      = { .len = sizeof_field(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]         = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE]   = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK]       = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ip6erspan_set_version(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}

static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6erspan_set_version(data, &p);
	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind		= "ip6erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6erspan_tap_setup,
	.validate	= ip6erspan_tap_validate,
	.newlink	= ip6erspan_newlink,
	.changelink	= ip6erspan_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
/*
 *	And now the modules code and kernel interface.
 */

static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

out:
	return err;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}

static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");