/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
   (A sketch of this mechanism follows this comment.)

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would be
     even more informative. This idea appeared to be wrong: only Linux
     complies with RFC 1812 now (yes, guys, Linux is the only true router
     now :-)); all other routers (at least, in my neighbourhood) return
     only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulated packets have
   DF set. But it is not our problem! Nobody could accuse us, we made
   all that we could make. Even if it is your gated who injected the
   fatal route to the network, even if it were you who configured the
   fatal static route: you are innocent. :-)
 */
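/* Illustrative sketch only, not part of this file: the dead-loop guard
 * described above lives in the core transmit path (net/core/dev.c), not
 * here. Roughly, and with names that should be treated as an assumption
 * about that era of the core code:
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT 10
 *
 *	// in __dev_queue_xmit(), around the call into ndo_start_xmit():
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) {
 *		net_crit_ratelimited("Dead loop on virtual device %s\n",
 *				     dev->name);
 *		goto recursion_alert;	// drop the skb instead of recursing
 *	}
 *	__this_cpu_inc(xmit_recursion);
 *	rc = dev_hard_start_xmit(skb, dev, txq, &again);
 *	__this_cpu_dec(xmit_recursion);
 *
 * A tunnel whose destination resolves back through itself re-enters the
 * transmit path on the same cpu, bumps the counter each time, and is
 * dropped once the limit is hit.
 */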
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with enabled checksums. Tell
	   them "thank you".

	   Well, I wonder: RFC 1812 was written by a Cisco employee,
	   so what the hell makes these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;
	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1;
								 see the note
								 after this
								 switch */
		break;

	case ICMP_REDIRECT:
		break;
	}
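	/* Worked example for the RFC 4884 line above (an editorial note,
	 * not from the original source): for ICMP time-exceeded messages
	 * the second reserved byte carries the length of the embedded
	 * "original datagram" field in 32-bit words, so a value of 17
	 * means 17 * 4 = 68 bytes of the offending packet are present.
	 * That length is what gets handed to ip6_err_gen_icmpv6_unreach()
	 * below as data_len.
	 */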
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed
	 * GRE tunnels with enabled checksum. Tell them "thank you".
	 *
	 * Well, I wonder: RFC 1812 was written by a Cisco employee,
	 * so what the hell makes these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check base hdr len */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;
	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field;
	 * use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);
	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;
		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		pkt_md = (struct erspan_metadata *)(ershdr + 1);
		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
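/* Editorial note, not from the original source: the bool selects the GSO
 * type that later segmentation will use. SKB_GSO_GRE_CSUM is needed when
 * the tunnel adds a GRE checksum, since that checksum has to be recomputed
 * for every segment; otherwise plain SKB_GSO_GRE suffices.
 */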
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	int version;
	__be16 df;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_rt;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;
	/* ERSPAN has a fixed 8 byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}
	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp fec0:6666:6666::193.233.7.65
 */
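/* Editorial note, not from the original source: the ifconfig invocations
 * above are the original, legacy examples. With modern iproute2 the same
 * setup is roughly:
 *
 *	ip link set Universe up
 *	ip -6 addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip -6 addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */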
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
*dev
)
947 struct ip_tunnel
*t
= netdev_priv(dev
);
949 if (ipv4_is_multicast(t
->parms
.iph
.daddr
) && t
->mlink
) {
950 struct in_device
*in_dev
;
951 in_dev
= inetdev_by_index(t
->net
, t
->mlink
);
953 ip_mc_dec_group(in_dev
, t
->parms
.iph
.daddr
);
959 static const struct net_device_ops ipgre_netdev_ops
= {
960 .ndo_init
= ipgre_tunnel_init
,
961 .ndo_uninit
= ip_tunnel_uninit
,
962 #ifdef CONFIG_NET_IPGRE_BROADCAST
963 .ndo_open
= ipgre_open
,
964 .ndo_stop
= ipgre_close
,
966 .ndo_start_xmit
= ipgre_xmit
,
967 .ndo_do_ioctl
= ipgre_tunnel_ioctl
,
968 .ndo_change_mtu
= ip_tunnel_change_mtu
,
969 .ndo_get_stats64
= ip_tunnel_get_stats64
,
970 .ndo_get_iflink
= ip_tunnel_get_iflink
,
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAG_LIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
*tb
[], struct nlattr
*data
[],
1069 struct netlink_ext_ack
*extack
)
1077 if (data
[IFLA_GRE_IFLAGS
])
1078 flags
|= nla_get_be16(data
[IFLA_GRE_IFLAGS
]);
1079 if (data
[IFLA_GRE_OFLAGS
])
1080 flags
|= nla_get_be16(data
[IFLA_GRE_OFLAGS
]);
1081 if (flags
& (GRE_VERSION
|GRE_ROUTING
))
1084 if (data
[IFLA_GRE_COLLECT_METADATA
] &&
1085 data
[IFLA_GRE_ENCAP_TYPE
] &&
1086 nla_get_u16(data
[IFLA_GRE_ENCAP_TYPE
]) != TUNNEL_ENCAP_NONE
)
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID only has 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}
static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}
static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
bool is_gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_gretap_dev);
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");
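/* Usage sketch (editorial addition, not from the original source): the
 * rtnl-link aliases above let iproute2 auto-load this module when creating
 * any of the three link kinds. The addresses below are documentation
 * examples only.
 *
 *	ip link add gre1 type gre    local 192.0.2.1 remote 198.51.100.1 ttl 64
 *	ip link add tap1 type gretap local 192.0.2.1 remote 198.51.100.1
 *	ip link add mon1 type erspan local 192.0.2.1 remote 198.51.100.1 \
 *		seq key 100 erspan_ver 1 erspan 123
 */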