/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(sizeof_field(struct ip_tunnel_key, u) -		\
	 sizeof_field(struct ip_tunnel_key, u.ipv4))
#define __ipt_flag_op(op, ...)					\
	op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)

#define IP_TUNNEL_DECLARE_FLAGS(...)				\
	__ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)

#define ip_tunnel_flags_zero(...)	__ipt_flag_op(bitmap_zero, __VA_ARGS__)
#define ip_tunnel_flags_copy(...)	__ipt_flag_op(bitmap_copy, __VA_ARGS__)
#define ip_tunnel_flags_and(...)	__ipt_flag_op(bitmap_and, __VA_ARGS__)
#define ip_tunnel_flags_or(...)		__ipt_flag_op(bitmap_or, __VA_ARGS__)

#define ip_tunnel_flags_empty(...)				\
	__ipt_flag_op(bitmap_empty, __VA_ARGS__)
#define ip_tunnel_flags_intersect(...)				\
	__ipt_flag_op(bitmap_intersects, __VA_ARGS__)
#define ip_tunnel_flags_subset(...)				\
	__ipt_flag_op(bitmap_subset, __VA_ARGS__)
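
/* Usage sketch (illustrative only, not part of this header's API): tunnel
 * flags are plain bitmaps sized by __IP_TUNNEL_FLAG_NUM, so the generic
 * bitmap/bitops helpers apply directly, e.g. rejecting a request for an
 * unsupported bit.  Assumes the IP_TUNNEL_*_BIT enum from
 * <linux/if_tunnel.h>:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(wanted) = { };
 *	IP_TUNNEL_DECLARE_FLAGS(supported) = { };
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, wanted);
 *	__set_bit(IP_TUNNEL_CSUM_BIT, wanted);
 *	__set_bit(IP_TUNNEL_KEY_BIT, supported);
 *
 *	if (!ip_tunnel_flags_subset(wanted, supported))
 *		return -EOPNOTSUPP;
 */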
struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	IP_TUNNEL_DECLARE_FLAGS(tun_flags);
	__be32			label;		/* Flow Label for IPv6 */
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
};
struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};
/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
#define IP_TUNNEL_INFO_BRIDGE	0x04	/* represents a bridged tunnel id */
/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX					\
	GENMASK((sizeof_field(struct ip_tunnel_info,		\
			      options_len) * BITS_PER_BYTE) - 1, 0)
#define ip_tunnel_info_opts(info)				\
	_Generic(info,						\
		 const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
		 struct ip_tunnel_info * : ((void *)((info) + 1))	\
	)
struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	struct ip_tunnel_encap	encap;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;
#endif
	u8			options_len;
	u8			mode;
};
/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif
struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};
/* Kernel-side variant of ip_tunnel_parm */
struct ip_tunnel_parm_kern {
	char			name[IFNAMSIZ];
	IP_TUNNEL_DECLARE_FLAGS(i_flags);
	IP_TUNNEL_DECLARE_FLAGS(o_flags);
	__be32			i_key;
	__be32			o_key;
	int			link;
	struct iphdr		iph;
};
struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node hash_node;

	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct net		*net;	/* netns for packet i/o */

	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */
	int		err_count;	/* Number of arrived ICMP errors */

	/* These four fields used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	atomic_t	o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	/* These four fields used only by ERSPAN */
	u32		index;		/* ERSPAN type II index */
	u8		erspan_ver;	/* ERSPAN version */
	u8		dir;		/* ERSPAN direction */
	u16		hwid;		/* ERSPAN hardware ID */

	struct dst_cache dst_cache;

	struct ip_tunnel_parm_kern parms;

	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
	struct ip_tunnel_encap encap;

#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	unsigned int		ip_tnl_net_id;
	struct gro_cells	gro_cells;
};
struct tnl_ptk_info {
	IP_TUNNEL_DECLARE_FLAGS(flags);
	__be16 proto;
	__be32 key;
	__be32 seq;
	int hdr_len;
};
#define PACKET_RCVD	0
#define PACKET_REJECT	1
#define PACKET_NEXT	2

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)
struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct rtnl_link_ops *rtnl_link_ops;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
};
static inline void ip_tunnel_set_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	ip_tunnel_flags_or(flags, flags, present);
}
static inline void ip_tunnel_clear_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	__ipt_flag_op(bitmap_andnot, flags, flags, present);
}
static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	return ip_tunnel_flags_intersect(flags, present);
}
static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(supp) = { };

	bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
	__set_bit(IP_TUNNEL_VTI_BIT, supp);

	return ip_tunnel_flags_subset(flags, supp);
}
static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
{
	ip_tunnel_flags_zero(dst);

	bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
	__assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
}
static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
{
	__be16 ret;

	ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));

	if (test_bit(IP_TUNNEL_VTI_BIT, flags))
		ret |= VTI_ISVTI;

	return ret;
}
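
/* Usage sketch (illustrative only): round-tripping the legacy __be16 flag
 * word (TUNNEL_KEY, TUNNEL_CSUM, ... from the old UAPI) through the bitmap
 * representation.  ip_tunnel_flags_is_be16_compat() should be checked first,
 * since bits beyond the low 16 (plus VTI) cannot be expressed as a __be16:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *	__be16 legacy = 0;
 *
 *	ip_tunnel_flags_from_be16(flags, TUNNEL_KEY | TUNNEL_CSUM);
 *	if (ip_tunnel_flags_is_be16_compat(flags))
 *		legacy = ip_tunnel_flags_to_be16(flags);
 */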
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id,
				      const unsigned long *tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	ip_tunnel_flags_copy(key->tun_flags, tun_flags);

	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g: GRE over IPSEC, the tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
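
/* Usage sketch (illustrative only): filling a TX key for an IPv4
 * collect-metadata tunnel.  saddr, daddr, tos, ttl and tun_id are
 * placeholders supplied by the caller; label, tp_src and tp_dst are left
 * zero:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *	struct ip_tunnel_key key;
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	ip_tunnel_key_init(&key, saddr, daddr, tos, ttl,
 *			   0, 0, 0, tun_id, flags);
 */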
static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;

	return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}
static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}
/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
#if IS_ENABLED(CONFIG_INET)

static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
				       int proto,
				       __be32 daddr, __be32 saddr,
				       __be32 key, __u8 tos,
				       struct net *net, int oif,
				       __u32 mark, __u32 tun_inner_hash,
				       __u8 flow_flags)
{
	memset(fl4, 0, sizeof(*fl4));

	if (oif) {
		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index(net, oif);
		/* Legacy VRF/l3mdev use case */
		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
	}

	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
	fl4->flowi4_mark = mark;
	fl4->flowi4_multipath_hash = tun_inner_hash;
	fl4->flowi4_flags = flow_flags;
}
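
/* Usage sketch (illustrative only): a GRE-style IPv4 route lookup built on
 * the flow initializer above.  net, dev, skb and the address/key variables
 * are placeholders supplied by the caller:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, daddr, saddr, tun_key,
 *			    tos, net, dev->ifindex, skb->mark, 0, 0);
 *	rt = ip_route_output_key(net, &fl4);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */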
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
			   struct rtnl_link_ops *ops,
			   struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
		  int cmd);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
			      const void __user *data);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, const unsigned long *flags,
				   __be32 remote, __be32 local,
				   __be32 key);
void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm_kern *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm_kern *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
				   struct ip_tunnel_encap *encap);

void ip_tunnel_netlink_parms(struct nlattr *data[],
			     struct ip_tunnel_parm_kern *parms);

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
	int (*err_handler)(struct sk_buff *skb, u32 info);
};
#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);
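
/* Usage sketch (illustrative only): an encapsulation provider (FOU/GUE
 * style) registers a const ops table under its TUNNEL_ENCAP_* type at init
 * time and removes it again on exit.  The structure and callback names
 * below are hypothetical:
 *
 *	static const struct ip_tunnel_encap_ops my_encap_ops = {
 *		.encap_hlen	= my_encap_hlen,
 *		.build_header	= my_build_header,
 *		.err_handler	= my_err_handler,
 *	};
 *
 *	err = ip_tunnel_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *	ip_tunnel_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 */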
static inline enum skb_drop_reason
pskb_inet_may_pull_reason(struct sk_buff *skb)
{
	int nhlen;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	default:
		nhlen = 0;
	}

	return pskb_network_may_pull_reason(skb, nhlen);
}

static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
	return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
}
/* Variant of pskb_inet_may_pull().
 */
static inline enum skb_drop_reason
skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit)
{
	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
	__be16 type = skb->protocol;
	enum skb_drop_reason reason;

	/* Essentially this is skb_protocol(skb, true)
	 * And we get MAC len.
	 */
	if (eth_type_vlan(type))
		type = __vlan_get_protocol(skb, type, &maclen);

	switch (type) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	}
	/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
	 * a base network header in skb->head.
	 */
	reason = pskb_may_pull_reason(skb, maclen + nhlen);
	if (reason)
		return reason;

	skb_set_network_header(skb, maclen);

	return SKB_NOT_DROPPED_YET;
}
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}
static inline int ip_tunnel_encap(struct sk_buff *skb,
				  struct ip_tunnel_encap *e,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, e, protocol, fl4);
	rcu_read_unlock();

	return ret;
}
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}
static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
					     const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IPV6))
		return ip6_flowlabel((const struct ipv6hdr *)iph);
	else
		return 0;
}
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->ttl;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ((const struct ipv6hdr *)iph)->hop_limit;
	else
		return 0;
}
/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
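
/* Usage sketch (illustrative only): deriving the outer IPv4 TOS on the
 * transmit path.  "tnl_params" and "inner_iph" are placeholders; the low
 * bit of the configured TOS is the usual "inherit from the inner header"
 * convention used by the IPv4 tunnel drivers:
 *
 *	u8 tos = tnl_params->tos;
 *
 *	if (tos & 0x1)
 *		tos = ip_tunnel_get_dsfield(inner_iph, skb);
 *	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
 */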
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet);

static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
				       __be16 inner_proto, bool xnet)
{
	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, u8 proto,
		   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     int md_size);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
			  int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		u64_stats_add(&tstats->tx_bytes, pkt_len);
		u64_stats_inc(&tstats->tx_packets);
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
		return;
	}

	if (pkt_len < 0) {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	} else {
		DEV_STATS_INC(dev, tx_dropped);
	}
}
static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = len;
	if (len > 0) {
		memcpy(ip_tunnel_info_opts(info), from, len);
		ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
				   flags);
	}
}
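
/* Usage sketch (illustrative only): attaching per-packet metadata options,
 * here a hypothetical Geneve TLV blob, to a TX ip_tunnel_info.  The matching
 * IP_TUNNEL_*_OPT_BIT is passed alongside so that readers of the opaque
 * option area know how to interpret it:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
 *	ip_tunnel_info_opts_set(info, opts, opts_len, flags);
 */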
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);
#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */