// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  SR-IPv6 implementation
 *
 *  Author:
 *  David Lebrun <david.lebrun@uclouvain.be>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#include <net/ip6_fib.h>
#include <net/route.h>
#include <net/seg6.h>
#include <linux/seg6.h>
#include <linux/seg6_iptunnel.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/dst_cache.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
#include <linux/netfilter.h>
static size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
{
	int head = 0;

	switch (tuninfo->mode) {
	case SEG6_IPTUN_MODE_INLINE:
		break;
	case SEG6_IPTUN_MODE_ENCAP:
	case SEG6_IPTUN_MODE_ENCAP_RED:
		head = sizeof(struct ipv6hdr);
		break;
	case SEG6_IPTUN_MODE_L2ENCAP:
	case SEG6_IPTUN_MODE_L2ENCAP_RED:
		return 0;
	}

	return ((tuninfo->srh->hdrlen + 1) << 3) + head;
}
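/* Worked example (illustrative, not part of the original source): an SRH
 * carrying two segments has srh->hdrlen == 4, since 8 fixed bytes plus
 * 2 * 16 segment bytes == 40 == (4 + 1) << 3. ENCAP/ENCAP_RED modes thus
 * reserve 40 + sizeof(struct ipv6hdr) == 80 bytes of headroom, while
 * INLINE mode reserves only the 40 bytes of the SRH itself.
 */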
struct seg6_lwt {
	struct dst_cache cache;
	struct seg6_iptunnel_encap tuninfo[];
};
static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct seg6_lwt *)lwt->data;
}
static inline struct seg6_iptunnel_encap *
seg6_encap_lwtunnel(struct lwtunnel_state *lwt)
{
	return seg6_lwt_lwtunnel(lwt)->tuninfo;
}
static const struct nla_policy seg6_iptunnel_policy[SEG6_IPTUNNEL_MAX + 1] = {
	[SEG6_IPTUNNEL_SRH]	= { .type = NLA_BINARY },
};
static int nla_put_srh(struct sk_buff *skb, int attrtype,
		       struct seg6_iptunnel_encap *tuninfo)
{
	struct seg6_iptunnel_encap *data;
	struct nlattr *nla;
	int len;

	len = SEG6_IPTUN_ENCAP_SIZE(tuninfo);

	nla = nla_reserve(skb, attrtype, len);
	if (!nla)
		return -EMSGSIZE;

	data = nla_data(nla);
	memcpy(data, tuninfo, len);

	return 0;
}
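/* Note (assumption, not confirmed by this file): SEG6_IPTUN_ENCAP_SIZE() is
 * expected to evaluate to sizeof(struct seg6_iptunnel_encap) plus the full
 * SRH length ((srh->hdrlen + 1) << 3), i.e. the complete mode+SRH blob that
 * is copied verbatim into the SEG6_IPTUNNEL_SRH netlink attribute above.
 */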
static void set_tun_src(struct net *net, struct net_device *dev,
			struct in6_addr *daddr, struct in6_addr *saddr)
{
	struct seg6_pernet_data *sdata = seg6_pernet(net);
	struct in6_addr *tun_src;

	rcu_read_lock();

	tun_src = rcu_dereference(sdata->tun_src);

	if (!ipv6_addr_any(tun_src)) {
		memcpy(saddr, tun_src, sizeof(struct in6_addr));
	} else {
		ipv6_dev_get_saddr(net, dev, daddr, IPV6_PREFER_SRC_PUBLIC,
				   saddr);
	}

	rcu_read_unlock();
}
/* Compute flowlabel for outer IPv6 header */
static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
				  struct ipv6hdr *inner_hdr)
{
	int do_flowlabel = net->ipv6.sysctl.seg6_flowlabel;
	__be32 flowlabel = 0;
	u32 hash;

	if (do_flowlabel > 0) {
		hash = skb_get_hash(skb);
		hash = rol32(hash, 16);
		flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
	} else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
		flowlabel = ip6_flowlabel(inner_hdr);
	}
	return flowlabel;
}
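/* As the logic above implies, the seg6_flowlabel sysctl selects the outer
 * flowlabel: a positive value derives it from the skb flow hash, zero copies
 * the inner IPv6 flowlabel (leaving it zero for non-IPv6 payloads), and a
 * negative value always leaves it zero.
 */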
static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
			       int proto, struct dst_entry *cache_dst)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(dst->dev);
	struct ipv6hdr *hdr, *inner_hdr;
	struct ipv6_sr_hdr *isrh;
	int hdrlen, tot_len, err;
	__be32 flowlabel;

	hdrlen = (osrh->hdrlen + 1) << 3;
	tot_len = hdrlen + sizeof(*hdr);

	err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
	if (unlikely(err))
		return err;

	inner_hdr = ipv6_hdr(skb);
	flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);

	skb_push(skb, tot_len);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	hdr = ipv6_hdr(skb);

	/* inherit tc, flowlabel and hlim
	 * hlim will be decremented in ip6_forward() afterwards and
	 * decapsulation will overwrite inner hlim with outer hlim
	 */

	if (skb->protocol == htons(ETH_P_IPV6)) {
		ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
			     flowlabel);
		hdr->hop_limit = inner_hdr->hop_limit;
	} else {
		ip6_flow_hdr(hdr, 0, flowlabel);
		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));

		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

		/* the control block has been erased, so we have to set the
		 * iif once again.
		 * We read the receiving interface index directly from the
		 * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
		 * ip_rcv_core(...)).
		 */
		IP6CB(skb)->iif = skb->skb_iif;
	}

	hdr->nexthdr = NEXTHDR_ROUTING;

	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, osrh, hdrlen);

	isrh->nexthdr = proto;

	hdr->daddr = isrh->segments[isrh->first_segment];
	set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(isrh)) {
		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	skb_postpush_rcsum(skb, hdr, tot_len);

	return 0;
}
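/* Resulting layout after __seg6_do_srh_encap() (sketch, derived from the
 * code above):
 *
 *   | outer IPv6 (daddr = segments[first_segment]) | SRH | inner packet |
 *
 * The inner packet is left untouched; only the outer header and SRH are
 * prepended, and payload_len plus the skb checksum are updated.
 */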
/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
{
	return __seg6_do_srh_encap(skb, osrh, proto, NULL);
}
EXPORT_SYMBOL_GPL(seg6_do_srh_encap);
/* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
static int seg6_do_srh_encap_red(struct sk_buff *skb,
				 struct ipv6_sr_hdr *osrh, int proto,
				 struct dst_entry *cache_dst)
{
	__u8 first_seg = osrh->first_segment;
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(dst->dev);
	struct ipv6hdr *hdr, *inner_hdr;
	int hdrlen = ipv6_optlen(osrh);
	int red_tlv_offset, tlv_offset;
	struct ipv6_sr_hdr *isrh;
	bool skip_srh = false;
	__be32 flowlabel;
	int tot_len, err;
	int red_hdrlen;
	int tlvs_len;

	if (first_seg > 0) {
		red_hdrlen = hdrlen - sizeof(struct in6_addr);
	} else {
		/* NOTE: if tag/flags and/or other TLVs are introduced in the
		 * seg6_iptunnel infrastructure, they should be considered when
		 * deciding to skip the SRH.
		 */
		skip_srh = !sr_has_hmac(osrh);

		red_hdrlen = skip_srh ? 0 : hdrlen;
	}

	tot_len = red_hdrlen + sizeof(struct ipv6hdr);

	err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
	if (unlikely(err))
		return err;

	inner_hdr = ipv6_hdr(skb);
	flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);

	skb_push(skb, tot_len);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);

	/* based on seg6_do_srh_encap() */
	if (skb->protocol == htons(ETH_P_IPV6)) {
		ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
			     flowlabel);
		hdr->hop_limit = inner_hdr->hop_limit;
	} else {
		ip6_flow_hdr(hdr, 0, flowlabel);
		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));

		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		IP6CB(skb)->iif = skb->skb_iif;
	}

	/* no matter if we have to skip the SRH or not, the first segment
	 * always comes in the pushed IPv6 header.
	 */
	hdr->daddr = osrh->segments[first_seg];

	if (skip_srh) {
		hdr->nexthdr = proto;

		set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
		goto out;
	}

	/* we cannot skip the SRH, slow path */

	hdr->nexthdr = NEXTHDR_ROUTING;
	isrh = (void *)hdr + sizeof(struct ipv6hdr);

	if (unlikely(!first_seg)) {
		/* this is a very rare case; we have only one SID but
		 * we cannot skip the SRH since we are carrying some
		 * other info.
		 */
		memcpy(isrh, osrh, hdrlen);
		goto srcaddr;
	}

	tlv_offset = sizeof(*osrh) + (first_seg + 1) * sizeof(struct in6_addr);
	red_tlv_offset = tlv_offset - sizeof(struct in6_addr);

	memcpy(isrh, osrh, red_tlv_offset);

	tlvs_len = hdrlen - tlv_offset;
	if (unlikely(tlvs_len > 0)) {
		const void *s = (const void *)osrh + tlv_offset;
		void *d = (void *)isrh + red_tlv_offset;

		memcpy(d, s, tlvs_len);
	}

	--isrh->first_segment;
	isrh->hdrlen -= 2;

srcaddr:
	isrh->nexthdr = proto;
	set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (unlikely(!skip_srh && sr_has_hmac(isrh))) {
		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

out:
	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	skb_postpush_rcsum(skb, hdr, tot_len);

	return 0;
}
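/* Reduced-mode layout (sketch, derived from the code above): the first
 * segment lives only in the outer destination address, so the pushed SRH
 * drops one in6_addr (16 bytes, i.e. hdrlen shrinks by 2 units) and, when
 * there is a single segment and no HMAC TLV, the SRH is omitted entirely:
 *
 *   | outer IPv6 (daddr = segments[first_segment]) | reduced SRH (opt.) | inner |
 */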
static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
				struct dst_entry *cache_dst)
{
	struct ipv6hdr *hdr, *oldhdr;
	struct ipv6_sr_hdr *isrh;
	int hdrlen, err;

	hdrlen = (osrh->hdrlen + 1) << 3;

	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
	if (unlikely(err))
		return err;

	oldhdr = ipv6_hdr(skb);

	skb_pull(skb, sizeof(struct ipv6hdr));
	skb_postpull_rcsum(skb, skb_network_header(skb),
			   sizeof(struct ipv6hdr));

	skb_push(skb, sizeof(struct ipv6hdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);

	memmove(hdr, oldhdr, sizeof(*hdr));

	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, osrh, hdrlen);

	isrh->nexthdr = hdr->nexthdr;
	hdr->nexthdr = NEXTHDR_ROUTING;

	isrh->segments[0] = hdr->daddr;
	hdr->daddr = isrh->segments[isrh->first_segment];

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(isrh)) {
		struct net *net = dev_net(skb_dst(skb)->dev);

		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);

	return 0;
}
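/* Resulting layout after __seg6_do_srh_inline() (sketch, derived from the
 * code above):
 *
 *   | original IPv6 (daddr = segments[first_segment]) | SRH | payload |
 *
 * The original destination address is preserved in segments[0], so the last
 * segment endpoint can restore it when the SRH is consumed.
 */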
static int seg6_do_srh(struct sk_buff *skb, struct dst_entry *cache_dst)
{
	struct dst_entry *dst = skb_dst(skb);
	struct seg6_iptunnel_encap *tinfo;
	int proto;
	int err;

	tinfo = seg6_encap_lwtunnel(dst->lwtstate);

	switch (tinfo->mode) {
	case SEG6_IPTUN_MODE_INLINE:
		if (skb->protocol != htons(ETH_P_IPV6))
			return -EINVAL;

		err = __seg6_do_srh_inline(skb, tinfo->srh, cache_dst);
		if (err)
			return err;
		break;
	case SEG6_IPTUN_MODE_ENCAP:
	case SEG6_IPTUN_MODE_ENCAP_RED:
		err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
		if (err)
			return err;

		if (skb->protocol == htons(ETH_P_IPV6))
			proto = IPPROTO_IPV6;
		else if (skb->protocol == htons(ETH_P_IP))
			proto = IPPROTO_IPIP;
		else
			return -EINVAL;

		if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
			err = __seg6_do_srh_encap(skb, tinfo->srh,
						  proto, cache_dst);
		else
			err = seg6_do_srh_encap_red(skb, tinfo->srh,
						    proto, cache_dst);
		if (err)
			return err;

		skb_set_inner_transport_header(skb, skb_transport_offset(skb));
		skb_set_inner_protocol(skb, skb->protocol);
		skb->protocol = htons(ETH_P_IPV6);
		break;
	case SEG6_IPTUN_MODE_L2ENCAP:
	case SEG6_IPTUN_MODE_L2ENCAP_RED:
		if (!skb_mac_header_was_set(skb))
			return -EINVAL;

		if (pskb_expand_head(skb, skb->mac_len, 0, GFP_ATOMIC) < 0)
			return -ENOMEM;

		skb_mac_header_rebuild(skb);
		skb_push(skb, skb->mac_len);

		if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
			err = __seg6_do_srh_encap(skb, tinfo->srh,
						  IPPROTO_ETHERNET,
						  cache_dst);
		else
			err = seg6_do_srh_encap_red(skb, tinfo->srh,
						    IPPROTO_ETHERNET,
						    cache_dst);
		if (err)
			return err;

		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	return 0;
}
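/* Mode summary (informational, derived from seg6_do_srh() above): INLINE
 * inserts the SRH into the existing IPv6 header (IPv6 payloads only),
 * ENCAP/ENCAP_RED wrap the L3 packet in a new outer IPv6 header, and
 * L2ENCAP/L2ENCAP_RED carry the whole L2 frame behind an IPPROTO_ETHERNET
 * next header.
 */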
/* insert an SRH within an IPv6 packet, just after the IPv6 header */
int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
	return __seg6_do_srh_inline(skb, osrh, NULL);
}
EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
static int seg6_input_finish(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	return dst_input(skb);
}
static int seg6_input_core(struct net *net, struct sock *sk,
			   struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct seg6_lwt *slwt;
	int err;

	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);

	dst = dst_cache_get(&slwt->cache);

	err = seg6_do_srh(skb, dst);
	if (unlikely(err)) {
		dst_release(dst);
		goto drop;
	}

	if (!dst) {
		ip6_route_input(skb);
		dst = skb_dst(skb);
		if (!dst->error) {
			dst_cache_set_ip6(&slwt->cache, dst,
					  &ipv6_hdr(skb)->saddr);
		}

		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
		if (unlikely(err))
			goto drop;
	} else {
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);
	}

	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       dev_net(skb->dev), NULL, skb, NULL,
			       skb_dst(skb)->dev, seg6_input_finish);

	return seg6_input_finish(dev_net(skb->dev), NULL, skb);

drop:
	kfree_skb(skb);
	return err;
}
static int seg6_input_nf(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct net *net = dev_net(skb->dev);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, NULL,
			       skb, NULL, dev, seg6_input_core);
	case htons(ETH_P_IPV6):
		return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, NULL,
			       skb, NULL, dev, seg6_input_core);
	}

	return -EINVAL;
}
static int seg6_input(struct sk_buff *skb)
{
	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
		return seg6_input_nf(skb);

	return seg6_input_core(dev_net(skb->dev), NULL, skb);
}
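/* Note (assumption about surrounding infrastructure): nf_hooks_lwtunnel_enabled
 * is the static key toggled by the nf_hooks_lwtunnel netfilter sysctl; when it
 * is enabled, locally encapsulated traffic traverses the POST_ROUTING/LOCAL_OUT
 * hooks above instead of going straight to dst_input()/dst_output().
 */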
static int seg6_output_core(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct seg6_lwt *slwt;
	int err;

	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);

	dst = dst_cache_get(&slwt->cache);

	err = seg6_do_srh(skb, dst);
	if (unlikely(err)) {
		dst_release(dst);
		goto drop;
	}

	if (unlikely(!dst)) {
		struct ipv6hdr *hdr = ipv6_hdr(skb);
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.daddr = hdr->daddr;
		fl6.saddr = hdr->saddr;
		fl6.flowlabel = ip6_flowinfo(hdr);
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_proto = hdr->nexthdr;

		dst = ip6_route_output(net, NULL, &fl6);
		if (dst->error) {
			err = dst->error;
			dst_release(dst);
			goto drop;
		}

		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);

		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
		if (unlikely(err)) {
			dst_release(dst);
			goto drop;
		}
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
			       NULL, skb_dst(skb)->dev, dst_output);

	return dst_output(net, sk, skb);

drop:
	kfree_skb(skb);
	return err;
}
static int seg6_output_nf(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
			       NULL, dev, seg6_output_core);
	case htons(ETH_P_IPV6):
		return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
			       NULL, dev, seg6_output_core);
	}

	return -EINVAL;
}
static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
		return seg6_output_nf(net, sk, skb);

	return seg6_output_core(net, sk, skb);
}
static int seg6_build_state(struct net *net, struct nlattr *nla,
			    unsigned int family, const void *cfg,
			    struct lwtunnel_state **ts,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[SEG6_IPTUNNEL_MAX + 1];
	struct seg6_iptunnel_encap *tuninfo;
	struct lwtunnel_state *newts;
	int tuninfo_len, min_size;
	struct seg6_lwt *slwt;
	int err;

	if (family != AF_INET && family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, SEG6_IPTUNNEL_MAX, nla,
					  seg6_iptunnel_policy, extack);
	if (err < 0)
		return err;

	if (!tb[SEG6_IPTUNNEL_SRH])
		return -EINVAL;

	tuninfo = nla_data(tb[SEG6_IPTUNNEL_SRH]);
	tuninfo_len = nla_len(tb[SEG6_IPTUNNEL_SRH]);

	/* tuninfo must contain at least the iptunnel encap structure,
	 * the SRH and one segment
	 */
	min_size = sizeof(*tuninfo) + sizeof(struct ipv6_sr_hdr) +
		   sizeof(struct in6_addr);
	if (tuninfo_len < min_size)
		return -EINVAL;

	switch (tuninfo->mode) {
	case SEG6_IPTUN_MODE_INLINE:
		if (family != AF_INET6)
			return -EINVAL;

		break;
	case SEG6_IPTUN_MODE_ENCAP:
		break;
	case SEG6_IPTUN_MODE_L2ENCAP:
		break;
	case SEG6_IPTUN_MODE_ENCAP_RED:
		break;
	case SEG6_IPTUN_MODE_L2ENCAP_RED:
		break;
	default:
		return -EINVAL;
	}

	/* verify that SRH is consistent */
	if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo), false))
		return -EINVAL;

	newts = lwtunnel_state_alloc(tuninfo_len + sizeof(*slwt));
	if (!newts)
		return -ENOMEM;

	slwt = seg6_lwt_lwtunnel(newts);

	err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
	if (err) {
		kfree(newts);
		return err;
	}

	memcpy(&slwt->tuninfo, tuninfo, tuninfo_len);

	newts->type = LWTUNNEL_ENCAP_SEG6;
	newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;

	if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP)
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	newts->headroom = seg6_lwt_headroom(tuninfo);

	*ts = newts;

	return 0;
}
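/* Example configuration (illustrative addresses and device name, not from
 * the original source) that exercises this build path via iproute2:
 *
 *   ip -6 route add fc00:db8::/64 encap seg6 mode encap \
 *           segs fc00::1,fc00::2 dev eth0
 *
 * iproute2 packs the requested mode and SRH into the SEG6_IPTUNNEL_SRH
 * attribute parsed above.
 */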
static void seg6_destroy_state(struct lwtunnel_state *lwt)
{
	dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache);
}
static int seg6_fill_encap_info(struct sk_buff *skb,
				struct lwtunnel_state *lwtstate)
{
	struct seg6_iptunnel_encap *tuninfo = seg6_encap_lwtunnel(lwtstate);

	if (nla_put_srh(skb, SEG6_IPTUNNEL_SRH, tuninfo))
		return -EMSGSIZE;

	return 0;
}
static int seg6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct seg6_iptunnel_encap *tuninfo = seg6_encap_lwtunnel(lwtstate);

	return nla_total_size(SEG6_IPTUN_ENCAP_SIZE(tuninfo));
}
static int seg6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct seg6_iptunnel_encap *a_hdr = seg6_encap_lwtunnel(a);
	struct seg6_iptunnel_encap *b_hdr = seg6_encap_lwtunnel(b);
	int len = SEG6_IPTUN_ENCAP_SIZE(a_hdr);

	if (len != SEG6_IPTUN_ENCAP_SIZE(b_hdr))
		return 1;

	return memcmp(a_hdr, b_hdr, len);
}
static const struct lwtunnel_encap_ops seg6_iptun_ops = {
	.build_state = seg6_build_state,
	.destroy_state = seg6_destroy_state,
	.output = seg6_output,
	.input = seg6_input,
	.fill_encap = seg6_fill_encap_info,
	.get_encap_size = seg6_encap_nlsize,
	.cmp_encap = seg6_encap_cmp,
	.owner = THIS_MODULE,
};
int __init seg6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&seg6_iptun_ops, LWTUNNEL_ENCAP_SEG6);
}
void seg6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&seg6_iptun_ops, LWTUNNEL_ENCAP_SEG6);
}