#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/protocol.h>
#include <net/udp_tunnel.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u16 type;
	struct udp_offload udp_offloads;
	struct list_head list;
	struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list; /* fou instances in this namespace */
	struct mutex fou_lock; /* guards fou_list */
};

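/* The fou instance for an encap socket is stashed in sk->sk_user_data by
 * fou_create(); the receive handlers below recover it from there.
 */
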
static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static int fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
	return iptunnel_pull_offloads(skb);
}

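/* fou_udp_recv - encap_rcv handler for plain FOU sockets.
 *
 * Strips the outer UDP header and hands the inner packet back to the IP
 * stack by returning the negative of the configured inner protocol number.
 */
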
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	if (fou_recv_pull(skb, sizeof(struct udphdr)))
		goto drop;

	return -fou->protocol;

drop:
	kfree_skb(skb);
	return 0;
}

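/* gue_remcsum - handle the GUE remote checksum offload option on receive.
 *
 * 'data' points at the private option words; pd[0] and pd[1] carry the
 * checksum start and offset chosen by the sender (see gue_build_header()).
 */
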
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

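/* gue_udp_recv - encap_rcv handler for GUE sockets.
 *
 * Parses the GUE header (version 0 only), processes any private flags such
 * as remote checksum offload, then strips the UDP + GUE headers and returns
 * the negative of guehdr->proto_ctype so the inner protocol is dispatched.
 */
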
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	hdrlen = sizeof(struct guehdr) + optlen;

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now. This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	if (iptunnel_pull_offloads(skb))
		goto drop;

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

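/* GRO handlers: fou_gro_receive()/fou_gro_complete() simply look up and call
 * the GRO callbacks of the inner protocol, while the GUE variants below
 * additionally parse the GUE header and match it across held packets.
 */
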
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

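/* gue_gro_receive - GRO receive for GUE: validates the GUE header, applies
 * remote checksum offload via gue_gro_remcsum(), and marks held packets
 * whose GUE base header or options differ so they are not aggregated
 * together.
 */
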
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = container_of(uoff, struct fou, udp_offloads);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

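/* Configuration below is per network namespace: each netns has a fou_net
 * holding the list of fou instances keyed by UDP port, protected by
 * fou_lock.
 */
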
static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou->port == fout->port) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	if (sk->sk_family == AF_INET)
		udp_del_offload(&fou->udp_offloads);
	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree_rcu(fou, rcu);
}

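/* Bind the UDP socket to the right encap receive handler and GRO offloads
 * for the requested encapsulation type (direct FOU vs. GUE).
 */
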
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}

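/* fou_create - open the kernel UDP socket for a new FOU/GUE port, hook up
 * the encap handlers and offloads, and add the instance to the per-netns
 * list. On any failure the socket is released again.
 */
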
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	fou->type = cfg->type;

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(net, &fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(net, fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	__be16 port = cfg->udp_config.local_udp_port;
	struct fou *fou;
	int err = -EINVAL;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou->port == port) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}

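/* Ports are configured over generic netlink (family FOU_GENL_NAME). From
 * user space this is typically driven by iproute2, for example:
 *   ip fou add port 5555 ipproto 4    (direct FOU carrying IPIP)
 *   ip fou add port 5555 gue          (GUE)
 * though any genl client sending FOU_CMD_ADD/DEL/GET works.
 */
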
static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};

static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		__be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}

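/* Helpers for replying to FOU_CMD_GET and for netlink dumps: fill one
 * netlink message per configured port.
 */
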
static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;

	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (port == fout->port) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
		.policy = fou_nl_policy,
	},
};

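/* Transmit-side helpers used by IP tunnels: report how much outer header
 * space FOU or GUE encapsulation needs for a given tunnel configuration.
 */
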
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

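/* fou_build_udp - push and fill the outer UDP header (the source port comes
 * from the flow hash unless one is configured) and switch the outer
 * protocol to UDP.
 */
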
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

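/* When CONFIG_NET_FOU_IP_TUNNELS is set, register these encap ops so that
 * IP tunnels (for example ipip, sit and GRE) can request FOU or GUE
 * encapsulation of their output.
 */
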
#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};

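/* Module init: register the per-netns state first, then the generic netlink
 * family, then the tunnel encap ops; unwind in reverse order on failure.
 */
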
static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");