#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/fou.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
27 struct list_head list
;
31 #define FOU_F_REMCSUM_NOPARTIAL BIT(0)
37 struct udp_port_cfg udp_config
;
40 static unsigned int fou_net_id
;
43 struct list_head fou_list
;
44 struct mutex fou_lock
;
47 static inline struct fou
*fou_from_sock(struct sock
*sk
)
49 return sk
->sk_user_data
;
52 static int fou_recv_pull(struct sk_buff
*skb
, struct fou
*fou
, size_t len
)
54 /* Remove 'len' bytes from the packet (UDP header and
55 * FOU header if present).
57 if (fou
->family
== AF_INET
)
58 ip_hdr(skb
)->tot_len
= htons(ntohs(ip_hdr(skb
)->tot_len
) - len
);
60 ipv6_hdr(skb
)->payload_len
=
61 htons(ntohs(ipv6_hdr(skb
)->payload_len
) - len
);
64 skb_postpull_rcsum(skb
, udp_hdr(skb
), len
);
65 skb_reset_transport_header(skb
);
66 return iptunnel_pull_offloads(skb
);
69 static int fou_udp_recv(struct sock
*sk
, struct sk_buff
*skb
)
71 struct fou
*fou
= fou_from_sock(sk
);
76 if (fou_recv_pull(skb
, fou
, sizeof(struct udphdr
)))
79 return -fou
->protocol
;
86 static struct guehdr
*gue_remcsum(struct sk_buff
*skb
, struct guehdr
*guehdr
,
87 void *data
, size_t hdrlen
, u8 ipproto
,
91 size_t start
= ntohs(pd
[0]);
92 size_t offset
= ntohs(pd
[1]);
93 size_t plen
= sizeof(struct udphdr
) + hdrlen
+
94 max_t(size_t, offset
+ sizeof(u16
), start
);
96 if (skb
->remcsum_offload
)
99 if (!pskb_may_pull(skb
, plen
))
101 guehdr
= (struct guehdr
*)&udp_hdr(skb
)[1];
103 skb_remcsum_process(skb
, (void *)guehdr
+ hdrlen
,
104 start
, offset
, nopartial
);
/* GUE control messages are not supported yet; drop them. */
static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}
116 static int gue_udp_recv(struct sock
*sk
, struct sk_buff
*skb
)
118 struct fou
*fou
= fou_from_sock(sk
);
119 size_t len
, optlen
, hdrlen
;
120 struct guehdr
*guehdr
;
127 len
= sizeof(struct udphdr
) + sizeof(struct guehdr
);
128 if (!pskb_may_pull(skb
, len
))
131 guehdr
= (struct guehdr
*)&udp_hdr(skb
)[1];
133 switch (guehdr
->version
) {
134 case 0: /* Full GUE header present */
138 /* Direct encasulation of IPv4 or IPv6 */
142 switch (((struct iphdr
*)guehdr
)->version
) {
153 if (fou_recv_pull(skb
, fou
, sizeof(struct udphdr
)))
159 default: /* Undefined version */
163 optlen
= guehdr
->hlen
<< 2;
166 if (!pskb_may_pull(skb
, len
))
169 /* guehdr may change after pull */
170 guehdr
= (struct guehdr
*)&udp_hdr(skb
)[1];
172 hdrlen
= sizeof(struct guehdr
) + optlen
;
174 if (guehdr
->version
!= 0 || validate_gue_flags(guehdr
, optlen
))
177 hdrlen
= sizeof(struct guehdr
) + optlen
;
179 if (fou
->family
== AF_INET
)
180 ip_hdr(skb
)->tot_len
= htons(ntohs(ip_hdr(skb
)->tot_len
) - len
);
182 ipv6_hdr(skb
)->payload_len
=
183 htons(ntohs(ipv6_hdr(skb
)->payload_len
) - len
);
185 /* Pull csum through the guehdr now . This can be used if
186 * there is a remote checksum offload.
188 skb_postpull_rcsum(skb
, udp_hdr(skb
), len
);
192 if (guehdr
->flags
& GUE_FLAG_PRIV
) {
193 __be32 flags
= *(__be32
*)(data
+ doffset
);
195 doffset
+= GUE_LEN_PRIV
;
197 if (flags
& GUE_PFLAG_REMCSUM
) {
198 guehdr
= gue_remcsum(skb
, guehdr
, data
+ doffset
,
199 hdrlen
, guehdr
->proto_ctype
,
201 FOU_F_REMCSUM_NOPARTIAL
));
207 doffset
+= GUE_PLEN_REMCSUM
;
211 if (unlikely(guehdr
->control
))
212 return gue_control_message(skb
, guehdr
);
214 __skb_pull(skb
, sizeof(struct udphdr
) + hdrlen
);
215 skb_reset_transport_header(skb
);
217 if (iptunnel_pull_offloads(skb
))
220 return -guehdr
->proto_ctype
;
227 static struct sk_buff
*fou_gro_receive(struct sock
*sk
,
228 struct list_head
*head
,
231 u8 proto
= fou_from_sock(sk
)->protocol
;
232 const struct net_offload
**offloads
;
233 const struct net_offload
*ops
;
234 struct sk_buff
*pp
= NULL
;
236 /* We can clear the encap_mark for FOU as we are essentially doing
237 * one of two possible things. We are either adding an L4 tunnel
238 * header to the outer L3 tunnel header, or we are are simply
239 * treating the GRE tunnel header as though it is a UDP protocol
240 * specific header such as VXLAN or GENEVE.
242 NAPI_GRO_CB(skb
)->encap_mark
= 0;
244 /* Flag this frame as already having an outer encap header */
245 NAPI_GRO_CB(skb
)->is_fou
= 1;
248 offloads
= NAPI_GRO_CB(skb
)->is_ipv6
? inet6_offloads
: inet_offloads
;
249 ops
= rcu_dereference(offloads
[proto
]);
250 if (!ops
|| !ops
->callbacks
.gro_receive
)
253 pp
= call_gro_receive(ops
->callbacks
.gro_receive
, head
, skb
);
261 static int fou_gro_complete(struct sock
*sk
, struct sk_buff
*skb
,
264 const struct net_offload
*ops
;
265 u8 proto
= fou_from_sock(sk
)->protocol
;
267 const struct net_offload
**offloads
;
270 offloads
= NAPI_GRO_CB(skb
)->is_ipv6
? inet6_offloads
: inet_offloads
;
271 ops
= rcu_dereference(offloads
[proto
]);
272 if (WARN_ON(!ops
|| !ops
->callbacks
.gro_complete
))
275 err
= ops
->callbacks
.gro_complete(skb
, nhoff
);
277 skb_set_inner_mac_header(skb
, nhoff
);
285 static struct guehdr
*gue_gro_remcsum(struct sk_buff
*skb
, unsigned int off
,
286 struct guehdr
*guehdr
, void *data
,
287 size_t hdrlen
, struct gro_remcsum
*grc
,
291 size_t start
= ntohs(pd
[0]);
292 size_t offset
= ntohs(pd
[1]);
294 if (skb
->remcsum_offload
)
297 if (!NAPI_GRO_CB(skb
)->csum_valid
)
300 guehdr
= skb_gro_remcsum_process(skb
, (void *)guehdr
, off
, hdrlen
,
301 start
, offset
, grc
, nopartial
);
303 skb
->remcsum_offload
= 1;
308 static struct sk_buff
*gue_gro_receive(struct sock
*sk
,
309 struct list_head
*head
,
312 const struct net_offload
**offloads
;
313 const struct net_offload
*ops
;
314 struct sk_buff
*pp
= NULL
;
316 struct guehdr
*guehdr
;
317 size_t len
, optlen
, hdrlen
, off
;
321 struct fou
*fou
= fou_from_sock(sk
);
322 struct gro_remcsum grc
;
325 skb_gro_remcsum_init(&grc
);
327 off
= skb_gro_offset(skb
);
328 len
= off
+ sizeof(*guehdr
);
330 guehdr
= skb_gro_header_fast(skb
, off
);
331 if (skb_gro_header_hard(skb
, len
)) {
332 guehdr
= skb_gro_header_slow(skb
, len
, off
);
333 if (unlikely(!guehdr
))
337 switch (guehdr
->version
) {
341 switch (((struct iphdr
*)guehdr
)->version
) {
343 proto
= IPPROTO_IPIP
;
346 proto
= IPPROTO_IPV6
;
356 optlen
= guehdr
->hlen
<< 2;
359 if (skb_gro_header_hard(skb
, len
)) {
360 guehdr
= skb_gro_header_slow(skb
, len
, off
);
361 if (unlikely(!guehdr
))
365 if (unlikely(guehdr
->control
) || guehdr
->version
!= 0 ||
366 validate_gue_flags(guehdr
, optlen
))
369 hdrlen
= sizeof(*guehdr
) + optlen
;
371 /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
372 * this is needed if there is a remote checkcsum offload.
374 skb_gro_postpull_rcsum(skb
, guehdr
, hdrlen
);
378 if (guehdr
->flags
& GUE_FLAG_PRIV
) {
379 __be32 flags
= *(__be32
*)(data
+ doffset
);
381 doffset
+= GUE_LEN_PRIV
;
383 if (flags
& GUE_PFLAG_REMCSUM
) {
384 guehdr
= gue_gro_remcsum(skb
, off
, guehdr
,
385 data
+ doffset
, hdrlen
, &grc
,
387 FOU_F_REMCSUM_NOPARTIAL
));
394 doffset
+= GUE_PLEN_REMCSUM
;
398 skb_gro_pull(skb
, hdrlen
);
400 list_for_each_entry(p
, head
, list
) {
401 const struct guehdr
*guehdr2
;
403 if (!NAPI_GRO_CB(p
)->same_flow
)
406 guehdr2
= (struct guehdr
*)(p
->data
+ off
);
408 /* Compare base GUE header to be equal (covers
409 * hlen, version, proto_ctype, and flags.
411 if (guehdr
->word
!= guehdr2
->word
) {
412 NAPI_GRO_CB(p
)->same_flow
= 0;
416 /* Compare optional fields are the same. */
417 if (guehdr
->hlen
&& memcmp(&guehdr
[1], &guehdr2
[1],
418 guehdr
->hlen
<< 2)) {
419 NAPI_GRO_CB(p
)->same_flow
= 0;
424 proto
= guehdr
->proto_ctype
;
428 /* We can clear the encap_mark for GUE as we are essentially doing
429 * one of two possible things. We are either adding an L4 tunnel
430 * header to the outer L3 tunnel header, or we are are simply
431 * treating the GRE tunnel header as though it is a UDP protocol
432 * specific header such as VXLAN or GENEVE.
434 NAPI_GRO_CB(skb
)->encap_mark
= 0;
436 /* Flag this frame as already having an outer encap header */
437 NAPI_GRO_CB(skb
)->is_fou
= 1;
440 offloads
= NAPI_GRO_CB(skb
)->is_ipv6
? inet6_offloads
: inet_offloads
;
441 ops
= rcu_dereference(offloads
[proto
]);
442 if (WARN_ON_ONCE(!ops
|| !ops
->callbacks
.gro_receive
))
445 pp
= call_gro_receive(ops
->callbacks
.gro_receive
, head
, skb
);
451 skb_gro_flush_final_remcsum(skb
, pp
, flush
, &grc
);
456 static int gue_gro_complete(struct sock
*sk
, struct sk_buff
*skb
, int nhoff
)
458 const struct net_offload
**offloads
;
459 struct guehdr
*guehdr
= (struct guehdr
*)(skb
->data
+ nhoff
);
460 const struct net_offload
*ops
;
461 unsigned int guehlen
= 0;
465 switch (guehdr
->version
) {
467 proto
= guehdr
->proto_ctype
;
468 guehlen
= sizeof(*guehdr
) + (guehdr
->hlen
<< 2);
471 switch (((struct iphdr
*)guehdr
)->version
) {
473 proto
= IPPROTO_IPIP
;
476 proto
= IPPROTO_IPV6
;
487 offloads
= NAPI_GRO_CB(skb
)->is_ipv6
? inet6_offloads
: inet_offloads
;
488 ops
= rcu_dereference(offloads
[proto
]);
489 if (WARN_ON(!ops
|| !ops
->callbacks
.gro_complete
))
492 err
= ops
->callbacks
.gro_complete(skb
, nhoff
+ guehlen
);
494 skb_set_inner_mac_header(skb
, nhoff
+ guehlen
);
501 static int fou_add_to_port_list(struct net
*net
, struct fou
*fou
)
503 struct fou_net
*fn
= net_generic(net
, fou_net_id
);
506 mutex_lock(&fn
->fou_lock
);
507 list_for_each_entry(fout
, &fn
->fou_list
, list
) {
508 if (fou
->port
== fout
->port
&&
509 fou
->family
== fout
->family
) {
510 mutex_unlock(&fn
->fou_lock
);
515 list_add(&fou
->list
, &fn
->fou_list
);
516 mutex_unlock(&fn
->fou_lock
);
521 static void fou_release(struct fou
*fou
)
523 struct socket
*sock
= fou
->sock
;
525 list_del(&fou
->list
);
526 udp_tunnel_sock_release(sock
);
531 static int fou_create(struct net
*net
, struct fou_cfg
*cfg
,
532 struct socket
**sockp
)
534 struct socket
*sock
= NULL
;
535 struct fou
*fou
= NULL
;
537 struct udp_tunnel_sock_cfg tunnel_cfg
;
540 /* Open UDP socket */
541 err
= udp_sock_create(net
, &cfg
->udp_config
, &sock
);
545 /* Allocate FOU port structure */
546 fou
= kzalloc(sizeof(*fou
), GFP_KERNEL
);
554 fou
->port
= cfg
->udp_config
.local_udp_port
;
555 fou
->family
= cfg
->udp_config
.family
;
556 fou
->flags
= cfg
->flags
;
557 fou
->type
= cfg
->type
;
560 memset(&tunnel_cfg
, 0, sizeof(tunnel_cfg
));
561 tunnel_cfg
.encap_type
= 1;
562 tunnel_cfg
.sk_user_data
= fou
;
563 tunnel_cfg
.encap_destroy
= NULL
;
565 /* Initial for fou type */
567 case FOU_ENCAP_DIRECT
:
568 tunnel_cfg
.encap_rcv
= fou_udp_recv
;
569 tunnel_cfg
.gro_receive
= fou_gro_receive
;
570 tunnel_cfg
.gro_complete
= fou_gro_complete
;
571 fou
->protocol
= cfg
->protocol
;
574 tunnel_cfg
.encap_rcv
= gue_udp_recv
;
575 tunnel_cfg
.gro_receive
= gue_gro_receive
;
576 tunnel_cfg
.gro_complete
= gue_gro_complete
;
583 setup_udp_tunnel_sock(net
, sock
, &tunnel_cfg
);
585 sk
->sk_allocation
= GFP_ATOMIC
;
587 err
= fou_add_to_port_list(net
, fou
);
599 udp_tunnel_sock_release(sock
);
604 static int fou_destroy(struct net
*net
, struct fou_cfg
*cfg
)
606 struct fou_net
*fn
= net_generic(net
, fou_net_id
);
607 __be16 port
= cfg
->udp_config
.local_udp_port
;
608 u8 family
= cfg
->udp_config
.family
;
612 mutex_lock(&fn
->fou_lock
);
613 list_for_each_entry(fou
, &fn
->fou_list
, list
) {
614 if (fou
->port
== port
&& fou
->family
== family
) {
620 mutex_unlock(&fn
->fou_lock
);
625 static struct genl_family fou_nl_family
;
627 static const struct nla_policy fou_nl_policy
[FOU_ATTR_MAX
+ 1] = {
628 [FOU_ATTR_PORT
] = { .type
= NLA_U16
, },
629 [FOU_ATTR_AF
] = { .type
= NLA_U8
, },
630 [FOU_ATTR_IPPROTO
] = { .type
= NLA_U8
, },
631 [FOU_ATTR_TYPE
] = { .type
= NLA_U8
, },
632 [FOU_ATTR_REMCSUM_NOPARTIAL
] = { .type
= NLA_FLAG
, },
635 static int parse_nl_config(struct genl_info
*info
,
638 memset(cfg
, 0, sizeof(*cfg
));
640 cfg
->udp_config
.family
= AF_INET
;
642 if (info
->attrs
[FOU_ATTR_AF
]) {
643 u8 family
= nla_get_u8(info
->attrs
[FOU_ATTR_AF
]);
649 cfg
->udp_config
.ipv6_v6only
= 1;
652 return -EAFNOSUPPORT
;
655 cfg
->udp_config
.family
= family
;
658 if (info
->attrs
[FOU_ATTR_PORT
]) {
659 __be16 port
= nla_get_be16(info
->attrs
[FOU_ATTR_PORT
]);
661 cfg
->udp_config
.local_udp_port
= port
;
664 if (info
->attrs
[FOU_ATTR_IPPROTO
])
665 cfg
->protocol
= nla_get_u8(info
->attrs
[FOU_ATTR_IPPROTO
]);
667 if (info
->attrs
[FOU_ATTR_TYPE
])
668 cfg
->type
= nla_get_u8(info
->attrs
[FOU_ATTR_TYPE
]);
670 if (info
->attrs
[FOU_ATTR_REMCSUM_NOPARTIAL
])
671 cfg
->flags
|= FOU_F_REMCSUM_NOPARTIAL
;
676 static int fou_nl_cmd_add_port(struct sk_buff
*skb
, struct genl_info
*info
)
678 struct net
*net
= genl_info_net(info
);
682 err
= parse_nl_config(info
, &cfg
);
686 return fou_create(net
, &cfg
, NULL
);
689 static int fou_nl_cmd_rm_port(struct sk_buff
*skb
, struct genl_info
*info
)
691 struct net
*net
= genl_info_net(info
);
695 err
= parse_nl_config(info
, &cfg
);
699 return fou_destroy(net
, &cfg
);
702 static int fou_fill_info(struct fou
*fou
, struct sk_buff
*msg
)
704 if (nla_put_u8(msg
, FOU_ATTR_AF
, fou
->sock
->sk
->sk_family
) ||
705 nla_put_be16(msg
, FOU_ATTR_PORT
, fou
->port
) ||
706 nla_put_u8(msg
, FOU_ATTR_IPPROTO
, fou
->protocol
) ||
707 nla_put_u8(msg
, FOU_ATTR_TYPE
, fou
->type
))
710 if (fou
->flags
& FOU_F_REMCSUM_NOPARTIAL
)
711 if (nla_put_flag(msg
, FOU_ATTR_REMCSUM_NOPARTIAL
))
716 static int fou_dump_info(struct fou
*fou
, u32 portid
, u32 seq
,
717 u32 flags
, struct sk_buff
*skb
, u8 cmd
)
721 hdr
= genlmsg_put(skb
, portid
, seq
, &fou_nl_family
, flags
, cmd
);
725 if (fou_fill_info(fou
, skb
) < 0)
726 goto nla_put_failure
;
728 genlmsg_end(skb
, hdr
);
732 genlmsg_cancel(skb
, hdr
);
736 static int fou_nl_cmd_get_port(struct sk_buff
*skb
, struct genl_info
*info
)
738 struct net
*net
= genl_info_net(info
);
739 struct fou_net
*fn
= net_generic(net
, fou_net_id
);
747 ret
= parse_nl_config(info
, &cfg
);
750 port
= cfg
.udp_config
.local_udp_port
;
754 family
= cfg
.udp_config
.family
;
755 if (family
!= AF_INET
&& family
!= AF_INET6
)
758 msg
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
763 mutex_lock(&fn
->fou_lock
);
764 list_for_each_entry(fout
, &fn
->fou_list
, list
) {
765 if (port
== fout
->port
&& family
== fout
->family
) {
766 ret
= fou_dump_info(fout
, info
->snd_portid
,
767 info
->snd_seq
, 0, msg
,
772 mutex_unlock(&fn
->fou_lock
);
776 return genlmsg_reply(msg
, info
);
783 static int fou_nl_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
785 struct net
*net
= sock_net(skb
->sk
);
786 struct fou_net
*fn
= net_generic(net
, fou_net_id
);
790 mutex_lock(&fn
->fou_lock
);
791 list_for_each_entry(fout
, &fn
->fou_list
, list
) {
792 if (idx
++ < cb
->args
[0])
794 ret
= fou_dump_info(fout
, NETLINK_CB(cb
->skb
).portid
,
795 cb
->nlh
->nlmsg_seq
, NLM_F_MULTI
,
800 mutex_unlock(&fn
->fou_lock
);
806 static const struct genl_ops fou_nl_ops
[] = {
809 .doit
= fou_nl_cmd_add_port
,
810 .policy
= fou_nl_policy
,
811 .flags
= GENL_ADMIN_PERM
,
815 .doit
= fou_nl_cmd_rm_port
,
816 .policy
= fou_nl_policy
,
817 .flags
= GENL_ADMIN_PERM
,
821 .doit
= fou_nl_cmd_get_port
,
822 .dumpit
= fou_nl_dump
,
823 .policy
= fou_nl_policy
,
827 static struct genl_family fou_nl_family __ro_after_init
= {
829 .name
= FOU_GENL_NAME
,
830 .version
= FOU_GENL_VERSION
,
831 .maxattr
= FOU_ATTR_MAX
,
833 .module
= THIS_MODULE
,
835 .n_ops
= ARRAY_SIZE(fou_nl_ops
),
838 size_t fou_encap_hlen(struct ip_tunnel_encap
*e
)
840 return sizeof(struct udphdr
);
842 EXPORT_SYMBOL(fou_encap_hlen
);
844 size_t gue_encap_hlen(struct ip_tunnel_encap
*e
)
847 bool need_priv
= false;
849 len
= sizeof(struct udphdr
) + sizeof(struct guehdr
);
851 if (e
->flags
& TUNNEL_ENCAP_FLAG_REMCSUM
) {
852 len
+= GUE_PLEN_REMCSUM
;
856 len
+= need_priv
? GUE_LEN_PRIV
: 0;
860 EXPORT_SYMBOL(gue_encap_hlen
);
862 int __fou_build_header(struct sk_buff
*skb
, struct ip_tunnel_encap
*e
,
863 u8
*protocol
, __be16
*sport
, int type
)
867 err
= iptunnel_handle_offloads(skb
, type
);
871 *sport
= e
->sport
? : udp_flow_src_port(dev_net(skb
->dev
),
876 EXPORT_SYMBOL(__fou_build_header
);
878 int __gue_build_header(struct sk_buff
*skb
, struct ip_tunnel_encap
*e
,
879 u8
*protocol
, __be16
*sport
, int type
)
881 struct guehdr
*guehdr
;
882 size_t hdrlen
, optlen
= 0;
884 bool need_priv
= false;
887 if ((e
->flags
& TUNNEL_ENCAP_FLAG_REMCSUM
) &&
888 skb
->ip_summed
== CHECKSUM_PARTIAL
) {
889 optlen
+= GUE_PLEN_REMCSUM
;
890 type
|= SKB_GSO_TUNNEL_REMCSUM
;
894 optlen
+= need_priv
? GUE_LEN_PRIV
: 0;
896 err
= iptunnel_handle_offloads(skb
, type
);
900 /* Get source port (based on flow hash) before skb_push */
901 *sport
= e
->sport
? : udp_flow_src_port(dev_net(skb
->dev
),
904 hdrlen
= sizeof(struct guehdr
) + optlen
;
906 skb_push(skb
, hdrlen
);
908 guehdr
= (struct guehdr
*)skb
->data
;
912 guehdr
->hlen
= optlen
>> 2;
914 guehdr
->proto_ctype
= *protocol
;
919 __be32
*flags
= data
;
921 guehdr
->flags
|= GUE_FLAG_PRIV
;
923 data
+= GUE_LEN_PRIV
;
925 if (type
& SKB_GSO_TUNNEL_REMCSUM
) {
926 u16 csum_start
= skb_checksum_start_offset(skb
);
929 if (csum_start
< hdrlen
)
932 csum_start
-= hdrlen
;
933 pd
[0] = htons(csum_start
);
934 pd
[1] = htons(csum_start
+ skb
->csum_offset
);
936 if (!skb_is_gso(skb
)) {
937 skb
->ip_summed
= CHECKSUM_NONE
;
938 skb
->encapsulation
= 0;
941 *flags
|= GUE_PFLAG_REMCSUM
;
942 data
+= GUE_PLEN_REMCSUM
;
949 EXPORT_SYMBOL(__gue_build_header
);
951 #ifdef CONFIG_NET_FOU_IP_TUNNELS
953 static void fou_build_udp(struct sk_buff
*skb
, struct ip_tunnel_encap
*e
,
954 struct flowi4
*fl4
, u8
*protocol
, __be16 sport
)
958 skb_push(skb
, sizeof(struct udphdr
));
959 skb_reset_transport_header(skb
);
965 uh
->len
= htons(skb
->len
);
966 udp_set_csum(!(e
->flags
& TUNNEL_ENCAP_FLAG_CSUM
), skb
,
967 fl4
->saddr
, fl4
->daddr
, skb
->len
);
969 *protocol
= IPPROTO_UDP
;
972 static int fou_build_header(struct sk_buff
*skb
, struct ip_tunnel_encap
*e
,
973 u8
*protocol
, struct flowi4
*fl4
)
975 int type
= e
->flags
& TUNNEL_ENCAP_FLAG_CSUM
? SKB_GSO_UDP_TUNNEL_CSUM
:
980 err
= __fou_build_header(skb
, e
, protocol
, &sport
, type
);
984 fou_build_udp(skb
, e
, fl4
, protocol
, sport
);
989 static int gue_build_header(struct sk_buff
*skb
, struct ip_tunnel_encap
*e
,
990 u8
*protocol
, struct flowi4
*fl4
)
992 int type
= e
->flags
& TUNNEL_ENCAP_FLAG_CSUM
? SKB_GSO_UDP_TUNNEL_CSUM
:
997 err
= __gue_build_header(skb
, e
, protocol
, &sport
, type
);
1001 fou_build_udp(skb
, e
, fl4
, protocol
, sport
);
1007 static const struct ip_tunnel_encap_ops fou_iptun_ops
= {
1008 .encap_hlen
= fou_encap_hlen
,
1009 .build_header
= fou_build_header
,
1012 static const struct ip_tunnel_encap_ops gue_iptun_ops
= {
1013 .encap_hlen
= gue_encap_hlen
,
1014 .build_header
= gue_build_header
,
1017 static int ip_tunnel_encap_add_fou_ops(void)
1021 ret
= ip_tunnel_encap_add_ops(&fou_iptun_ops
, TUNNEL_ENCAP_FOU
);
1023 pr_err("can't add fou ops\n");
1027 ret
= ip_tunnel_encap_add_ops(&gue_iptun_ops
, TUNNEL_ENCAP_GUE
);
1029 pr_err("can't add gue ops\n");
1030 ip_tunnel_encap_del_ops(&fou_iptun_ops
, TUNNEL_ENCAP_FOU
);
1037 static void ip_tunnel_encap_del_fou_ops(void)
1039 ip_tunnel_encap_del_ops(&fou_iptun_ops
, TUNNEL_ENCAP_FOU
);
1040 ip_tunnel_encap_del_ops(&gue_iptun_ops
, TUNNEL_ENCAP_GUE
);
1045 static int ip_tunnel_encap_add_fou_ops(void)
1050 static void ip_tunnel_encap_del_fou_ops(void)
1056 static __net_init
int fou_init_net(struct net
*net
)
1058 struct fou_net
*fn
= net_generic(net
, fou_net_id
);
1060 INIT_LIST_HEAD(&fn
->fou_list
);
1061 mutex_init(&fn
->fou_lock
);
1065 static __net_exit
void fou_exit_net(struct net
*net
)
1067 struct fou_net
*fn
= net_generic(net
, fou_net_id
);
1068 struct fou
*fou
, *next
;
1070 /* Close all the FOU sockets */
1071 mutex_lock(&fn
->fou_lock
);
1072 list_for_each_entry_safe(fou
, next
, &fn
->fou_list
, list
)
1074 mutex_unlock(&fn
->fou_lock
);
1077 static struct pernet_operations fou_net_ops
= {
1078 .init
= fou_init_net
,
1079 .exit
= fou_exit_net
,
1081 .size
= sizeof(struct fou_net
),
1084 static int __init
fou_init(void)
1088 ret
= register_pernet_device(&fou_net_ops
);
1092 ret
= genl_register_family(&fou_nl_family
);
1096 ret
= ip_tunnel_encap_add_fou_ops();
1100 genl_unregister_family(&fou_nl_family
);
1102 unregister_pernet_device(&fou_net_ops
);
1107 static void __exit
fou_fini(void)
1109 ip_tunnel_encap_del_fou_ops();
1110 genl_unregister_family(&fou_nl_family
);
1111 unregister_pernet_device(&fou_net_ops
);
1114 module_init(fou_init
);
1115 module_exit(fou_fini
);
1116 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
1117 MODULE_LICENSE("GPL");