#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/protocol.h>
#include <net/udp_tunnel.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
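/* fou: Foo-over-UDP and GUE (Generic UDP Encapsulation).
 *
 * This file implements the receive path (plain FOU and GUE), the GRO
 * receive/complete hooks, the generic netlink configuration interface,
 * and the header-building helpers used by IP tunnels that encapsulate
 * their traffic in UDP.  A receive port is normally configured from
 * user space with iproute2, e.g. "ip fou add port 5555 ipproto 4"
 * (illustrative command; exact syntax depends on the iproute2 version).
 */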
struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u8 family;
	u16 type;
	struct list_head list;
	struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list; /* all fou ports in this namespace */
	struct mutex fou_lock;
};
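/* The struct fou for a port is installed as sk_user_data on the kernel
 * UDP socket by fou_create() (tunnel_cfg.sk_user_data below), so the
 * encap and GRO callbacks can recover it directly from the socket.
 */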
static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}
static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
{
	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	if (fou->family == AF_INET)
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
	else
		ipv6_hdr(skb)->payload_len =
			htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
	return iptunnel_pull_offloads(skb);
}
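/* encap_rcv() return convention: a value > 0 tells the UDP stack to
 * process the skb as normal UDP, 0 means the handler consumed it, and a
 * negative value asks the IP layer to resubmit the (now decapsulated)
 * packet as IP protocol -ret, which is how the inner packet reaches its
 * protocol handler below.
 */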
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
		goto drop;

	return -fou->protocol;

drop:
	kfree_skb(skb);
	return 0;
}
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}
static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}
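/* GUE receive: the base header (struct guehdr) is four bytes carrying
 * hlen, control, version, proto_ctype and flags (also addressable as one
 * 32-bit 'word'), followed by hlen << 2 bytes of optional fields such as
 * the private block used for remote checksum offload.
 */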
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	switch (guehdr->version) {
	case 0: /* Full GUE header present */
		break;

	case 1: {
		/* Direct encapsulation of IPv4 or IPv6 */
		int prot;

		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			prot = IPPROTO_IPIP;
			break;
		case 6:
			prot = IPPROTO_IPV6;
			break;
		default:
			goto drop;
		}

		if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
			goto drop;

		return -prot;
	}

	default: /* Undefined version */
		goto drop;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	if (fou->family == AF_INET)
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
	else
		ipv6_hdr(skb)->payload_len =
			htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

	/* Pull csum through the guehdr now.  This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];
			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	if (iptunnel_pull_offloads(skb))
		goto drop;

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}
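/* GRO path: the callbacks below strip or account for the FOU/GUE header
 * and then hand the aggregation decision to the inner protocol's
 * net_offload handlers (inet_offloads/inet6_offloads), keyed by
 * fou->protocol for plain FOU and by the GUE proto_ctype for GUE.
 */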
static struct sk_buff **fou_gro_receive(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = fou_from_sock(sk)->protocol;
	const struct net_offload **offloads;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}
static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
			    int nhoff)
{
	const struct net_offload *ops;
	u8 proto = fou_from_sock(sk)->protocol;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

	skb_set_inner_mac_header(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}
static struct sk_buff **gue_gro_receive(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = fou_from_sock(sk);
	struct gro_remcsum grc;
	u8 proto;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	switch (guehdr->version) {
	case 0:
		break;
	case 1:
		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			proto = IPPROTO_IPIP;
			break;
		case 6:
			proto = IPPROTO_IPV6;
			break;
		default:
			goto out;
		}
		goto next_proto;
	default:
		goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto out;

			data = &guehdr[1];
			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	proto = guehdr->proto_ctype;

next_proto:
	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}
static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen = 0;
	u8 proto;
	int err = -ENOENT;

	switch (guehdr->version) {
	case 0:
		proto = guehdr->proto_ctype;
		guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
		break;
	case 1:
		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			proto = IPPROTO_IPIP;
			break;
		case 6:
			proto = IPPROTO_IPV6;
			break;
		default:
			return err;
		}
		break;
	default:
		return err;
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

	skb_set_inner_mac_header(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}
static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou->port == fout->port &&
		    fou->family == fout->family) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}
static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;

	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree_rcu(fou, rcu);
}
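/* fou_create() opens a kernel UDP socket bound to the configured port and
 * registers this module's encap_rcv/GRO callbacks on it through
 * setup_udp_tunnel_sock(), so datagrams arriving on that port are
 * decapsulated here instead of being delivered as ordinary UDP.
 */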
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->port = cfg->udp_config.local_udp_port;
	fou->family = cfg->udp_config.family;
	fou->flags = cfg->flags;
	fou->type = cfg->type;
	fou->sock = sock;

	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.sk_user_data = fou;
	tunnel_cfg.encap_destroy = NULL;

	/* Initialize handlers for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		tunnel_cfg.encap_rcv = fou_udp_recv;
		tunnel_cfg.gro_receive = fou_gro_receive;
		tunnel_cfg.gro_complete = fou_gro_complete;
		fou->protocol = cfg->protocol;
		break;
	case FOU_ENCAP_GUE:
		tunnel_cfg.encap_rcv = gue_udp_recv;
		tunnel_cfg.gro_receive = gue_gro_receive;
		tunnel_cfg.gro_complete = gue_gro_complete;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	sk->sk_allocation = GFP_ATOMIC;

	err = fou_add_to_port_list(net, fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	__be16 port = cfg->udp_config.local_udp_port;
	u8 family = cfg->udp_config.family;
	int err = -EINVAL;
	struct fou *fou;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou->port == port && fou->family == family) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}
static struct genl_family fou_nl_family;

static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]			= { .type = NLA_U16, },
	[FOU_ATTR_AF]			= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]		= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]			= { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG, },
};
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		switch (family) {
		case AF_INET:
			break;
		case AF_INET6:
			cfg->udp_config.ipv6_v6only = 1;
			break;
		default:
			return -EAFNOSUPPORT;
		}

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		__be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}
static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}
static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}
static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;

	return 0;
}
static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	u8 family;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;

	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	family = cfg.udp_config.family;
	if (family != AF_INET && family != AF_INET6)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (port == fout->port && family == fout->family) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}
static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}
static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
		.policy = fou_nl_policy,
	},
};
static struct genl_family fou_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= fou_nl_ops,
	.n_ops		= ARRAY_SIZE(fou_nl_ops),
};
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);
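/* Plain FOU adds only the UDP header; GUE additionally accounts for the
 * GUE base header and, when remote checksum offload is requested, the
 * private flags word plus the remote checksum option.
 */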
size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}
int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	int err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	return 0;
}
EXPORT_SYMBOL(__fou_build_header);
int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __fou_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	void *data;
	bool need_priv = false;
	int err;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	/* Get source port (based on flow hash) before skb_push */
	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__gue_build_header);
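/* The remote checksum offload option written above stores the checksum
 * start and the checksum field location (both relative to the end of the
 * GUE header) as two big-endian 16-bit values; gue_remcsum() and
 * gue_gro_remcsum() read them back as pd[0]/pd[1] on the receive side.
 */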
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __gue_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);
#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif
static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};
static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family(&fou_nl_family);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");
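/* Example of how these encap ops are typically consumed (illustrative
 * iproute2 commands, not part of this file; option names may differ by
 * version):
 *
 *   ip fou add port 5555 ipproto 4
 *   ip link add name tun0 type ipip remote 192.0.2.2 local 192.0.2.1 \
 *       encap fou encap-sport auto encap-dport 5555
 *
 * The first command creates the receive port handled by fou_udp_recv();
 * the second makes the ipip tunnel call fou_build_header() on transmit
 * via the TUNNEL_ENCAP_FOU ops registered above.
 */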