/* GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1 << VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN)
#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN)
/* per-network namespace private data for this module */
struct geneve_net {
        struct list_head        geneve_list;
        struct list_head        sock_list;
};

static unsigned int geneve_net_id;

struct geneve_dev_node {
        struct hlist_node hlist;
        struct geneve_dev *geneve;
};

/* Pseudo network device */
struct geneve_dev {
        struct geneve_dev_node hlist4;  /* vni hash table for IPv4 socket */
#if IS_ENABLED(CONFIG_IPV6)
        struct geneve_dev_node hlist6;  /* vni hash table for IPv6 socket */
#endif
        struct net         *net;        /* netns for packet i/o */
        struct net_device  *dev;        /* netdev for geneve tunnel */
        struct ip_tunnel_info info;
        struct geneve_sock __rcu *sock4;        /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
        struct geneve_sock __rcu *sock6;        /* IPv6 socket used for geneve tunnel */
#endif
        struct list_head   next;        /* geneve's per namespace list */
        struct gro_cells   gro_cells;
        bool               collect_md;
        bool               use_udp6_rx_checksums;
};

struct geneve_sock {
        bool                    collect_md;
        struct list_head        list;
        struct socket           *sock;
        struct rcu_head         rcu;
        int                     refcnt;
        struct hlist_head       vni_list[VNI_HASH_SIZE];
};
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
        __u32 vnid;

        vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
        return hash_32(vnid, VNI_HASH_BITS);
}
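
/* The 24 bit VNI occupies the low three bytes of the 64 bit tunnel ID
 * in network byte order, i.e. bytes 5-7 of the __be64 value; the
 * byte-order specific shifts below preserve that layout.
 */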
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
        return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
        return (__force __be64)(((__force u64)vni[0] << 40) |
                                ((__force u64)vni[1] << 48) |
                                ((__force u64)vni[2] << 56));
#endif
}

/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
        vni[0] = (__force __u8)(tun_id >> 16);
        vni[1] = (__force __u8)(tun_id >> 8);
        vni[2] = (__force __u8)tun_id;
#else
        vni[0] = (__force __u8)((__force u64)tun_id >> 40);
        vni[1] = (__force __u8)((__force u64)tun_id >> 48);
        vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}

static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
{
#ifdef __BIG_ENDIAN
        return (vni[0] == tun_id[2]) &&
               (vni[1] == tun_id[1]) &&
               (vni[2] == tun_id[0]);
#else
        return !memcmp(vni, &tun_id[5], 3);
#endif
}
static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
        return gs->sock->sk->sk_family;
}
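
/* RX device lookup: a received packet must match both the VNI in its
 * Geneve header and, for non-metadata sockets, the configured remote
 * endpoint address of the device.
 */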
static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
                                        __be32 addr, u8 vni[])
{
        struct hlist_head *vni_list_head;
        struct geneve_dev_node *node;
        __u32 hash;

        /* Find the device for this VNI */
        hash = geneve_net_vni_hash(vni);
        vni_list_head = &gs->vni_list[hash];
        hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
                if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
                    addr == node->geneve->info.key.u.ipv4.dst)
                        return node->geneve;
        }
        return NULL;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
                                         struct in6_addr addr6, u8 vni[])
{
        struct hlist_head *vni_list_head;
        struct geneve_dev_node *node;
        __u32 hash;

        /* Find the device for this VNI */
        hash = geneve_net_vni_hash(vni);
        vni_list_head = &gs->vni_list[hash];
        hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
                if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
                    ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst))
                        return node->geneve;
        }
        return NULL;
}
#endif
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
        return (struct genevehdr *)(udp_hdr(skb) + 1);
}
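
/* Map an incoming packet to its geneve netdev. Sockets in collect_md
 * mode use a zero VNI and wildcard address, so every packet on the
 * port resolves to the single metadata-based device.
 */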
static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
                                            struct sk_buff *skb)
{
        static u8 zero_vni[3];
        u8 *vni;

        if (geneve_get_sk_family(gs) == AF_INET) {
                struct iphdr *iph;
                __be32 addr;

                iph = ip_hdr(skb); /* outer IP header... */

                if (gs->collect_md) {
                        vni = zero_vni;
                        addr = 0;
                } else {
                        vni = geneve_hdr(skb)->vni;
                        addr = iph->saddr;
                }

                return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
        } else if (geneve_get_sk_family(gs) == AF_INET6) {
                static struct in6_addr zero_addr6;
                struct ipv6hdr *ip6h;
                struct in6_addr addr6;

                ip6h = ipv6_hdr(skb); /* outer IPv6 header... */

                if (gs->collect_md) {
                        vni = zero_vni;
                        addr6 = zero_addr6;
                } else {
                        vni = geneve_hdr(skb)->vni;
                        addr6 = ip6h->saddr;
                }

                return geneve6_lookup(gs, addr6, vni);
#endif
        }
        return NULL;
}
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                      struct sk_buff *skb)
{
        struct genevehdr *gnvh = geneve_hdr(skb);
        struct metadata_dst *tun_dst = NULL;
        struct pcpu_sw_netstats *stats;
        unsigned int len;
        int err = 0;
        void *oiph;

        if (ip_tunnel_collect_metadata() || gs->collect_md) {
                __be16 flags;

                flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
                        (gnvh->oam ? TUNNEL_OAM : 0) |
                        (gnvh->critical ? TUNNEL_CRIT_OPT : 0);

                tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
                                         vni_to_tunnel_id(gnvh->vni),
                                         gnvh->opt_len * 4);
                if (!tun_dst) {
                        geneve->dev->stats.rx_dropped++;
                        goto drop;
                }
                /* Update tunnel dst according to Geneve options. */
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
                                        gnvh->options, gnvh->opt_len * 4,
                                        TUNNEL_GENEVE_OPT);
        } else {
                /* Drop packets w/ critical options,
                 * since we don't support any...
                 */
                if (gnvh->critical) {
                        geneve->dev->stats.rx_frame_errors++;
                        geneve->dev->stats.rx_errors++;
                        goto drop;
                }
        }

        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, geneve->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

        if (tun_dst)
                skb_dst_set(skb, &tun_dst->dst);

        /* Ignore packet loops (and multicast echo) */
        if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
                geneve->dev->stats.rx_errors++;
                goto drop;
        }

        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);

        if (geneve_get_sk_family(gs) == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
        else
                err = IP6_ECN_decapsulate(oiph, skb);
#endif

        if (unlikely(err)) {
                if (log_ecn_error) {
                        if (geneve_get_sk_family(gs) == AF_INET)
                                net_info_ratelimited("non-ECT from %pI4 "
                                                     "with TOS=%#x\n",
                                                     &((struct iphdr *)oiph)->saddr,
                                                     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
                        else
                                net_info_ratelimited("non-ECT from %pI6\n",
                                                     &((struct ipv6hdr *)oiph)->saddr);
#endif
                }
                if (err > 1) {
                        ++geneve->dev->stats.rx_frame_errors;
                        ++geneve->dev->stats.rx_errors;
                        goto drop;
                }
        }

        len = skb->len;
        err = gro_cells_receive(&geneve->gro_cells, skb);
        if (likely(err == NET_RX_SUCCESS)) {
                stats = this_cpu_ptr(geneve->dev->tstats);
                u64_stats_update_begin(&stats->syncp);
                stats->rx_packets++;
                stats->rx_bytes += len;
                u64_stats_update_end(&stats->syncp);
        }
        return;

drop:
        /* Consume bad packet */
        kfree_skb(skb);
}
/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        int err;

        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        err = gro_cells_init(&geneve->gro_cells, dev);
        if (err) {
                free_percpu(dev->tstats);
                return err;
        }

        err = dst_cache_init(&geneve->info.dst_cache, GFP_KERNEL);
        if (err) {
                free_percpu(dev->tstats);
                gro_cells_destroy(&geneve->gro_cells);
                return err;
        }
        return 0;
}

static void geneve_uninit(struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);

        dst_cache_destroy(&geneve->info.dst_cache);
        gro_cells_destroy(&geneve->gro_cells);
        free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        struct genevehdr *geneveh;
        struct geneve_dev *geneve;
        struct geneve_sock *gs;
        int opts_len;

        /* Need UDP and Geneve header to be present */
        if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
                goto drop;

        /* Return packets with reserved bits set */
        geneveh = geneve_hdr(skb);
        if (unlikely(geneveh->ver != GENEVE_VER))
                goto drop;

        if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
                goto drop;

        gs = rcu_dereference_sk_user_data(sk);
        if (!gs)
                goto drop;

        geneve = geneve_lookup_skb(gs, skb);
        if (!geneve)
                goto drop;

        opts_len = geneveh->opt_len * 4;
        if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
                                 htons(ETH_P_TEB),
                                 !net_eq(geneve->net, dev_net(geneve->dev)))) {
                geneve->dev->stats.rx_dropped++;
                goto drop;
        }

        geneve_rx(geneve, gs, skb);
        return 0;

drop:
        /* Consume bad packet */
        kfree_skb(skb);
        return 0;
}
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
                                         __be16 port, bool ipv6_rx_csum)
{
        struct socket *sock;
        struct udp_port_cfg udp_conf;
        int err;

        memset(&udp_conf, 0, sizeof(udp_conf));

        if (ipv6) {
                udp_conf.family = AF_INET6;
                udp_conf.ipv6_v6only = 1;
                udp_conf.use_udp6_rx_checksums = ipv6_rx_csum;
        } else {
                udp_conf.family = AF_INET;
                udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
        }

        udp_conf.local_udp_port = port;

        /* Open UDP socket */
        err = udp_sock_create(net, &udp_conf, &sock);
        if (err < 0)
                return ERR_PTR(err);

        return sock;
}

static int geneve_hlen(struct genevehdr *gh)
{
        return sizeof(*gh) + gh->opt_len * 4;
}
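
/* GRO: a held packet can only be merged with this one when their
 * Geneve headers, including any options, are byte-for-byte identical;
 * otherwise its same_flow flag is cleared.
 */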
static struct sk_buff *geneve_gro_receive(struct sock *sk,
                                          struct list_head *head,
                                          struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct genevehdr *gh, *gh2;
        unsigned int hlen, gh_len, off_gnv;
        const struct packet_offload *ptype;
        __be16 type;
        int flush = 1;

        off_gnv = skb_gro_offset(skb);
        hlen = off_gnv + sizeof(*gh);
        gh = skb_gro_header_fast(skb, off_gnv);
        if (skb_gro_header_hard(skb, hlen)) {
                gh = skb_gro_header_slow(skb, hlen, off_gnv);
                if (unlikely(!gh))
                        goto out;
        }

        if (gh->ver != GENEVE_VER || gh->oam)
                goto out;
        gh_len = geneve_hlen(gh);

        hlen = off_gnv + gh_len;
        if (skb_gro_header_hard(skb, hlen)) {
                gh = skb_gro_header_slow(skb, hlen, off_gnv);
                if (unlikely(!gh))
                        goto out;
        }

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                gh2 = (struct genevehdr *)(p->data + off_gnv);
                if (gh->opt_len != gh2->opt_len ||
                    memcmp(gh, gh2, gh_len)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        type = gh->proto_type;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (!ptype)
                goto out_unlock;

        skb_gro_pull(skb, gh_len);
        skb_gro_postpull_rcsum(skb, gh, gh_len);
        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
        flush = 0;

out_unlock:
        rcu_read_unlock();
out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}
static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
                               int nhoff)
{
        struct genevehdr *gh;
        struct packet_offload *ptype;
        __be16 type;
        int gh_len;
        int err = -ENOSYS;

        gh = (struct genevehdr *)(skb->data + nhoff);
        gh_len = geneve_hlen(gh);
        type = gh->proto_type;

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

        rcu_read_unlock();

        skb_set_inner_mac_header(skb, nhoff + gh_len);

        return err;
}
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
                                                bool ipv6, bool ipv6_rx_csum)
{
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_sock *gs;
        struct socket *sock;
        struct udp_tunnel_sock_cfg tunnel_cfg;
        int h;

        gs = kzalloc(sizeof(*gs), GFP_KERNEL);
        if (!gs)
                return ERR_PTR(-ENOMEM);

        sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum);
        if (IS_ERR(sock)) {
                kfree(gs);
                return ERR_CAST(sock);
        }

        gs->sock = sock;
        gs->refcnt = 1;
        for (h = 0; h < VNI_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&gs->vni_list[h]);

        /* Initialize the geneve udp offloads structure */
        udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);

        /* Mark socket as an encapsulation socket */
        memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
        tunnel_cfg.sk_user_data = gs;
        tunnel_cfg.encap_type = 1;
        tunnel_cfg.gro_receive = geneve_gro_receive;
        tunnel_cfg.gro_complete = geneve_gro_complete;
        tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
        tunnel_cfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
        list_add(&gs->list, &gn->sock_list);
        return gs;
}

static void __geneve_sock_release(struct geneve_sock *gs)
{
        if (!gs || --gs->refcnt)
                return;

        list_del(&gs->list);
        udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
        udp_tunnel_sock_release(gs->sock);
        kfree_rcu(gs, rcu);
}
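
/* UDP sockets are shared by all geneve devices bound to the same port;
 * each device holds a reference and the socket is torn down only when
 * the last reference is dropped.
 */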
static void geneve_sock_release(struct geneve_dev *geneve)
{
        struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
        struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);

        rcu_assign_pointer(geneve->sock6, NULL);
#endif

        rcu_assign_pointer(geneve->sock4, NULL);
        synchronize_net();

        __geneve_sock_release(gs4);
#if IS_ENABLED(CONFIG_IPV6)
        __geneve_sock_release(gs6);
#endif
}

static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
                                            sa_family_t family,
                                            __be16 dst_port)
{
        struct geneve_sock *gs;

        list_for_each_entry(gs, &gn->sock_list, list) {
                if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
                    geneve_get_sk_family(gs) == family) {
                        return gs;
                }
        }
        return NULL;
}
static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
        struct net *net = geneve->net;
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev_node *node;
        struct geneve_sock *gs;
        __u8 vni[3];
        __u32 hash;

        gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->info.key.tp_dst);
        if (gs) {
                gs->refcnt++;
                goto out;
        }

        gs = geneve_socket_create(net, geneve->info.key.tp_dst, ipv6,
                                  geneve->use_udp6_rx_checksums);
        if (IS_ERR(gs))
                return PTR_ERR(gs);

out:
        gs->collect_md = geneve->collect_md;
#if IS_ENABLED(CONFIG_IPV6)
        if (ipv6) {
                rcu_assign_pointer(geneve->sock6, gs);
                node = &geneve->hlist6;
        } else
#endif
        {
                rcu_assign_pointer(geneve->sock4, gs);
                node = &geneve->hlist4;
        }
        node->geneve = geneve;

        tunnel_id_to_vni(geneve->info.key.tun_id, vni);
        hash = geneve_net_vni_hash(vni);
        hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]);
        return 0;
}
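
/* Bring-up attaches the device to its UDP socket(s) and hashes it into
 * the socket's VNI table; metadata-based devices open both address
 * families so they can receive over IPv4 and IPv6.
 */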
static int geneve_open(struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        bool metadata = geneve->collect_md;
        bool ipv4, ipv6;
        int ret = 0;

        ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
        ipv4 = !ipv6 || metadata;
#if IS_ENABLED(CONFIG_IPV6)
        if (ipv6) {
                ret = geneve_sock_add(geneve, true);
                if (ret < 0 && ret != -EAFNOSUPPORT)
                        ipv4 = false;
        }
#endif
        if (ipv4)
                ret = geneve_sock_add(geneve, false);
        if (ret < 0)
                geneve_sock_release(geneve);

        return ret;
}

static int geneve_stop(struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);

        hlist_del_init_rcu(&geneve->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
        hlist_del_init_rcu(&geneve->hlist6.hlist);
#endif
        geneve_sock_release(geneve);
        return 0;
}
static void geneve_build_header(struct genevehdr *geneveh,
                                const struct ip_tunnel_info *info)
{
        geneveh->ver = GENEVE_VER;
        geneveh->opt_len = info->options_len / 4;
        geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
        geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
        geneveh->rsvd1 = 0;
        tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
        geneveh->proto_type = htons(ETH_P_TEB);
        geneveh->rsvd2 = 0;

        if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
                ip_tunnel_info_opts_get(geneveh->options, info);
}
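
/* Prepare an skb for encapsulation: reserve headroom for the outer
 * headers, resolve checksum offload, then push the Geneve header in
 * front of the inner Ethernet frame.
 */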
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
                            const struct ip_tunnel_info *info,
                            bool xnet, int ip_hdr_len)
{
        bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
        struct genevehdr *gnvh;
        int min_headroom;
        int err;

        skb_reset_mac_header(skb);
        skb_scrub_packet(skb, xnet);

        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
                       GENEVE_BASE_HLEN + info->options_len + ip_hdr_len;
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err))
                goto free_dst;

        err = udp_tunnel_handle_offloads(skb, udp_sum);
        if (err)
                goto free_dst;

        gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
        geneve_build_header(gnvh, info);
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
        return 0;

free_dst:
        dst_release(dst);
        return err;
}
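
/* Route lookup for the outer IPv4 header. The per-tunnel dst_cache is
 * bypassed when the ToS is inherited from the inner packet, because
 * the selected route then varies per packet.
 */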
static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct geneve_sock *gs4,
                                       struct flowi4 *fl4,
                                       const struct ip_tunnel_info *info)
{
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
        struct geneve_dev *geneve = netdev_priv(dev);
        struct dst_cache *dst_cache;
        struct rtable *rt = NULL;
        __u8 tos;

        if (!gs4)
                return ERR_PTR(-EIO);

        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_mark = skb->mark;
        fl4->flowi4_proto = IPPROTO_UDP;
        fl4->daddr = info->key.u.ipv4.dst;
        fl4->saddr = info->key.u.ipv4.src;

        tos = info->key.tos;
        if ((tos == 1) && !geneve->collect_md) {
                tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
                use_cache = false;
        }

        fl4->flowi4_tos = RT_TOS(tos);

        dst_cache = (struct dst_cache *)&info->dst_cache;
        if (use_cache) {
                rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
                if (rt)
                        return rt;
        }
        rt = ip_route_output_key(geneve->net, fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
                return ERR_PTR(-ENETUNREACH);
        }
        if (rt->dst.dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
                ip_rt_put(rt);
                return ERR_PTR(-ELOOP);
        }
        if (use_cache)
                dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
        return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
                                           struct net_device *dev,
                                           struct geneve_sock *gs6,
                                           struct flowi6 *fl6,
                                           const struct ip_tunnel_info *info)
{
        bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
        struct geneve_dev *geneve = netdev_priv(dev);
        struct dst_entry *dst = NULL;
        struct dst_cache *dst_cache;
        __u8 prio;

        if (!gs6)
                return ERR_PTR(-EIO);

        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_mark = skb->mark;
        fl6->flowi6_proto = IPPROTO_UDP;
        fl6->daddr = info->key.u.ipv6.dst;
        fl6->saddr = info->key.u.ipv6.src;
        prio = info->key.tos;
        if ((prio == 1) && !geneve->collect_md) {
                prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
                use_cache = false;
        }

        fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
                                           info->key.label);
        dst_cache = (struct dst_cache *)&info->dst_cache;
        if (use_cache) {
                dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
                if (dst)
                        return dst;
        }
        dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
                                              NULL);
        if (IS_ERR(dst)) {
                netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
                return ERR_PTR(-ENETUNREACH);
        }
        if (dst->dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
                dst_release(dst);
                return ERR_PTR(-ELOOP);
        }

        if (use_cache)
                dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
        return dst;
}
#endif
static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                           struct geneve_dev *geneve,
                           const struct ip_tunnel_info *info)
{
        bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
        struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
        const struct ip_tunnel_key *key = &info->key;
        struct rtable *rt;
        struct flowi4 fl4;
        __u8 tos, ttl;
        __be16 sport;
        __be16 df;
        int err;

        rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        skb_tunnel_check_pmtu(skb, &rt->dst,
                              GENEVE_IPV4_HLEN + info->options_len);

        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
                tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
                ttl = key->ttl;
        } else {
                tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
                ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst);
        }
        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

        err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
                            tos, ttl, df, sport, geneve->info.key.tp_dst,
                            !net_eq(geneve->net, dev_net(geneve->dev)),
                            !(info->key.tun_flags & TUNNEL_CSUM));
        return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                            struct geneve_dev *geneve,
                            const struct ip_tunnel_info *info)
{
        bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
        struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
        const struct ip_tunnel_key *key = &info->key;
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;
        __u8 prio, ttl;
        __be16 sport;
        int err;

        dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
        if (IS_ERR(dst))
                return PTR_ERR(dst);

        skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);

        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
                prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
                ttl = key->ttl;
        } else {
                prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
                                           ip_hdr(skb), skb);
                ttl = key->ttl ? : ip6_dst_hoplimit(dst);
        }
        err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
                             &fl6.saddr, &fl6.daddr, prio, ttl,
                             info->key.label, sport, geneve->info.key.tp_dst,
                             !(info->key.tun_flags & TUNNEL_CSUM));
        return 0;
}
#endif
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        struct ip_tunnel_info *info = NULL;
        int err;

        if (geneve->collect_md) {
                info = skb_tunnel_info(skb);
                if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
                        netdev_dbg(dev, "no tunnel metadata\n");
                        dev_kfree_skb(skb);
                        dev->stats.tx_dropped++;
                        return NETDEV_TX_OK;
                }
        } else {
                info = &geneve->info;
        }

        rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
        if (info->mode & IP_TUNNEL_INFO_IPV6)
                err = geneve6_xmit_skb(skb, dev, geneve, info);
        else
#endif
                err = geneve_xmit_skb(skb, dev, geneve, info);
        rcu_read_unlock();

        if (likely(!err))
                return NETDEV_TX_OK;

        dev_kfree_skb(skb);

        if (err == -ELOOP)
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
                dev->stats.tx_carrier_errors++;

        dev->stats.tx_errors++;
        return NETDEV_TX_OK;
}
static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu > dev->max_mtu)
                new_mtu = dev->max_mtu;
        else if (new_mtu < dev->min_mtu)
                new_mtu = dev->min_mtu;

        dev->mtu = new_mtu;
        return 0;
}
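
/* ndo_fill_metadata_dst: pre-populate the egress tunnel metadata with
 * the source address the route lookup would pick and the UDP ports the
 * tunnel would use, without actually transmitting anything.
 */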
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
        struct geneve_dev *geneve = netdev_priv(dev);

        if (ip_tunnel_info_af(info) == AF_INET) {
                struct rtable *rt;
                struct flowi4 fl4;
                struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);

                rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);

                ip_rt_put(rt);
                info->key.u.ipv4.src = fl4.saddr;
#if IS_ENABLED(CONFIG_IPV6)
        } else if (ip_tunnel_info_af(info) == AF_INET6) {
                struct dst_entry *dst;
                struct flowi6 fl6;
                struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);

                dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
                if (IS_ERR(dst))
                        return PTR_ERR(dst);

                dst_release(dst);
                info->key.u.ipv6.src = fl6.saddr;
#endif
        } else {
                return -EINVAL;
        }

        info->key.tp_src = udp_flow_src_port(geneve->net, skb,
                                             1, USHRT_MAX, true);
        info->key.tp_dst = geneve->info.key.tp_dst;
        return 0;
}
static const struct net_device_ops geneve_netdev_ops = {
        .ndo_init               = geneve_init,
        .ndo_uninit             = geneve_uninit,
        .ndo_open               = geneve_open,
        .ndo_stop               = geneve_stop,
        .ndo_start_xmit         = geneve_xmit,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_change_mtu         = geneve_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
};

static void geneve_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
        strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
        .get_drvinfo    = geneve_get_drvinfo,
        .get_link       = ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
        .name = "geneve",
};
/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening GENEVE udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void geneve_offload_rx_ports(struct net_device *dev, bool push)
{
        struct net *net = dev_net(dev);
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_sock *gs;

        rcu_read_lock();
        list_for_each_entry_rcu(gs, &gn->sock_list, list) {
                if (push) {
                        udp_tunnel_push_rx_port(dev, gs->sock,
                                                UDP_TUNNEL_TYPE_GENEVE);
                } else {
                        udp_tunnel_drop_rx_port(dev, gs->sock,
                                                UDP_TUNNEL_TYPE_GENEVE);
                }
        }
        rcu_read_unlock();
}
/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->netdev_ops = &geneve_netdev_ops;
        dev->ethtool_ops = &geneve_ethtool_ops;
        dev->needs_free_netdev = true;

        SET_NETDEV_DEVTYPE(dev, &geneve_type);

        dev->features    |= NETIF_F_LLTX;
        dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features    |= NETIF_F_RXCSUM;
        dev->features    |= NETIF_F_GSO_SOFTWARE;

        dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
        dev->hw_features |= NETIF_F_GSO_SOFTWARE;

        /* MTU range: 68 - (something less than 65535) */
        dev->min_mtu = ETH_MIN_MTU;
        /* The max_mtu calculation does not take account of GENEVE
         * options, to avoid excluding potentially valid
         * configurations. This will be further reduced by IPvX hdr size.
         */
        dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

        netif_keep_dst(dev);
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        eth_hw_addr_random(dev);
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
        [IFLA_GENEVE_ID]                = { .type = NLA_U32 },
        [IFLA_GENEVE_REMOTE]            = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_GENEVE_REMOTE6]           = { .len = sizeof(struct in6_addr) },
        [IFLA_GENEVE_TTL]               = { .type = NLA_U8 },
        [IFLA_GENEVE_TOS]               = { .type = NLA_U8 },
        [IFLA_GENEVE_LABEL]             = { .type = NLA_U32 },
        [IFLA_GENEVE_PORT]              = { .type = NLA_U16 },
        [IFLA_GENEVE_COLLECT_METADATA]  = { .type = NLA_FLAG },
        [IFLA_GENEVE_UDP_CSUM]          = { .type = NLA_U8 },
        [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
        [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
                           struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
                        NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
                                            "Provided link layer address is not Ethernet");
                        return -EINVAL;
                }

                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
                        NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
                                            "Provided Ethernet address is not unicast");
                        return -EADDRNOTAVAIL;
                }
        }

        if (!data) {
                NL_SET_ERR_MSG(extack,
                               "Not enough attributes provided to perform the operation");
                return -EINVAL;
        }

        if (data[IFLA_GENEVE_ID]) {
                __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

                if (vni >= GENEVE_N_VID) {
                        NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID],
                                            "Geneve ID must be lower than 16777216");
                        return -ERANGE;
                }
        }

        return 0;
}
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
                                          const struct ip_tunnel_info *info,
                                          bool *tun_on_same_port,
                                          bool *tun_collect_md)
{
        struct geneve_dev *geneve, *t = NULL;

        *tun_on_same_port = false;
        *tun_collect_md = false;
        list_for_each_entry(geneve, &gn->geneve_list, next) {
                if (info->key.tp_dst == geneve->info.key.tp_dst) {
                        *tun_collect_md = geneve->collect_md;
                        *tun_on_same_port = true;
                }
                if (info->key.tun_id == geneve->info.key.tun_id &&
                    info->key.tp_dst == geneve->info.key.tp_dst &&
                    !memcmp(&info->key.u, &geneve->info.key.u, sizeof(info->key.u)))
                        t = geneve;
        }
        return t;
}

static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
        return !(info->key.tun_id || info->key.tun_flags || info->key.tos ||
                 info->key.ttl || info->key.label || info->key.tp_src ||
                 memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}

static bool geneve_dst_addr_equal(struct ip_tunnel_info *a,
                                  struct ip_tunnel_info *b)
{
        if (ip_tunnel_info_af(a) == AF_INET)
                return a->key.u.ipv4.dst == b->key.u.ipv4.dst;
        else
                return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst);
}
static int geneve_configure(struct net *net, struct net_device *dev,
                            struct netlink_ext_ack *extack,
                            const struct ip_tunnel_info *info,
                            bool metadata, bool ipv6_rx_csum)
{
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *t, *geneve = netdev_priv(dev);
        bool tun_collect_md, tun_on_same_port;
        int err, encap_len;

        if (metadata && !is_tnl_info_zero(info)) {
                NL_SET_ERR_MSG(extack,
                               "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified");
                return -EINVAL;
        }

        geneve->net = net;
        geneve->dev = dev;

        t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md);
        if (t)
                return -EBUSY;

        /* make enough headroom for basic scenario */
        encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
        if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
                encap_len += sizeof(struct iphdr);
                dev->max_mtu -= sizeof(struct iphdr);
        } else {
                encap_len += sizeof(struct ipv6hdr);
                dev->max_mtu -= sizeof(struct ipv6hdr);
        }
        dev->needed_headroom = encap_len + ETH_HLEN;

        if (metadata) {
                if (tun_on_same_port) {
                        NL_SET_ERR_MSG(extack,
                                       "There can be only one externally controlled device on a destination port");
                        return -EPERM;
                }
        } else {
                if (tun_collect_md) {
                        NL_SET_ERR_MSG(extack,
                                       "There already exists an externally controlled device on this destination port");
                        return -EPERM;
                }
        }

        dst_cache_reset(&geneve->info.dst_cache);
        geneve->info = *info;
        geneve->collect_md = metadata;
        geneve->use_udp6_rx_checksums = ipv6_rx_csum;

        err = register_netdevice(dev);
        if (err)
                return err;

        list_add(&geneve->next, &gn->geneve_list);
        return 0;
}
static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
{
        memset(info, 0, sizeof(*info));
        info->key.tp_dst = htons(dst_port);
}
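
/* Parse the IFLA_GENEVE_* attributes into an ip_tunnel_info. On
 * changelink only a subset of attributes may be modified; the rest
 * bail out through change_notsup.
 */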
static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
                          struct netlink_ext_ack *extack,
                          struct ip_tunnel_info *info, bool *metadata,
                          bool *use_udp6_rx_checksums, bool changelink)
{
        int attrtype;

        if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) {
                NL_SET_ERR_MSG(extack,
                               "Cannot specify both IPv4 and IPv6 Remote addresses");
                return -EINVAL;
        }

        if (data[IFLA_GENEVE_REMOTE]) {
                if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) {
                        attrtype = IFLA_GENEVE_REMOTE;
                        goto change_notsup;
                }

                info->key.u.ipv4.dst =
                        nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

                if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) {
                        NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE],
                                            "Remote IPv4 address cannot be Multicast");
                        return -EINVAL;
                }
        }

        if (data[IFLA_GENEVE_REMOTE6]) {
#if IS_ENABLED(CONFIG_IPV6)
                if (changelink && (ip_tunnel_info_af(info) == AF_INET)) {
                        attrtype = IFLA_GENEVE_REMOTE6;
                        goto change_notsup;
                }

                info->mode = IP_TUNNEL_INFO_IPV6;
                info->key.u.ipv6.dst =
                        nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);

                if (ipv6_addr_type(&info->key.u.ipv6.dst) &
                    IPV6_ADDR_LINKLOCAL) {
                        NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
                                            "Remote IPv6 address cannot be link-local");
                        return -EINVAL;
                }
                if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) {
                        NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
                                            "Remote IPv6 address cannot be Multicast");
                        return -EINVAL;
                }
                info->key.tun_flags |= TUNNEL_CSUM;
                *use_udp6_rx_checksums = true;
#else
                NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
                                    "IPv6 support not enabled in the kernel");
                return -EPFNOSUPPORT;
#endif
        }

        if (data[IFLA_GENEVE_ID]) {
                __u32 vni;
                __u8 tvni[3];
                __be64 tunid;

                vni = nla_get_u32(data[IFLA_GENEVE_ID]);
                tvni[0] = (vni & 0x00ff0000) >> 16;
                tvni[1] = (vni & 0x0000ff00) >> 8;
                tvni[2] =  vni & 0x000000ff;

                tunid = vni_to_tunnel_id(tvni);
                if (changelink && (tunid != info->key.tun_id)) {
                        attrtype = IFLA_GENEVE_ID;
                        goto change_notsup;
                }
                info->key.tun_id = tunid;
        }

        if (data[IFLA_GENEVE_TTL])
                info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

        if (data[IFLA_GENEVE_TOS])
                info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

        if (data[IFLA_GENEVE_LABEL]) {
                info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
                                  IPV6_FLOWLABEL_MASK;
                if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) {
                        NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL],
                                            "Label attribute only applies for IPv6 Geneve devices");
                        return -EINVAL;
                }
        }

        if (data[IFLA_GENEVE_PORT]) {
                if (changelink) {
                        attrtype = IFLA_GENEVE_PORT;
                        goto change_notsup;
                }
                info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
        }

        if (data[IFLA_GENEVE_COLLECT_METADATA]) {
                if (changelink) {
                        attrtype = IFLA_GENEVE_COLLECT_METADATA;
                        goto change_notsup;
                }
                *metadata = true;
        }

        if (data[IFLA_GENEVE_UDP_CSUM]) {
                if (changelink) {
                        attrtype = IFLA_GENEVE_UDP_CSUM;
                        goto change_notsup;
                }
                if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
                        info->key.tun_flags |= TUNNEL_CSUM;
        }

        if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
#if IS_ENABLED(CONFIG_IPV6)
                if (changelink) {
                        attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
                        goto change_notsup;
                }
                if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
                        info->key.tun_flags &= ~TUNNEL_CSUM;
#else
                NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
                                    "IPv6 support not enabled in the kernel");
                return -EPFNOSUPPORT;
#endif
        }

        if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
#if IS_ENABLED(CONFIG_IPV6)
                if (changelink) {
                        attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
                        goto change_notsup;
                }
                if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
                        *use_udp6_rx_checksums = false;
#else
                NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
                                    "IPv6 support not enabled in the kernel");
                return -EPFNOSUPPORT;
#endif
        }

        return 0;

change_notsup:
        NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
                            "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
        return -EOPNOTSUPP;
}
static void geneve_link_config(struct net_device *dev,
                               struct ip_tunnel_info *info, struct nlattr *tb[])
{
        struct geneve_dev *geneve = netdev_priv(dev);
        int ldev_mtu = 0;

        if (tb[IFLA_MTU]) {
                geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
                return;
        }

        switch (ip_tunnel_info_af(info)) {
        case AF_INET: {
                struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst };
                struct rtable *rt = ip_route_output_key(geneve->net, &fl4);

                if (!IS_ERR(rt) && rt->dst.dev) {
                        ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN;
                        ip_rt_put(rt);
                }
                break;
        }
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6: {
                struct rt6_info *rt;

                if (!__in6_dev_get(dev))
                        break;

                rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
                                NULL, 0);

                if (rt && rt->dst.dev)
                        ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
                ip6_rt_put(rt);
                break;
        }
#endif
        }

        if (ldev_mtu <= 0)
                return;

        geneve_change_mtu(dev, ldev_mtu - info->options_len);
}
static int geneve_newlink(struct net *net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[],
                          struct netlink_ext_ack *extack)
{
        bool use_udp6_rx_checksums = false;
        struct ip_tunnel_info info;
        bool metadata = false;
        int err;

        init_tnl_info(&info, GENEVE_UDP_PORT);
        err = geneve_nl2info(tb, data, extack, &info, &metadata,
                             &use_udp6_rx_checksums, false);
        if (err)
                return err;

        err = geneve_configure(net, dev, extack, &info, metadata,
                               use_udp6_rx_checksums);
        if (err)
                return err;

        geneve_link_config(dev, &info, tb);

        return 0;
}
/* Quiesces the geneve device data path for both TX and RX.
 *
 * On transmit geneve checks for non-NULL geneve_sock before it proceeds.
 * So, if we set that socket to NULL under RCU and wait for synchronize_net()
 * to complete for the existing set of in-flight packets to be transmitted,
 * then we would have quiesced the transmit data path. All the future packets
 * will get dropped until we unquiesce the data path.
 *
 * On receive geneve dereference the geneve_sock stashed in the socket. So,
 * if we set that to NULL under RCU and wait for synchronize_net() to
 * complete, then we would have quiesced the receive data path.
 */
static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4,
                           struct geneve_sock **gs6)
{
        *gs4 = rtnl_dereference(geneve->sock4);
        rcu_assign_pointer(geneve->sock4, NULL);
        if (*gs4)
                rcu_assign_sk_user_data((*gs4)->sock->sk, NULL);
#if IS_ENABLED(CONFIG_IPV6)
        *gs6 = rtnl_dereference(geneve->sock6);
        rcu_assign_pointer(geneve->sock6, NULL);
        if (*gs6)
                rcu_assign_sk_user_data((*gs6)->sock->sk, NULL);
#else
        *gs6 = NULL;
#endif
        synchronize_net();
}

/* Resumes the geneve device data path for both TX and RX. */
static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4,
                             struct geneve_sock __maybe_unused *gs6)
{
        rcu_assign_pointer(geneve->sock4, gs4);
        if (gs4)
                rcu_assign_sk_user_data(gs4->sock->sk, gs4);
#if IS_ENABLED(CONFIG_IPV6)
        rcu_assign_pointer(geneve->sock6, gs6);
        if (gs6)
                rcu_assign_sk_user_data(gs6->sock->sk, gs6);
#endif
        synchronize_net();
}
static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
                             struct nlattr *data[],
                             struct netlink_ext_ack *extack)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        struct geneve_sock *gs4, *gs6;
        struct ip_tunnel_info info;
        bool metadata;
        bool use_udp6_rx_checksums;
        int err;

        /* If the geneve device is configured for metadata (or externally
         * controlled, for example, OVS), then nothing can be changed.
         */
        if (geneve->collect_md)
                return -EOPNOTSUPP;

        /* Start with the existing info. */
        memcpy(&info, &geneve->info, sizeof(info));
        metadata = geneve->collect_md;
        use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
        err = geneve_nl2info(tb, data, extack, &info, &metadata,
                             &use_udp6_rx_checksums, true);
        if (err)
                return err;

        if (!geneve_dst_addr_equal(&geneve->info, &info)) {
                dst_cache_reset(&info.dst_cache);
                geneve_link_config(dev, &info, tb);
        }

        geneve_quiesce(geneve, &gs4, &gs6);
        geneve->info = info;
        geneve->collect_md = metadata;
        geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
        geneve_unquiesce(geneve, gs4, gs6);

        return 0;
}
static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
        struct geneve_dev *geneve = netdev_priv(dev);

        list_del(&geneve->next);
        unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(__u32)) +  /* IFLA_GENEVE_ID */
                nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
                nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
                nla_total_size(sizeof(__be32)) +  /* IFLA_GENEVE_LABEL */
                nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
                nla_total_size(0) +      /* IFLA_GENEVE_COLLECT_METADATA */
                nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
                nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
                nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
                0;
}
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct geneve_dev *geneve = netdev_priv(dev);
        struct ip_tunnel_info *info = &geneve->info;
        bool metadata = geneve->collect_md;
        __u8 tmp_vni[3];
        __u32 vni;

        tunnel_id_to_vni(info->key.tun_id, tmp_vni);
        vni = (tmp_vni[0] << 16) | (tmp_vni[1] << 8) | tmp_vni[2];
        if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
                goto nla_put_failure;

        if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
                if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
                                    info->key.u.ipv4.dst))
                        goto nla_put_failure;
                if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
                               !!(info->key.tun_flags & TUNNEL_CSUM)))
                        goto nla_put_failure;

#if IS_ENABLED(CONFIG_IPV6)
        } else if (!metadata) {
                if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
                                     &info->key.u.ipv6.dst))
                        goto nla_put_failure;
                if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
                               !(info->key.tun_flags & TUNNEL_CSUM)))
                        goto nla_put_failure;
#endif
        }

        if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
            nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
            nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
                goto nla_put_failure;

        if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
                goto nla_put_failure;

        if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
                goto nla_put_failure;

#if IS_ENABLED(CONFIG_IPV6)
        if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
                       !geneve->use_udp6_rx_checksums))
                goto nla_put_failure;
#endif
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
static struct rtnl_link_ops geneve_link_ops __read_mostly = {
        .kind           = "geneve",
        .maxtype        = IFLA_GENEVE_MAX,
        .policy         = geneve_policy,
        .priv_size      = sizeof(struct geneve_dev),
        .setup          = geneve_setup,
        .validate       = geneve_validate,
        .newlink        = geneve_newlink,
        .changelink     = geneve_changelink,
        .dellink        = geneve_dellink,
        .get_size       = geneve_get_size,
        .fill_info      = geneve_fill_info,
};

struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
                                        u8 name_assign_type, u16 dst_port)
{
        struct nlattr *tb[IFLA_MAX + 1];
        struct ip_tunnel_info info;
        struct net_device *dev;
        LIST_HEAD(list_kill);
        int err;

        memset(tb, 0, sizeof(tb));
        dev = rtnl_create_link(net, name, name_assign_type,
                               &geneve_link_ops, tb);
        if (IS_ERR(dev))
                return dev;

        init_tnl_info(&info, dst_port);
        err = geneve_configure(net, dev, NULL, &info, true, true);
        if (err) {
                free_netdev(dev);
                return ERR_PTR(err);
        }

        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
         */
        err = geneve_change_mtu(dev, IP_MAX_MTU);
        if (err)
                goto err;

        err = rtnl_configure_link(dev, NULL);
        if (err < 0)
                goto err;

        return dev;
err:
        geneve_dellink(dev, &list_kill);
        unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
static int geneve_netdevice_event(struct notifier_block *unused,
                                  unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
            event == NETDEV_UDP_TUNNEL_DROP_INFO) {
                geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
        } else if (event == NETDEV_UNREGISTER) {
                geneve_offload_rx_ports(dev, false);
        } else if (event == NETDEV_REGISTER) {
                geneve_offload_rx_ports(dev, true);
        }

        return NOTIFY_DONE;
}

static struct notifier_block geneve_notifier_block __read_mostly = {
        .notifier_call = geneve_netdevice_event,
};
static __net_init int geneve_init_net(struct net *net)
{
        struct geneve_net *gn = net_generic(net, geneve_net_id);

        INIT_LIST_HEAD(&gn->geneve_list);
        INIT_LIST_HEAD(&gn->sock_list);
        return 0;
}

static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *geneve, *next;
        struct net_device *dev, *aux;

        /* gather any geneve devices that were moved into this ns */
        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == &geneve_link_ops)
                        unregister_netdevice_queue(dev, head);

        /* now gather any other geneve devices that were created in this ns */
        list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
                /* If geneve->dev is in the same netns, it was already added
                 * to the list by the previous loop.
                 */
                if (!net_eq(dev_net(geneve->dev), net))
                        unregister_netdevice_queue(geneve->dev, head);
        }
}
static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
{
        struct net *net;
        LIST_HEAD(list);

        rtnl_lock();
        list_for_each_entry(net, net_list, exit_list)
                geneve_destroy_tunnels(net, &list);

        /* unregister the devices gathered above */
        unregister_netdevice_many(&list);
        rtnl_unlock();

        list_for_each_entry(net, net_list, exit_list) {
                const struct geneve_net *gn = net_generic(net, geneve_net_id);

                WARN_ON_ONCE(!list_empty(&gn->sock_list));
        }
}

static struct pernet_operations geneve_net_ops = {
        .init = geneve_init_net,
        .exit_batch = geneve_exit_batch_net,
        .id   = &geneve_net_id,
        .size = sizeof(struct geneve_net),
};
static int __init geneve_init_module(void)
{
        int rc;

        rc = register_pernet_subsys(&geneve_net_ops);
        if (rc)
                goto out1;

        rc = register_netdevice_notifier(&geneve_notifier_block);
        if (rc)
                goto out2;

        rc = rtnl_link_register(&geneve_link_ops);
        if (rc)
                goto out3;

        return 0;
out3:
        unregister_netdevice_notifier(&geneve_notifier_block);
out2:
        unregister_pernet_subsys(&geneve_net_ops);
out1:
        return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
        rtnl_link_unregister(&geneve_link_ops);
        unregister_netdevice_notifier(&geneve_notifier_block);
        unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");