/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN)
#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN)
/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	struct list_head	sock_list;
};

static unsigned int geneve_net_id;

struct geneve_dev_node {
	struct hlist_node hlist;
	struct geneve_dev *geneve;
};
/* Pseudo network device */
struct geneve_dev {
	struct geneve_dev_node hlist4;	/* vni hash table for IPv4 socket */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_dev_node hlist6;	/* vni hash table for IPv6 socket */
#endif
	struct net		*net;	/* netns for packet i/o */
	struct net_device	*dev;	/* netdev for geneve tunnel */
	struct ip_tunnel_info	info;
	struct geneve_sock __rcu *sock4;	/* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock __rcu *sock6;	/* IPv6 socket used for geneve tunnel */
#endif
	struct list_head	next;	/* geneve's per namespace list */
	struct gro_cells	gro_cells;
	bool			collect_md;
	bool			use_udp6_rx_checksums;
};

struct geneve_sock {
	bool			collect_md;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
};
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
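
/* Note: in either byte order the 24 bit VNI lands in bytes 5..7 of the
 * wire-format (__be64) tunnel ID as stored in memory, which is what
 * eq_tun_id_and_vni() below relies on when it memcmp()s against &tun_id[5].
 */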
static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
{
	return !memcmp(vni, &tun_id[5], 3);
}
static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
	return gs->sock->sk->sk_family;
}
static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev_node *node;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
		if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
		    addr == node->geneve->info.key.u.ipv4.dst)
			return node->geneve;
	}
	return NULL;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
					 struct in6_addr addr6, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev_node *node;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
		if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
		    ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst))
			return node->geneve;
	}
	return NULL;
}
#endif
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}
static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
					    struct sk_buff *skb)
{
	static u8 zero_vni[3];
	u8 *vni;

	if (geneve_get_sk_family(gs) == AF_INET) {
		struct iphdr *iph;
		__be32 addr;

		iph = ip_hdr(skb); /* outer IP header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr = 0;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr = iph->saddr;
		}

		return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (geneve_get_sk_family(gs) == AF_INET6) {
		static struct in6_addr zero_addr6;
		struct ipv6hdr *ip6h;
		struct in6_addr addr6;

		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr6 = zero_addr6;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr6 = ip6h->saddr;
		}

		return geneve6_lookup(gs, addr6, vni);
#endif
	}
	return NULL;
}
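
/* Receive-side device selection, as read from the lookup above: for sockets
 * owned by a collect_md (externally controlled) device the VNI and remote
 * address in the device key are zero, so the zero_vni/zero-address probe
 * matches that single device for all traffic on the port; otherwise the
 * device is chosen by the VNI in the Geneve header plus the outer source
 * address.
 */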
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
		      struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	unsigned int len;
	int err = 0;
	void *oiph;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst) {
			geneve->dev->stats.rx_dropped++;
			goto drop;
		}
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical) {
			geneve->dev->stats.rx_frame_errors++;
			geneve->dev->stats.rx_errors++;
			goto drop;
		}
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
		geneve->dev->stats.rx_errors++;
		goto drop;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (geneve_get_sk_family(gs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (geneve_get_sk_family(gs) == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
#endif
		}
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	len = skb->len;
	err = gro_cells_receive(&geneve->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS)) {
		stats = this_cpu_ptr(geneve->dev->tstats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}
/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = dst_cache_init(&geneve->info.dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		gro_cells_destroy(&geneve->gro_cells);
		return err;
	}
	return 0;
}
static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	dst_cache_destroy(&geneve->info.dst_cache);
	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_dev *geneve;
	struct geneve_sock *gs;
	int opts_len;

	/* Need UDP and Geneve header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto drop;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto drop;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve = geneve_lookup_skb(gs, skb);
	if (!geneve)
		goto drop;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB),
				 !net_eq(geneve->net, dev_net(geneve->dev)))) {
		geneve->dev->stats.rx_dropped++;
		goto drop;
	}

	geneve_rx(geneve, gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port, bool ipv6_rx_csum)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.ipv6_v6only = 1;
		udp_conf.use_udp6_rx_checksums = ipv6_rx_csum;
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}
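
/* opt_len is carried in 4-byte multiples, so the full Geneve header handled
 * here is 8 bytes of fixed header plus up to 63 * 4 bytes of options.
 */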
static struct sk_buff **geneve_gro_receive(struct sock *sk,
					   struct sk_buff **head,
					   struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
			       int nhoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + gh_len);

	return err;
}
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6, bool ipv6_rx_csum)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.gro_receive = geneve_gro_receive;
	tunnel_cfg.gro_complete = geneve_gro_complete;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}
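
/* A geneve_sock (and its underlying UDP socket) is shared by every geneve
 * device bound to the same destination port and address family;
 * __geneve_sock_release() below only tears it down once the last user is
 * gone (refcnt).
 */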
static void __geneve_sock_release(struct geneve_sock *gs)
{
	if (!gs || --gs->refcnt)
		return;

	list_del(&gs->list);
	udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}

static void geneve_sock_release(struct geneve_dev *geneve)
{
	struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);

	rcu_assign_pointer(geneve->sock6, NULL);
#endif

	rcu_assign_pointer(geneve->sock4, NULL);
	synchronize_net();

	__geneve_sock_release(gs4);
#if IS_ENABLED(CONFIG_IPV6)
	__geneve_sock_release(gs6);
#endif
}
static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    sa_family_t family,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    geneve_get_sk_family(gs) == family) {
			return gs;
		}
	}
	return NULL;
}
static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev_node *node;
	struct geneve_sock *gs;
	__u8 vni[3];
	__u32 hash;

	gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->info.key.tp_dst);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->info.key.tp_dst, ipv6,
				  geneve->use_udp6_rx_checksums);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6) {
		rcu_assign_pointer(geneve->sock6, gs);
		node = &geneve->hlist6;
	} else
#endif
	{
		rcu_assign_pointer(geneve->sock4, gs);
		node = &geneve->hlist4;
	}
	node->geneve = geneve;

	tunnel_id_to_vni(geneve->info.key.tun_id, vni);
	hash = geneve_net_vni_hash(vni);
	hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]);
	return 0;
}
static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
	bool metadata = geneve->collect_md;
	int ret = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6 || metadata)
		ret = geneve_sock_add(geneve, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = geneve_sock_add(geneve, false);
	if (ret < 0)
		geneve_sock_release(geneve);

	return ret;
}
static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	hlist_del_init_rcu(&geneve->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
	hlist_del_init_rcu(&geneve->hlist6.hlist);
#endif
	geneve_sock_release(geneve);
	return 0;
}
static void geneve_build_header(struct genevehdr *geneveh,
				const struct ip_tunnel_info *info)
{
	geneveh->ver = GENEVE_VER;
	geneveh->opt_len = info->options_len / 4;
	geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
	geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
	geneveh->rsvd1 = 0;
	tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
	geneveh->proto_type = htons(ETH_P_TEB);
	geneveh->rsvd2 = 0;

	ip_tunnel_info_opts_get(geneveh->options, info);
}
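
/* For reference, the header built above follows the Geneve spec (RFC 8926):
 * a 2 bit version, a 6 bit option length in 4-byte words, the OAM and
 * critical flags, a 16 bit protocol type (ETH_P_TEB here), the 24 bit VNI
 * plus a reserved byte, followed by the variable length options.
 */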
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
			    const struct ip_tunnel_info *info,
			    bool xnet, int ip_hdr_len)
{
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		       GENEVE_BASE_HLEN + info->options_len + ip_hdr_len;
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
	geneve_build_header(gnvh, info);
	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}
*geneve_get_v4_rt(struct sk_buff
*skb
,
713 struct net_device
*dev
,
714 struct geneve_sock
*gs4
,
716 const struct ip_tunnel_info
*info
)
718 bool use_cache
= ip_tunnel_dst_cache_usable(skb
, info
);
719 struct geneve_dev
*geneve
= netdev_priv(dev
);
720 struct dst_cache
*dst_cache
;
721 struct rtable
*rt
= NULL
;
725 return ERR_PTR(-EIO
);
727 memset(fl4
, 0, sizeof(*fl4
));
728 fl4
->flowi4_mark
= skb
->mark
;
729 fl4
->flowi4_proto
= IPPROTO_UDP
;
730 fl4
->daddr
= info
->key
.u
.ipv4
.dst
;
731 fl4
->saddr
= info
->key
.u
.ipv4
.src
;
734 if ((tos
== 1) && !geneve
->collect_md
) {
735 tos
= ip_tunnel_get_dsfield(ip_hdr(skb
), skb
);
738 fl4
->flowi4_tos
= RT_TOS(tos
);
740 dst_cache
= (struct dst_cache
*)&info
->dst_cache
;
742 rt
= dst_cache_get_ip4(dst_cache
, &fl4
->saddr
);
746 rt
= ip_route_output_key(geneve
->net
, fl4
);
748 netdev_dbg(dev
, "no route to %pI4\n", &fl4
->daddr
);
749 return ERR_PTR(-ENETUNREACH
);
751 if (rt
->dst
.dev
== dev
) { /* is this necessary? */
752 netdev_dbg(dev
, "circular route to %pI4\n", &fl4
->daddr
);
754 return ERR_PTR(-ELOOP
);
757 dst_cache_set_ip4(dst_cache
, &rt
->dst
, fl4
->saddr
);
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
					   struct net_device *dev,
					   struct geneve_sock *gs6,
					   struct flowi6 *fl6,
					   const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_entry *dst = NULL;
	struct dst_cache *dst_cache;
	__u8 prio;

	if (!gs6)
		return ERR_PTR(-EIO);

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_proto = IPPROTO_UDP;
	fl6->daddr = info->key.u.ipv6.dst;
	fl6->saddr = info->key.u.ipv6.src;
	prio = info->key.tos;
	if ((prio == 1) && !geneve->collect_md) {
		prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		use_cache = false;
	}

	fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
					   info->key.label);
	dst_cache = (struct dst_cache *)&info->dst_cache;
	if (use_cache) {
		dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
		if (dst)
			return dst;
	}
	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (dst->dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
		dst_release(dst);
		return ERR_PTR(-ELOOP);
	}

	if (use_cache)
		dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
	return dst;
}
#endif
static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			   struct geneve_dev *geneve,
			   const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	__be16 df;
	int err;

	rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	if (skb_dst(skb)) {
		int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
			  info->options_len;

		skb_dst_update_pmtu(skb, mtu);
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	if (geneve->collect_md) {
		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
		ttl = key->ttl;
	} else {
		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
		ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst);
	}
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
			    tos, ttl, df, sport, geneve->info.key.tp_dst,
			    !net_eq(geneve->net, dev_net(geneve->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;
}
859 static int geneve6_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
,
860 struct geneve_dev
*geneve
,
861 const struct ip_tunnel_info
*info
)
863 bool xnet
= !net_eq(geneve
->net
, dev_net(geneve
->dev
));
864 struct geneve_sock
*gs6
= rcu_dereference(geneve
->sock6
);
865 const struct ip_tunnel_key
*key
= &info
->key
;
866 struct dst_entry
*dst
= NULL
;
872 dst
= geneve_get_v6_dst(skb
, dev
, gs6
, &fl6
, info
);
877 int mtu
= dst_mtu(dst
) - GENEVE_IPV6_HLEN
- info
->options_len
;
879 skb_dst_update_pmtu(skb
, mtu
);
882 sport
= udp_flow_src_port(geneve
->net
, skb
, 1, USHRT_MAX
, true);
883 if (geneve
->collect_md
) {
884 prio
= ip_tunnel_ecn_encap(key
->tos
, ip_hdr(skb
), skb
);
887 prio
= ip_tunnel_ecn_encap(ip6_tclass(fl6
.flowlabel
),
889 ttl
= key
->ttl
? : ip6_dst_hoplimit(dst
);
891 err
= geneve_build_skb(dst
, skb
, info
, xnet
, sizeof(struct ipv6hdr
));
895 udp_tunnel6_xmit_skb(dst
, gs6
->sock
->sk
, skb
, dev
,
896 &fl6
.saddr
, &fl6
.daddr
, prio
, ttl
,
897 info
->key
.label
, sport
, geneve
->info
.key
.tp_dst
,
898 !(info
->key
.tun_flags
& TUNNEL_CSUM
));
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (geneve->collect_md) {
		info = skb_tunnel_info(skb);
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			err = -EINVAL;
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
	} else {
		info = &geneve->info;
	}

	rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
	if (info->mode & IP_TUNNEL_INFO_IPV6)
		err = geneve6_xmit_skb(skb, dev, geneve, info);
	else
#endif
		err = geneve_xmit_skb(skb, dev, geneve, info);
	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > dev->max_mtu)
		new_mtu = dev->max_mtu;
	else if (new_mtu < dev->min_mtu)
		new_mtu = dev->min_mtu;

	dev->mtu = new_mtu;
	return 0;
}
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct geneve_dev *geneve = netdev_priv(dev);

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		struct flowi4 fl4;
		struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);

		rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = fl4.saddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct flowi6 fl6;
		struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);

		dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = fl6.saddr;
#endif
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(geneve->net, skb,
					     1, USHRT_MAX, true);
	info->key.tp_dst = geneve->info.key.tp_dst;
	return 0;
}
static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= geneve_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
};
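
/* The .ndo_fill_metadata_dst hook above pre-computes the egress tunnel
 * metadata (outer source address and UDP ports) for a packet sent through a
 * collect_md device; openvswitch, for instance, reaches it via
 * dev_fill_metadata_dst() when it needs egress tunnel info.
 */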
static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};
/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening GENEVE udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void geneve_offload_rx_ports(struct net_device *dev, bool push)
{
	struct net *net = dev_net(dev);
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;

	rcu_read_lock();
	list_for_each_entry_rcu(gs, &gn->sock_list, list) {
		if (push) {
			udp_tunnel_push_rx_port(dev, gs->sock,
						UDP_TUNNEL_TYPE_GENEVE);
		} else {
			udp_tunnel_drop_rx_port(dev, gs->sock,
						UDP_TUNNEL_TYPE_GENEVE);
		}
	}
	rcu_read_unlock();
}
/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->needs_free_netdev = true;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features    |= NETIF_F_LLTX;
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	/* MTU range: 68 - (something less than 65535) */
	dev->min_mtu = ETH_MIN_MTU;
	/* The max_mtu calculation does not take account of GENEVE
	 * options, to avoid excluding potentially valid
	 * configurations. This will be further reduced by IPvX hdr size.
	 */
	dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

	netif_keep_dst(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_REMOTE6]		= { .len = sizeof(struct in6_addr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_LABEL]		= { .type = NLA_U32 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GENEVE_UDP_CSUM]		= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
};
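
/* For reference, these netlink attributes are what iproute2 sets for the
 * "geneve" link type, e.g. (assuming a reasonably recent iproute2):
 *
 *   ip link add geneve0 type geneve id 42 remote 192.0.2.1 dstport 6081
 *   ip link add geneve1 type geneve external    (collect_md mode)
 */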
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
					    "Provided link layer address is not Ethernet");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
					    "Provided Ethernet address is not unicast");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_N_VID) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID],
					    "Geneve ID must be lower than 16777216");
			return -ERANGE;
		}
	}
	return 0;
}
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  const struct ip_tunnel_info *info,
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t = NULL;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (info->key.tp_dst == geneve->info.key.tp_dst) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (info->key.tun_id == geneve->info.key.tun_id &&
		    info->key.tp_dst == geneve->info.key.tp_dst &&
		    !memcmp(&info->key.u, &geneve->info.key.u, sizeof(info->key.u)))
			t = geneve;
	}
	return t;
}
static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
	return !(info->key.tun_id || info->key.tun_flags || info->key.tos ||
		 info->key.ttl || info->key.label || info->key.tp_src ||
		 memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}
static bool geneve_dst_addr_equal(struct ip_tunnel_info *a,
				  struct ip_tunnel_info *b)
{
	if (ip_tunnel_info_af(a) == AF_INET)
		return a->key.u.ipv4.dst == b->key.u.ipv4.dst;
	else
		return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst);
}
static int geneve_configure(struct net *net, struct net_device *dev,
			    struct netlink_ext_ack *extack,
			    const struct ip_tunnel_info *info,
			    bool metadata, bool ipv6_rx_csum)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err, encap_len;

	if (metadata && !is_tnl_info_zero(info)) {
		NL_SET_ERR_MSG(extack,
			       "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified");
		return -EINVAL;
	}

	geneve->net = net;
	geneve->dev = dev;

	t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	/* make enough headroom for basic scenario */
	encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
	if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
		encap_len += sizeof(struct iphdr);
		dev->max_mtu -= sizeof(struct iphdr);
	} else {
		encap_len += sizeof(struct ipv6hdr);
		dev->max_mtu -= sizeof(struct ipv6hdr);
	}
	dev->needed_headroom = encap_len + ETH_HLEN;

	if (metadata) {
		if (tun_on_same_port) {
			NL_SET_ERR_MSG(extack,
				       "There can be only one externally controlled device on a destination port");
			return -EPERM;
		}
	} else {
		if (tun_collect_md) {
			NL_SET_ERR_MSG(extack,
				       "There already exists an externally controlled device on this destination port");
			return -EPERM;
		}
	}

	dst_cache_reset(&geneve->info.dst_cache);
	geneve->info = *info;
	geneve->collect_md = metadata;
	geneve->use_udp6_rx_checksums = ipv6_rx_csum;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}
static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
{
	memset(info, 0, sizeof(*info));
	info->key.tp_dst = htons(dst_port);
}
static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack,
			  struct ip_tunnel_info *info, bool *metadata,
			  bool *use_udp6_rx_checksums, bool changelink)
{
	int attrtype;

	if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) {
		NL_SET_ERR_MSG(extack,
			       "Cannot specify both IPv4 and IPv6 Remote addresses");
		return -EINVAL;
	}

	if (data[IFLA_GENEVE_REMOTE]) {
		if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) {
			attrtype = IFLA_GENEVE_REMOTE;
			goto change_notsup;
		}

		info->key.u.ipv4.dst =
			nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

		if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE],
					    "Remote IPv4 address cannot be Multicast");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_REMOTE6]) {
#if IS_ENABLED(CONFIG_IPV6)
		if (changelink && (ip_tunnel_info_af(info) == AF_INET)) {
			attrtype = IFLA_GENEVE_REMOTE6;
			goto change_notsup;
		}

		info->mode = IP_TUNNEL_INFO_IPV6;
		info->key.u.ipv6.dst =
			nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);

		if (ipv6_addr_type(&info->key.u.ipv6.dst) &
		    IPV6_ADDR_LINKLOCAL) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
					    "Remote IPv6 address cannot be link-local");
			return -EINVAL;
		}
		if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
					    "Remote IPv6 address cannot be Multicast");
			return -EINVAL;
		}
		info->key.tun_flags |= TUNNEL_CSUM;
		*use_udp6_rx_checksums = true;
#else
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
				    "IPv6 support not enabled in the kernel");
		return -EPFNOSUPPORT;
#endif
	}

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni;
		__u8 tvni[3];
		__be64 tunid;

		vni = nla_get_u32(data[IFLA_GENEVE_ID]);
		tvni[0] = (vni & 0x00ff0000) >> 16;
		tvni[1] = (vni & 0x0000ff00) >> 8;
		tvni[2] =  vni & 0x000000ff;

		tunid = vni_to_tunnel_id(tvni);
		if (changelink && (tunid != info->key.tun_id)) {
			attrtype = IFLA_GENEVE_ID;
			goto change_notsup;
		}
		info->key.tun_id = tunid;
	}

	if (data[IFLA_GENEVE_TTL])
		info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_LABEL]) {
		info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
				  IPV6_FLOWLABEL_MASK;
		if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL],
					    "Label attribute only applies for IPv6 Geneve devices");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_PORT]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_PORT;
			goto change_notsup;
		}
		info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
	}

	if (data[IFLA_GENEVE_COLLECT_METADATA]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_COLLECT_METADATA;
			goto change_notsup;
		}
		*metadata = true;
	}

	if (data[IFLA_GENEVE_UDP_CSUM]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_UDP_CSUM;
			goto change_notsup;
		}
		if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
			info->key.tun_flags |= TUNNEL_CSUM;
	}

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
#if IS_ENABLED(CONFIG_IPV6)
		if (changelink) {
			attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
			goto change_notsup;
		}
		if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
			info->key.tun_flags &= ~TUNNEL_CSUM;
#else
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
				    "IPv6 support not enabled in the kernel");
		return -EPFNOSUPPORT;
#endif
	}

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
#if IS_ENABLED(CONFIG_IPV6)
		if (changelink) {
			attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
			goto change_notsup;
		}
		if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
			*use_udp6_rx_checksums = false;
#else
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
				    "IPv6 support not enabled in the kernel");
		return -EPFNOSUPPORT;
#endif
	}

	return 0;
change_notsup:
	NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
			    "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
	return -EOPNOTSUPP;
}
static void geneve_link_config(struct net_device *dev,
			       struct ip_tunnel_info *info, struct nlattr *tb[])
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int ldev_mtu = 0;

	if (tb[IFLA_MTU]) {
		geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		return;
	}

	switch (ip_tunnel_info_af(info)) {
	case AF_INET: {
		struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst };
		struct rtable *rt = ip_route_output_key(geneve->net, &fl4);

		if (!IS_ERR(rt) && rt->dst.dev) {
			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN;
			ip_rt_put(rt);
		}
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6: {
		struct rt6_info *rt = rt6_lookup(geneve->net,
						 &info->key.u.ipv6.dst, NULL, 0,
						 NULL, 0);

		if (rt && rt->dst.dev)
			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
		ip6_rt_put(rt);
		break;
	}
#endif
	}

	if (ldev_mtu <= 0)
		return;

	geneve_change_mtu(dev, ldev_mtu - info->options_len);
}
static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	bool use_udp6_rx_checksums = false;
	struct ip_tunnel_info info;
	bool metadata = false;
	int err;

	init_tnl_info(&info, GENEVE_UDP_PORT);
	err = geneve_nl2info(tb, data, extack, &info, &metadata,
			     &use_udp6_rx_checksums, false);
	if (err)
		return err;

	err = geneve_configure(net, dev, extack, &info, metadata,
			       use_udp6_rx_checksums);
	if (err)
		return err;

	geneve_link_config(dev, &info, tb);

	return 0;
}
/* Quiesces the geneve device data path for both TX and RX.
 *
 * On transmit geneve checks for non-NULL geneve_sock before it proceeds.
 * So, if we set that socket to NULL under RCU and wait for synchronize_net()
 * to complete for the existing set of in-flight packets to be transmitted,
 * then we would have quiesced the transmit data path. All the future packets
 * will get dropped until we unquiesce the data path.
 *
 * On receive geneve dereference the geneve_sock stashed in the socket. So,
 * if we set that to NULL under RCU and wait for synchronize_net() to
 * complete, then we would have quiesced the receive data path.
 */
static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4,
			   struct geneve_sock **gs6)
{
	*gs4 = rtnl_dereference(geneve->sock4);
	rcu_assign_pointer(geneve->sock4, NULL);
	if (*gs4)
		rcu_assign_sk_user_data((*gs4)->sock->sk, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	*gs6 = rtnl_dereference(geneve->sock6);
	rcu_assign_pointer(geneve->sock6, NULL);
	if (*gs6)
		rcu_assign_sk_user_data((*gs6)->sock->sk, NULL);
#else
	*gs6 = NULL;
#endif
	synchronize_net();
}

/* Resumes the geneve device data path for both TX and RX. */
static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4,
			     struct geneve_sock __maybe_unused *gs6)
{
	rcu_assign_pointer(geneve->sock4, gs4);
	if (gs4)
		rcu_assign_sk_user_data(gs4->sock->sk, gs4);
#if IS_ENABLED(CONFIG_IPV6)
	rcu_assign_pointer(geneve->sock6, gs6);
	if (gs6)
		rcu_assign_sk_user_data(gs6->sock->sk, gs6);
#endif
	synchronize_net();
}
static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs4, *gs6;
	struct ip_tunnel_info info;
	bool metadata;
	bool use_udp6_rx_checksums;
	int err;

	/* If the geneve device is configured for metadata (or externally
	 * controlled, for example, OVS), then nothing can be changed.
	 */
	if (geneve->collect_md)
		return -EOPNOTSUPP;

	/* Start with the existing info. */
	memcpy(&info, &geneve->info, sizeof(info));
	metadata = geneve->collect_md;
	use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
	err = geneve_nl2info(tb, data, extack, &info, &metadata,
			     &use_udp6_rx_checksums, true);
	if (err)
		return err;

	if (!geneve_dst_addr_equal(&geneve->info, &info)) {
		dst_cache_reset(&info.dst_cache);
		geneve_link_config(dev, &info, tb);
	}

	geneve_quiesce(geneve, &gs4, &gs6);
	geneve->info = info;
	geneve->collect_md = metadata;
	geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
	geneve_unquiesce(geneve, gs4, gs6);

	return 0;
}
static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}
static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be32)) +	/* IFLA_GENEVE_LABEL */
		nla_total_size(sizeof(__be16)) +	/* IFLA_GENEVE_PORT */
		nla_total_size(0) +	/* IFLA_GENEVE_COLLECT_METADATA */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_UDP_CSUM */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
		0;
}
*skb
, const struct net_device
*dev
)
1567 struct geneve_dev
*geneve
= netdev_priv(dev
);
1568 struct ip_tunnel_info
*info
= &geneve
->info
;
1569 bool metadata
= geneve
->collect_md
;
1573 tunnel_id_to_vni(info
->key
.tun_id
, tmp_vni
);
1574 vni
= (tmp_vni
[0] << 16) | (tmp_vni
[1] << 8) | tmp_vni
[2];
1575 if (nla_put_u32(skb
, IFLA_GENEVE_ID
, vni
))
1576 goto nla_put_failure
;
1578 if (!metadata
&& ip_tunnel_info_af(info
) == AF_INET
) {
1579 if (nla_put_in_addr(skb
, IFLA_GENEVE_REMOTE
,
1580 info
->key
.u
.ipv4
.dst
))
1581 goto nla_put_failure
;
1582 if (nla_put_u8(skb
, IFLA_GENEVE_UDP_CSUM
,
1583 !!(info
->key
.tun_flags
& TUNNEL_CSUM
)))
1584 goto nla_put_failure
;
1586 #if IS_ENABLED(CONFIG_IPV6)
1587 } else if (!metadata
) {
1588 if (nla_put_in6_addr(skb
, IFLA_GENEVE_REMOTE6
,
1589 &info
->key
.u
.ipv6
.dst
))
1590 goto nla_put_failure
;
1591 if (nla_put_u8(skb
, IFLA_GENEVE_UDP_ZERO_CSUM6_TX
,
1592 !(info
->key
.tun_flags
& TUNNEL_CSUM
)))
1593 goto nla_put_failure
;
1597 if (nla_put_u8(skb
, IFLA_GENEVE_TTL
, info
->key
.ttl
) ||
1598 nla_put_u8(skb
, IFLA_GENEVE_TOS
, info
->key
.tos
) ||
1599 nla_put_be32(skb
, IFLA_GENEVE_LABEL
, info
->key
.label
))
1600 goto nla_put_failure
;
1602 if (nla_put_be16(skb
, IFLA_GENEVE_PORT
, info
->key
.tp_dst
))
1603 goto nla_put_failure
;
1605 if (metadata
&& nla_put_flag(skb
, IFLA_GENEVE_COLLECT_METADATA
))
1606 goto nla_put_failure
;
1608 #if IS_ENABLED(CONFIG_IPV6)
1609 if (nla_put_u8(skb
, IFLA_GENEVE_UDP_ZERO_CSUM6_RX
,
1610 !geneve
->use_udp6_rx_checksums
))
1611 goto nla_put_failure
;
static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.changelink	= geneve_changelink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct ip_tunnel_info info;
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	init_tnl_info(&info, dst_port);
	err = geneve_configure(net, dev, NULL, &info, true, true);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = geneve_change_mtu(dev, IP_MAX_MTU);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;
err:
	geneve_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
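
/* geneve_dev_create_fb() is the factory used by in-kernel callers (notably
 * openvswitch's vport-geneve) to create a collect_md device on an arbitrary
 * destination port without going through the usual rtnl newlink path.
 */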
static int geneve_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
	    event == NETDEV_UDP_TUNNEL_DROP_INFO) {
		geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
	} else if (event == NETDEV_UNREGISTER) {
		geneve_offload_rx_ports(dev, false);
	} else if (event == NETDEV_REGISTER) {
		geneve_offload_rx_ports(dev, true);
	}

	return NOTIFY_DONE;
}

static struct notifier_block geneve_notifier_block __read_mostly = {
	.notifier_call = geneve_netdevice_event,
};
int geneve_init_net(struct net
*net
)
1698 struct geneve_net
*gn
= net_generic(net
, geneve_net_id
);
1700 INIT_LIST_HEAD(&gn
->geneve_list
);
1701 INIT_LIST_HEAD(&gn
->sock_list
);
1705 static void geneve_destroy_tunnels(struct net
*net
, struct list_head
*head
)
1707 struct geneve_net
*gn
= net_generic(net
, geneve_net_id
);
1708 struct geneve_dev
*geneve
, *next
;
1709 struct net_device
*dev
, *aux
;
1711 /* gather any geneve devices that were moved into this ns */
1712 for_each_netdev_safe(net
, dev
, aux
)
1713 if (dev
->rtnl_link_ops
== &geneve_link_ops
)
1714 unregister_netdevice_queue(dev
, head
);
1716 /* now gather any other geneve devices that were created in this ns */
1717 list_for_each_entry_safe(geneve
, next
, &gn
->geneve_list
, next
) {
1718 /* If geneve->dev is in the same netns, it was already added
1719 * to the list by the previous loop.
1721 if (!net_eq(dev_net(geneve
->dev
), net
))
1722 unregister_netdevice_queue(geneve
->dev
, head
);
1725 WARN_ON_ONCE(!list_empty(&gn
->sock_list
));
1728 static void __net_exit
geneve_exit_batch_net(struct list_head
*net_list
)
1734 list_for_each_entry(net
, net_list
, exit_list
)
1735 geneve_destroy_tunnels(net
, &list
);
1737 /* unregister the devices gathered above */
1738 unregister_netdevice_many(&list
);
1742 static struct pernet_operations geneve_net_ops
= {
1743 .init
= geneve_init_net
,
1744 .exit_batch
= geneve_exit_batch_net
,
1745 .id
= &geneve_net_id
,
1746 .size
= sizeof(struct geneve_net
),
static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&geneve_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&geneve_notifier_block);
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_netdevice_notifier(&geneve_notifier_block);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");