/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
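/* On the wire the 24-bit VNI occupies the upper three bytes of vx_vni
 * and the low byte is reserved: transmit shifts the id left by 8, and
 * receive shifts right by 8 after rejecting packets with any of the
 * low 8 bits set.
 */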
/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* per-net private data for this module */
static unsigned int vxlan_net_id;

struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	__be32		  remote_ip;
	u16		  state;	/* see ndm_state */
	u8		  eth_addr[ETH_ALEN];
};
/* Per-cpu network traffic stats */
struct vxlan_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_stats __percpu *stats;
	__u32		  vni;		/* virtual network id */
	__be32		  gaddr;	/* multicast group */
	__be32		  saddr;	/* source address */
	unsigned int	  link;		/* link to multicast over */
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	bool		  learn;

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;
	unsigned int	  addrexceeded;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));
	ndm->ndm_family	 = AF_BRIDGE;
	ndm->ndm_state	 = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags	 = NTF_SELF;
	ndm->ndm_type	 = NDA_DST;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
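/* The unaligned 64-bit load picks up the 6-byte MAC plus two trailing
 * bytes; the endian-dependent shift above pushes those extra bytes out
 * so only the address itself feeds hash_64().
 */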
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(f, node, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote_ip = ip;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	kfree_rcu(f, rcu);
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__be32 ip;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
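/* These ndo_fdb_* hooks are reached through the bridge netlink API;
 * an illustrative iproute2 invocation for a static entry would be:
 *   bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 10.0.0.2
 */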
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		struct hlist_node *n;
		int err;

		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
			if (idx < cb->args[0])
				goto skip;

			err = vxlan_fdb_info(skb, vxlan, f,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI);
			if (err < 0)
				break;
skip:
			++idx;
		}
	}

	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}
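/* This is the learning half of flood-and-learn: like a bridge binding
 * a source MAC to an ingress port, the inner source MAC is bound to
 * the outer source IP, so later transmits to that MAC can go unicast
 * instead of to the multicast group.
 */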
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	struct hlist_node *node;
	unsigned int h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}
/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->learn)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);
	skb->ip_summed = CHECKSUM_NONE;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
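/* encap_rcv return convention: returning 0 tells the UDP stack the
 * packet was consumed here; a positive return hands it back (with the
 * UDP header restored above) for normal socket delivery.
 */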
/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}
/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
				 const struct iphdr *iph,
				 const struct sk_buff *skb)
{
	u8 inner = vxlan_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	const struct ethhdr *eth = (struct ethhdr *) skb->data;
	const struct vxlan_fdb *f;

	if (is_multicast_ether_addr(eth->h_dest))
		return vxlan->gaddr;

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f)
		return f->remote_ip;
	else
		return vxlan->gaddr;
}
static void vxlan_sock_free(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_free;
}
/* Compute source port for outgoing packet
 *   first choice to use L4 flow hash since it will spread
 *     better and maybe available from hardware
 *   secondary choice is to use jhash on the Ethernet header
 */
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return (((u64) hash * range) >> 32) + vxlan->port_min;
}
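/* (((u64) hash * range) >> 32 maps the 32-bit hash uniformly onto
 * [0, range) using a multiply-shift instead of a modulo, so the result
 * always falls within [port_min, port_max].
 */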
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *   source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	unsigned int pkt_len = skb->len;
	__be32 dst;
	__u16 src_port;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst = vxlan_find_dst(vxlan, skb);
	if (!dst)
		goto drop;

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = vxlan->link;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	uh->source = htons(src_port);

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph		= ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= IPPROTO_UDP;
	iph->tos	= vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr	= dst;
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);

	vxlan_set_owner(dev, skb);

	/* See __IPTUNNEL_XMIT */
	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan->stats = alloc_percpu(struct vxlan_stats);
	if (!vxlan->stats)
		return -ENOMEM;

	return 0;
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}
/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *stats
			= per_cpu_ptr(vxlan->stats, cpu);

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			memcpy(&tmp, stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
	stats->collisions = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}
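/* u64_stats_fetch_begin_bh/_retry_bh form a seqcount read loop: the
 * memcpy is retried if a writer touched the counters meanwhile, so the
 * snapshot of all four 64-bit counters is consistent even on 32-bit
 * hosts.
 */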
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
static void vxlan_free(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	free_percpu(vxlan->stats);
	free_netdev(dev);
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, vxlan->link);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", vxlan->link);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->learn = true;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}
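/* A device of this type is created through the rtnl "vxlan" kind,
 * e.g. (illustrative iproute2 invocation):
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
 */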
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ifla_vxlan_port_range ports = {
		.low  = htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned int h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}
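/* With encap_type set, udp_rcv() hands every datagram arriving on this
 * socket to vxlan_udp_encap_recv() before normal socket delivery;
 * udp_encap_enable() flips the static key that guards that path.
 */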
static __net_exit void vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (vn->sock)
		sk_release_kernel(vn->sock->sk);
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	return rc;
}
module_init(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");