/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/ndisc.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
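/* Worked example: VXLAN_HEADROOM is 20 (outer IPv4) + 8 (outer UDP) +
 * 8 (VXLAN header) + 14 (inner Ethernet) = 50 bytes of encapsulation
 * overhead.  vxlan_newlink() below uses it to derive the device MTU from
 * the lower device, e.g. 1500 - 50 = 1450 over a standard Ethernet underlay.
 */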
/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};
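/* Illustration: the 24-bit VNI occupies the upper three bytes of vx_vni, so
 * transmit encodes it as htonl(vni << 8) (see vxlan_xmit_one()) and receive
 * recovers it with ntohl(vx_vni) >> 8 (see vxlan_udp_encap_recv()); the low
 * byte is reserved and must be zero.
 */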
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];
/* per UDP socket information */
struct vxlan_sock {
	struct hlist_node hlist;
	struct rcu_head	  rcu;
	struct work_struct del_work;
	atomic_t	  refcnt;
	struct socket	  *sock;
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};
struct vxlan_rdst {
	__be32			 remote_ip;
	__be16			 remote_port;
	u32			 remote_vni;
	u32			 remote_ifindex;
	struct list_head	 list;
	struct rcu_head		 rcu;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct vxlan_rdst default_dst;	/* default destination */
	__be32		  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	struct work_struct sock_work;
	struct work_struct igmp_work;

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static void vxlan_sock_work(struct work_struct *work);
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}
/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
{
	return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port)
			return vs;
	}
	return NULL;
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
{
	struct vxlan_sock *vs;
	struct vxlan_dev *vxlan;

	vs = vxlan_find_port(net, port);
	if (!vs)
		return NULL;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = rdst->remote_ip != htonl(INADDR_ANY);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	INIT_LIST_HEAD(&f.remotes);
	list_add_rcu(&remote.list, &f.remotes);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};

	INIT_LIST_HEAD(&f.remotes);
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      __be32 ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (rd->remote_ip == ip &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	return 1;
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}
static void vxlan_fdb_free_rdst(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	kfree(rd);
}
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);

	if (tb[NDA_DST]) {
		if (nla_len(tb[NDA_DST]) != sizeof(__be32))
			return -EAFNOSUPPORT;

		*ip = nla_get_be32(tb[NDA_DST]);
	} else {
		*ip = htonl(INADDR_ANY);
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
		dev_put(tdev);
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	__be32 ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
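/* Usage sketch (assumes an iproute2 "bridge" tool and an existing vxlan0
 * device): a static entry pointing a MAC address at a remote VTEP can be
 * installed with e.g.
 *
 *	bridge fdb add 00:17:42:8a:b4:05 dev vxlan0 dst 192.19.0.2
 *
 * which reaches vxlan_fdb_add() above through the ndo_fdb_add hook, with
 * NDA_DST (and optionally port/vni/ifindex) parsed by vxlan_fdb_parse().
 */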
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	__be32 ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (ip != htonl(INADDR_ANY)) {
		rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote(f);

		if (likely(rdst->remote_ip == src_ip))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &rdst->remote_ip, &src_ip);

		rdst->remote_ip = src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
{
	struct vxlan_dev *vxlan;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev))
			continue;

		if (vxlan->default_dst.remote_ip == remote_ip)
			return true;
	}

	return false;
}
static void vxlan_sock_hold(struct vxlan_sock *vs)
{
	atomic_inc(&vs->refcnt);
}
static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
{
	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
/* Callback to update multicast group membership.
 * Scheduled when vxlan goes up/down.
 */
static void vxlan_igmp_work(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
	struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	lock_sock(sk);
	if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
		ip_mc_join_group(sk, &mreq);
	else
		ip_mc_leave_group(sk, &mreq);
	release_sock(sk);

	vxlan_sock_release(vn, vs);
	dev_put(vxlan->dev);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct pcpu_tstats *stats;
	__be16 port;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	port = inet_sk(sk)->inet_sport;
	vxlan = vxlan_find_vni(sock_net(sk), vni, port);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d port %u\n",
			   vni, ntohs(port));
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
		goto drop;

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);

	return false;
}
static void vxlan_sock_put(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk = vxlan->vn_sock->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_put;
}
/* Compute source port for outgoing packet.
 * First choice is the L4 flow hash, since it spreads better and may be
 * available from hardware; second choice is a jhash over the Ethernet
 * header.
 */
static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return htons((((u64) hash * range) >> 32) + vxlan->port_min);
}
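/* Worked example (illustrative numbers): with port_min = 32768 and
 * port_max = 61000 the range is 28233 ports.  A flow hash of 0x9e3779b9
 * maps to ((0x9e3779b9 * 28233ULL) >> 32) + 32768 = 50216, so every flow
 * lands proportionally inside [port_min, port_max] while packets of the
 * same flow keep the same source port.
 */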
static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);

	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
			    eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	__be32 dst;
	__be16 src_port, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = rdst->remote_ip;

	if (!dst) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = rdst->remote_ifindex;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	/* Bypass encapsulation if the destination is local */
	if (rt->rt_flags & RTCF_LOCAL &&
	    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		ip_rt_put(rt);
		dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
		if (!dst_vxlan)
			goto tx_error;
		vxlan_encap_bypass(skb, vxlan, dst_vxlan);
		return;
	}
	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(dev, skb);

	if (handle_offloads(skb))
		goto drop;

	tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
	ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
			    IPPROTO_UDP, tos, ttl, df);
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    ntohs(eth->h_proto) == ETH_P_IP) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs;
	__u32 vni = vxlan->default_dst.remote_vni;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock(&vn->sock_lock);
	vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
	if (vs) {
		/* If we have a socket with same port already, reuse it */
		atomic_inc(&vs->refcnt);
		vxlan->vn_sock = vs;
		hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	} else {
		/* otherwise make new socket outside of RTNL */
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->sock_work);
	}
	spin_unlock(&vn->sock_lock);

	return 0;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}
static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;

	vxlan_fdb_delete_default(vxlan);

	if (vs)
		vxlan_sock_release(vn, vs);
	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	/* socket hasn't been created */
	if (!vs)
		return -ENOTCONN;

	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_work);
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs = vxlan->vn_sock;

	if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
		vxlan_sock_hold(vs);
		dev_hold(dev);
		queue_work(vxlan_wq, &vxlan->igmp_work);
	}

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);
	INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
	INIT_WORK(&vxlan->sock_work, vxlan_sock_work);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;
	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
};
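/* Usage sketch (assumes an iproute2 build with VXLAN support): a device
 * carrying the attributes above is typically created with e.g.
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
 *
 * which arrives at vxlan_newlink() below with IFLA_VXLAN_ID,
 * IFLA_VXLAN_GROUP and IFLA_VXLAN_LINK set.
 */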
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
= {
1555 .get_drvinfo
= vxlan_get_drvinfo
,
1556 .get_link
= ethtool_op_get_link
,
1559 static void vxlan_del_work(struct work_struct
*work
)
1561 struct vxlan_sock
*vs
= container_of(work
, struct vxlan_sock
, del_work
);
1563 sk_release_kernel(vs
->sock
->sk
);
1567 static struct vxlan_sock
*vxlan_socket_create(struct net
*net
, __be16 port
)
1569 struct vxlan_sock
*vs
;
1571 struct sockaddr_in vxlan_addr
= {
1572 .sin_family
= AF_INET
,
1573 .sin_addr
.s_addr
= htonl(INADDR_ANY
),
1579 vs
= kmalloc(sizeof(*vs
), GFP_KERNEL
);
1581 return ERR_PTR(-ENOMEM
);
1583 for (h
= 0; h
< VNI_HASH_SIZE
; ++h
)
1584 INIT_HLIST_HEAD(&vs
->vni_list
[h
]);
1586 INIT_WORK(&vs
->del_work
, vxlan_del_work
);
1588 /* Create UDP socket for encapsulation receive. */
1589 rc
= sock_create_kern(AF_INET
, SOCK_DGRAM
, IPPROTO_UDP
, &vs
->sock
);
1591 pr_debug("UDP socket create failed\n");
1596 /* Put in proper namespace */
1598 sk_change_net(sk
, net
);
1600 rc
= kernel_bind(vs
->sock
, (struct sockaddr
*) &vxlan_addr
,
1601 sizeof(vxlan_addr
));
1603 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1604 &vxlan_addr
.sin_addr
, ntohs(vxlan_addr
.sin_port
), rc
);
1605 sk_release_kernel(sk
);
1610 /* Disable multicast loopback */
1611 inet_sk(sk
)->mc_loop
= 0;
1613 /* Mark socket as an encapsulation socket. */
1614 udp_sk(sk
)->encap_type
= 1;
1615 udp_sk(sk
)->encap_rcv
= vxlan_udp_encap_recv
;
1617 atomic_set(&vs
->refcnt
, 1);
/* Scheduled at device creation to bind to a socket */
static void vxlan_sock_work(struct work_struct *work)
{
	struct vxlan_dev *vxlan
		= container_of(work, struct vxlan_dev, sock_work);
	struct net_device *dev = vxlan->dev;
	struct net *net = dev_net(dev);
	__u32 vni = vxlan->default_dst.remote_vni;
	__be16 port = vxlan->dst_port;
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *nvs, *ovs;

	nvs = vxlan_socket_create(net, port);
	if (IS_ERR(nvs)) {
		netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n",
			   PTR_ERR(nvs));
		goto out;
	}

	spin_lock(&vn->sock_lock);
	/* Look again to see if can reuse socket */
	ovs = vxlan_find_port(net, port);
	if (ovs) {
		atomic_inc(&ovs->refcnt);
		vxlan->vn_sock = ovs;
		hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
		spin_unlock(&vn->sock_lock);

		sk_release_kernel(nvs->sock->sk);
		kfree(nvs);
	} else {
		vxlan->vn_sock = nvs;
		hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
		hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
		spin_unlock(&vn->sock_lock);
	}
out:
	dev_put(dev);
}
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			= __dev_get_by_index(net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	/* create an fdb entry for default destination */
	err = vxlan_fdb_create(vxlan, all_zeros_mac,
			       vxlan->default_dst.remote_ip,
			       NUD_REACHABLE|NUD_PERMANENT,
			       NLM_F_EXCL|NLM_F_CREATE,
			       vxlan->dst_port, vxlan->default_dst.remote_vni,
			       vxlan->default_dst.remote_ifindex, NTF_SELF);
	if (err)
		return err;

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);

	flush_workqueue(vxlan_wq);

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) +/* IFLA_VXLAN_PORT */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
		goto nla_put_failure;

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
void vxlan_exit_net(struct net
*net
)
1879 struct vxlan_net
*vn
= net_generic(net
, vxlan_net_id
);
1880 struct vxlan_dev
*vxlan
;
1883 list_for_each_entry(vxlan
, &vn
->vxlan_list
, next
)
1884 dev_close(vxlan
->dev
);
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out2;

	return 0;

out2:
	unregister_pernet_device(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_ALIAS_RTNL_LINK("vxlan");