/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
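/* Example usage from userspace (illustrative only; standard iproute2
 * commands, not part of this driver):
 *
 *	ip link add vrf-blue type vrf table 10
 *	ip link set dev vrf-blue up
 *	ip link set dev eth1 master vrf-blue
 *
 * Enslaving eth1 moves its connected routes into table 10, and sockets
 * bound to vrf-blue are scoped to that table.
 */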
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */
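/* The preference above is used by vrf_fib_rule() to install the single
 * l3mdev FIB rule (FRA_L3MDEV) that redirects lookups to the table owned
 * by the skb's VRF; see vrf_add_fib_rules() and vrf_newlink() below, which
 * add it once per network namespace.
 */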
static unsigned int vrf_net_id;

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}
/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}
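/* The result of qdisc_tx_is_default() selects between the two Tx strategies
 * implemented below: with the default no-queue setup, vrf_ip_out() and
 * vrf_ip6_out() take the "direct" path (vrf_*_out_direct), running the
 * LOCAL_OUT and POST_ROUTING netfilter hooks inline; once a qdisc is
 * attached, they fall back to the "redirect" path, which points the skb at
 * the VRF's cached dst so it is re-queued through the VRF device itself.
 */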
/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
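/* vrf_xmit() below is the device's ndo_start_xmit: it hands the frame to
 * the per-family handler above, which performs the route lookup in the
 * VRF's table, and then accounts the verdict in the per-cpu dstats.
 */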
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;

		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);

		dst_release(dst);
	}
}
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
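/* The rt6 cached above and the rtable cached by vrf_rtable_create() below
 * are the per-VRF dsts whose ->output handlers (vrf_output6/vrf_output)
 * eventually push packets out the real device; the vrf_*_out_redirect
 * helpers attach them to locally generated skbs.
 */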
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
	}

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);

	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;

		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);

		dst_release(dst);
	}
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}
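/* Note that fib_new_table()/fib6_new_table() are called up front so the
 * VRF's table exists as soon as the device does, before any route has
 * been added to it.
 */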
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif	= ifindex,
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
*vrf_ip_rcv(struct net_device
*vrf_dev
,
1044 struct sk_buff
*skb
)
1047 skb
->skb_iif
= vrf_dev
->ifindex
;
1048 IPCB(skb
)->flags
|= IPSKB_L3SLAVE
;
1050 if (ipv4_is_multicast(ip_hdr(skb
)->daddr
))
1053 /* loopback traffic; do not push through packet taps again.
1054 * Reset pkt_type for upper layers to process skb
1056 if (skb
->pkt_type
== PACKET_LOOPBACK
) {
1057 skb
->pkt_type
= PACKET_HOST
;
1061 vrf_rx_stats(vrf_dev
, skb
->len
);
1063 if (!list_empty(&vrf_dev
->ptype_all
)) {
1064 skb_push(skb
, skb
->mac_len
);
1065 dev_queue_xmit_nit(skb
, vrf_dev
);
1066 skb_pull(skb
, skb
->mac_len
);
1069 skb
= vrf_rcv_nfhook(NFPROTO_IPV4
, NF_INET_PRE_ROUTING
, skb
, vrf_dev
);
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
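/* vrf_l3_rcv() is wired up through vrf_l3mdev_ops below; the IPv4/IPv6
 * input paths call it via the l3mdev_ip_rcv()/l3mdev_ip6_rcv() hooks so
 * that skb->dev is switched to the VRF device before the packet is routed
 * and delivered.
 */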
#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}
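/* The rule built above is the kernel-side equivalent of what a new enough
 * iproute2 would install with:
 *
 *	ip [-6] rule add l3mdev pref 1000
 *
 * One rule is shared by all VRFs, hence NLM_F_EXCL and the tolerance of
 * -EEXIST on add and -ENOENT on delete.
 */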
static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}
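/* IFF_NO_QUEUE above is what makes qdisc_tx_is_default() true for a freshly
 * created VRF, so the direct Tx path is the out-of-the-box behavior.
 */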
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}
*dev
, struct list_head
*head
)
1293 struct net_device
*port_dev
;
1294 struct list_head
*iter
;
1296 netdev_for_each_lower_dev(dev
, port_dev
, iter
)
1297 vrf_del_slave(dev
, port_dev
);
1299 unregister_netdevice_queue(dev
, head
);
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_TABLE */
}
static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}
static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size	 = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}
static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};
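/* The per-namespace state is just this one bool: vrf_newlink() clears it
 * after the first VRF in a namespace successfully installs the FIB rules,
 * so rule setup runs exactly once per netns.
 */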
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);