/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
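/* Typical usage, assuming the iproute2 "ip" utility (illustrative, not
 * part of this driver):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * Routes in table 10 then govern traffic through eth1, and sockets bound
 * to vrf-blue are scoped to that table.
 */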
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

static unsigned int vrf_net_id;
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};
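/* Design note: each VRF caches two dst pairs - rth/rt6 route packets out
 * the device via vrf_output()/vrf_output6(), while rth_local/rt6_local
 * loop locally addressed packets back into the stack. This lets the xmit
 * paths below short-circuit FIB results that point back at us.
 */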
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}
/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
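/* Note: nf_hook() returns 1 when the packet may continue, in which case
 * the caller must invoke the okfn (dst_output() here) itself; any other
 * return means netfilter consumed, queued or dropped the skb.
 */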
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
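/* TX path in brief: vrf_xmit() -> is_ip_tx_frame() ->
 * vrf_process_v{4,6}_outbound(), which re-runs the FIB lookup in the
 * VRF's table (the flowi*_oif values above match the l3mdev FIB rule)
 * and either loops local traffic back via vrf_local_xmit() or sends it
 * on through vrf_output()/vrf_output6() to the real egress device.
 */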
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rt6_local) {
		if (rt6_local->rt6i_idev) {
			in6_dev_put(rt6_local->rt6i_idev);
			rt6_local->rt6i_idev = NULL;
		}

		dst = &rt6_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev  = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input  = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
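/* With IPv6 compiled out, the stubs above keep the callers ifdef-free:
 * IPv6 skbs pass through untouched and the rt6 create/release steps
 * become no-ops.
 */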
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
	}

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	/* don't divert multicast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		return skb;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}
/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}
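/* The pair mirrors the IPv6 setup in vrf_rt6_create(): an RTN_UNICAST
 * dst whose output hook is vrf_output() for egress, and an
 * RTCF_LOCAL/RTN_LOCAL dst for delivering local traffic back up the
 * stack, both tied to the VRF's table id.
 */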
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}
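/* Cycling the slave down and up is what actually flushes its neighbor
 * entries and re-installs its addresses and connected routes, now into
 * the VRF's table; see cycle_netdev() above.
 */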
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.flowi6_iif	= ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
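/* RX path in brief: the l3mdev hooks in the IPv4/IPv6 receive paths hand
 * packets arriving on a VRF slave to l3mdev_l3_rcv(), which calls this
 * handler. skb->dev is switched to the VRF device so taps, netfilter and
 * sockets see the traffic as arriving on the VRF.
 */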
#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}
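/* Installing the rule this way is roughly equivalent to the user running
 * "ip rule add l3mdev pref 1000": a single FIB rule that directs lookups
 * for l3mdev-bound traffic to the owning device's table, rather than one
 * rule per VRF.
 */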
static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC)
		return -EINVAL;

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}
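/* The FIB rules are added lazily on the first VRF created in each
 * namespace (tracked by the per-netns add_fib_rules flag) and are not
 * removed by the driver afterwards - they are shared by all VRFs in
 * that namespace.
 */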
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_TABLE */
}
static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}
static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size	 = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}
static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}
module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);